import * as types from 'app/types'
import { store, history, dispatch } from 'app/store'
import actions from 'app/actions'
import {
  MEDIA_ANNOTATION_TYPES,
  MEDIA_LABEL_TYPES,
  TEXT_ANNOTATION_TYPES,
  INLINE_UTILITY_ANNOTATION_TYPES,
  FULLSCREEN_UTILITY_ANNOTATION_TYPES,
  GALLERY_UTILITY_ANNOTATION_TYPES,
  CURTAIN_COLOR_LOOKUP,
  GROWL,
} from 'app/constants'
import { floatInRange, timestampToSeconds } from 'app/utils'
import { buildParagraphs } from 'app/utils/transcript.utils'
import { annotationFadeTimings } from 'app/utils/annotation.utils'
import { getNextSection } from 'app/utils/viewer.utils'

// build the list of sections from the raw annotation list.
export const loadSections = () => dispatch => {
  // list of all sections
  let sections = []
  // current section being processed (i.e. last section)
  let currentSection
  // keep tally of all media, so that we can display them with correct IDs in the checklist
  let mediaIndex = 0
  let eventIndex = 0
  // dedupe the labels that we see in each section
  let currentMediaLabels = {}
  let seenMedia = {}
  // keep track of all annotations that constitute the "text" of the essay.
  // these include sentences, headings, and inline media. used to build paragraphs, then reset.
  let sectionTextAnnotationOrder = []
  // keep track of all annotations that constitute fullscreen events.
  // these include curtains, title cards, and fullscreen media.
  // let fullscreenTimeline = []

  // fetch all annotations and media
  const state = store.getState()
  const { timeline } = state.align
  const { order: annotationOrder, lookup: annotationLookup } = state.annotation.index
  const { lookup: mediaLookup } = state.media.index

  // loop over the annotations in time order.
  annotationOrder.forEach((annotation_id, i) => {
    // fetch the current annotation
    const annotation = annotationLookup[annotation_id]

    // we have reached a new section.
    if (annotation.type === 'section_heading') {
      // finish off the previous section.
      if (currentSection) {
        currentSection.mediaLabels = Object.keys(currentMediaLabels).sort().join(', ')
        currentSection.paragraphs = buildParagraphs(sectionTextAnnotationOrder, currentSection.index)
        currentSection.end_ts = currentSection.paragraphs[currentSection.paragraphs.length - 1].end_ts
      }
      // create a new section and reset state variables
      currentSection = newSection(annotation, sections.length, mediaIndex)
      currentMediaLabels = {}
      sectionTextAnnotationOrder = []
      // add this new section to the list!
      sections.push(currentSection)
    }

    // sanity check. ignore everything before the first section.
    if (!currentSection) {
      return
    }
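
    // NOTE: the checks below are independent `if`s, not `else if`s, so a single
    // annotation can be handled by more than one of them (e.g. a fullscreen,
    // non-inline media annotation is added to the section's media list here and
    // also produces a fullscreen event further down).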

    // add media to the current section.
    if (MEDIA_ANNOTATION_TYPES.has(annotation.type)) {
      const media = mediaLookup[annotation.settings.media_id]
      // fetch the media and add it to the list of media (TODO: handle carousels)
      if (!media.settings.hide_in_bibliography && !(media.id in seenMedia)) {
        currentSection.media.push({
          start_ts: annotation.start_ts,
          media,
        })
        seenMedia[media.id] = true
      }
      // get the display string for this media type
      // console.log(annotation.type, media.type)
      if (annotation.type in MEDIA_LABEL_TYPES) {
        currentMediaLabels[MEDIA_LABEL_TYPES[annotation.type]] = true
      }
      // increment the media tally
      mediaIndex += 1
      // non-fullscreen (or fullscreen-inline) media should be displayed in the transcript.
      if (!annotation.settings.fullscreen || annotation.settings.inline) {
        sectionTextAnnotationOrder.push(annotation.id)
      }
    }

    // build timeline of gallery / carousel advance instructions.
    // this is in reverse so we can step thru it
    if (GALLERY_UTILITY_ANNOTATION_TYPES.has(annotation.type) && currentSection.fullscreenTimeline.length) {
      const lastTimelineEvent = currentSection.fullscreenTimeline[currentSection.fullscreenTimeline.length - 1]
      annotation.settings.frame_index = parseInt(annotation.settings.frame_index, 10)
      annotation.settings.seek_index = annotation.settings.half_frame
        ? annotation.settings.frame_index + 0.5
        : annotation.settings.frame_index
      lastTimelineEvent.timeline.unshift(annotation)
      lastTimelineEvent.timelineLookup[annotation.settings.frame_index] = annotation
    }

    // build timeline of special inline items
    if (INLINE_UTILITY_ANNOTATION_TYPES.has(annotation.type)) {
      sectionTextAnnotationOrder.push(annotation.id)
      if (annotation.type === 'intro') {
        if (annotation.settings.intro_start_ts) {
          currentSection.intro_start_ts = timestampToSeconds(annotation.settings.intro_start_ts)
        } else {
          currentSection.intro_start_ts = 0
        }
      }
    }

    // build timeline of fullscreen events
    if ((FULLSCREEN_UTILITY_ANNOTATION_TYPES.has(annotation.type) || annotation.settings.fullscreen) && !annotation.settings.inline) {
      const event = makeFullscreenEvent(eventIndex++, annotation)
      currentSection.fullscreenTimeline.push(event)
    }

    // add text annotations to section annotation order
    if (TEXT_ANNOTATION_TYPES.has(annotation.type)) {
      sectionTextAnnotationOrder.push(annotation.id)
    }
  })

  // finished processing all annotations. finish off the last section.
  if (currentSection) {
    currentSection.mediaLabels = Object.keys(currentMediaLabels).sort().join(', ')
    currentSection.paragraphs = buildParagraphs(sectionTextAnnotationOrder, currentSection.index)
    currentSection.duration = timeline.duration
  }

  let time_to_first_fullscreen_element, initial_curtain_event

  // last fixes on the sections
  for (let i = 0; i < sections.length - 1; i++) {
    currentSection = sections[i]

    // set the end_ts for each section (i.e. just before the next section starts)
    if (currentSection.end_ts === 0) {
      currentSection.end_ts = sections[i + 1].start_ts - 1
    }

    // if the first fullscreen event is close to the beginning of the section, move it there
    time_to_first_fullscreen_element = 0
    if (currentSection.fullscreenTimeline.length) {
      time_to_first_fullscreen_element = Math.abs(currentSection.fullscreenTimeline[0].start_ts - currentSection.start_ts)
      if (time_to_first_fullscreen_element < 1.0) {
        currentSection.fullscreenTimeline[0].start_ts = currentSection.start_ts
        time_to_first_fullscreen_element = 0.0
      }
    }

    if (!currentSection.fullscreenTimeline.length || time_to_first_fullscreen_element > 0.0) {
      // here we should create a dummy curtain event
      initial_curtain_event = makeFullscreenEvent(0, {
        start_ts: currentSection.start_ts,
        type: 'curtain',
        settings: {
          color: CURTAIN_COLOR_LOOKUP.white, // TODO: get this from the first annotation
          fade_in_duration: '0.0',
          fade_out_duration: '1.0',
          duration: '0',
        },
      })
      // currentSection.fullscreenTimeline.push(initial_curtain_event)
    }

    currentSection.duration = currentSection.end_ts - currentSection.start_ts
  }

  // console.log(sections)
  // console.log(fullscreenTimeline)
  dispatch({ type: types.viewer.load_sections, sections })
}
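
// Initial shape of a section. loadSections later fills in `mediaLabels`,
// `paragraphs`, `end_ts`, `duration`, and (when an intro annotation is present)
// `intro_start_ts`, and pushes events onto `fullscreenTimeline`.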
const newSection = (annotation, index, mediaIndex) => ({
  start_ts: annotation.start_ts,
  end_ts: 0,
  title: annotation.text,
  media: [],
  fullscreenTimeline: [],
  index,
  mediaIndex,
  no_audio: !!annotation.settings.no_audio,
  color: annotation.settings.color || 'white',
  section_nav_color: annotation.settings.section_nav_color || 'white',
})
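
// A fullscreen event wraps an annotation together with its fade timings
// (spread in from annotationFadeTimings), plus an empty `timeline` and
// `timelineLookup` that gallery / carousel advance annotations are later
// unshifted into by loadSections.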
const makeFullscreenEvent = (index, annotation) => {
  const timing = annotationFadeTimings(annotation)
  const event = {
    ...timing,
    annotation,
    index,
    settings: annotation.settings,
    type: annotation.type,
    timeline: [],
    timelineLookup: {},
  }
  if (annotation.settings.color) {
    event.color = CURTAIN_COLOR_LOOKUP[annotation.settings.color] || CURTAIN_COLOR_LOOKUP.white
  }
  return event
}

export const setNavStyle = color => dispatch => {
  dispatch({ type: types.viewer.set_nav_style, color })
}

export const setMediaTitle = title => dispatch => {
  dispatch({ type: types.viewer.set_media_title, title })
}

export const showComponent = key => dispatch => {
  dispatch({ type: types.viewer.toggle_component, key, value: true })
}

export const hideComponent = key => dispatch => {
  dispatch({ type: types.viewer.toggle_component, key, value: false })
}

export const toggleComponent = key => dispatch => {
  const state = store.getState().viewer
  if (key !== 'share' && state.share) {
    dispatch({ type: types.viewer.toggle_component, key: 'share', value: false })
  }
  dispatch({ type: types.viewer.toggle_component, key, value: !state[key] })
}

export const openTranscript = () => dispatch => {
  actions.viewer.hideComponent('checklist')
  actions.viewer.hideComponent('share')
  actions.viewer.toggleComponent('transcript')
}

export const reachedEndOfSection = currentSection => dispatch => {
  actions.audio.pause()
  dispatch({ type: types.viewer.reached_end_of_section })
  if (currentSection && currentSection.index === 0) {
    actions.viewer.openGrowl(GROWL.REACHED_END_OF_FIRST_SECTION)
  }
}

export const playFromClick = () => dispatch => {
  const state = store.getState()
  if (state.audio.play_ts === 0 && state.viewer.currentSection.intro_start_ts) {
    actions.viewer.seekToTimestamp(state.viewer.currentSection.intro_start_ts)
  } else {
    actions.audio.play()
  }
}

export const setCurrentSection = (currentSection, nextSection) => dispatch => {
  dispatch({ type: types.viewer.set_current_section, currentSection, nextSection })
}

export const seekToSection = section => dispatch => {
  actions.viewer.setCurrentSection(section, getNextSection(section))
  actions.audio.seek(section.start_ts)
  actions.audio.play()
  actions.viewer.hideComponent('nav')
  actions.viewer.hideComponent('share')
}

export const seekToMediaItem = (section, mediaItem) => dispatch => {
  actions.viewer.setCurrentSection(section, getNextSection(section))
  actions.audio.seek(mediaItem.start_ts)
  actions.audio.play()
  actions.viewer.hideComponent('nav')
  actions.viewer.hideComponent('checklist')
  actions.viewer.hideComponent('share')
}

export const seekToTimestamp = play_ts => dispatch => {
  actions.viewer.setSectionFromTimestamp(play_ts)
  actions.audio.seek(play_ts)
  actions.audio.play()
}

export const setSectionFromTimestamp = play_ts => dispatch => {
  const { sections, currentSection } = store.getState().viewer
  const insideSection = sections.some((section, i) => {
    if (floatInRange(section.start_ts, play_ts, section.end_ts)) {
      if (currentSection !== section) {
        const nextSection = sections[i + 1]
        actions.viewer.setCurrentSection(section, nextSection)
      }
      return true
    }
    return false
  })
  if (!insideSection) {
    actions.viewer.setCurrentSection(sections[sections.length - 1], null)
  }
}

export const openVitrineModal = (media, color, id) => dispatch => {
  // console.log(media)
  const index = media.settings.image_order.indexOf(id)
  dispatch({ type: types.viewer.open_vitrine_modal, media, color, index })
}

export const closeVitrineModal = () => dispatch => {
  dispatch({ type: types.viewer.close_vitrine_modal })
}

export const setVitrineIndex = index => dispatch => {
  dispatch({ type: types.viewer.set_vitrine_index, index })
}

export const vitrineGo = direction => dispatch => {
  const { vitrineModal } = store.getState().viewer
  const { media, index } = vitrineModal
  const targetIndex = index + direction
  const shouldClose = (targetIndex < 0) || (targetIndex === media.settings.image_order.length)
  if (shouldClose) {
    actions.viewer.closeVitrineModal()
  } else {
    actions.viewer.setVitrineIndex(targetIndex)
  }
}

export const openGrowl = message => dispatch => {
  dispatch({ type: types.viewer.open_growl, message })
}

export const closeGrowl = () => dispatch => {
  dispatch({ type: types.viewer.close_growl })
}
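
// Rough usage sketch (an assumption about the surrounding wiring, not verified
// here): the internal calls above suggest `actions.viewer` exposes these thunks
// pre-bound to the store's dispatch, e.g.
//
//   actions.viewer.loadSections()          // rebuild sections from annotations
//   actions.viewer.seekToSection(section)  // jump playback to a section's start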