Skip to content

Commit

Permalink
handle videos
Browse files — browse the repository at this point in the history
  • Loading branch information
antoinerousseau committed Oct 5, 2019
1 parent a754656 commit 5724267
Show file tree
Hide file tree
Showing 3 changed files with 92 additions and 40 deletions.
5 changes: 4 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,10 @@ Then create a `.env` file based on `example.env` (`cp {example,}.env`), and edit

./start.js

By default, it will process all photos and videos. If you want only photos or only videos, use the `MEDIA` environment variable. Set it to `photos` or `videos`, e.g.:

MEDIA=photos ./start.js

## Daemonize

You can use [PM2](https://github.com/Unitech/pm2)
Expand All @@ -28,5 +32,4 @@ You can use [PM2](https://github.com/Unitech/pm2)

## Limitations

- This script does not handle videos ([yet](https://github.com/antoinerousseau/flickr2google/pull/1))
- If your [Google storage](https://drive.google.com/settings/storage) is limited and you hit the limit, the Google API will return a "Bad Request". You must then either buy more storage, or go to your [Google Photos settings](https://photos.google.com/settings), choose "High Quality" and click "Recover storage". This will convert your uploads to [16 Megapixels compressed photos](https://support.google.com/photos/answer/6220791), which the API cannot do on the fly. Also, you can only convert once per day.
2 changes: 1 addition & 1 deletion google.js
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ module.exports = async () => {
{
url: GOOGLE_API_ENDPOINT + "uploads",
headers: {
// "Content-type": "application/octet-stream",
"Content-type": "application/octet-stream",
// "Content-length": set by stream
"X-Goog-Upload-File-Name": filename,
"X-Goog-Upload-Protocol": "raw",
Expand Down
125 changes: 87 additions & 38 deletions start.js
Original file line number Diff line number Diff line change
Expand Up @@ -6,14 +6,16 @@ const googleConnect = require("./google")
const { ALBUMS_PATH } = require("./constants")
const { log, logError, readJson, writeJson, fileExists, mkdir } = require("./utils")

const media = process.env.MEDIA || "all" // "photos" or "videos"
const extras = "url_o,media,path_alias,original_format"
const per_page = 500 // max

const getAlbumPath = (id) => `${ALBUMS_PATH}/${id}.json`

if (!fileExists(ALBUMS_PATH)) {
mkdir(ALBUMS_PATH)
}

const per_page = 500 // max

const main = async () => {
const { flickr, user_id } = await flickrConnect()
const { stream, post } = await googleConnect()
Expand All @@ -31,85 +33,119 @@ const main = async () => {
id: "NotInSet",
})

const albums_cache = {}
const memory = {}

photosets.forEach((set) => {
const path = getAlbumPath(set.id)
if (fileExists(path)) {
albums_cache[set.id] = readJson(path)
if (set.photos && albums_cache[set.id].num_photos !== set.photos) {
memory[set.id] = readJson(path)
if (set.photos != null && memory[set.id].num_photos !== set.photos) {
// number of photos has changed since last time, update:
albums_cache[set.id].num_photos = set.photos
writeJson(path, albums_cache[set.id])
memory[set.id].num_photos = set.photos
writeJson(path, memory[set.id])
}
if (set.videos != null && memory[set.id].num_videos !== set.videos) {
// number of videos has changed since last time, update:
memory[set.id].num_videos = set.videos
writeJson(path, memory[set.id])
}
} else {
albums_cache[set.id] = {
title: set.title && set.title._content,
memory[set.id] = {
title: set.id === "NotInSet" ? "Not in a set" : set.title._content,
flickr_set: set.id,
google_album: null,
num_photos: set.photos,
num_videos: set.videos,
total: set.photos + set.videos,
uploaded_photos: 0,
uploaded_videos: 0,
done: [],
}
writeJson(path, albums_cache[set.id])
writeJson(path, memory[set.id])
}
})

for (let i = 0; i < photosets.length; i++) {
const photoset_id = photosets[i].id
const path = getAlbumPath(photoset_id)
const data = albums_cache[photoset_id]
if (data.done.length === data.num_photos) {
const data = memory[photoset_id]

if (media === "photos" && data.num_photos === data.uploaded_photos) {
continue
}
if (media === "videos" && data.num_videos === data.uploaded_videos) {
continue
}
if (media === "all" && data.num_photos + data.num_videos === data.uploaded_photos + data.uploaded_videos) {
continue
}

// FOR EACH PHOTOSET, RETRIEVE PHOTOS
const total_photos = data.num_photos == null ? "?" : data.num_photos
const total_videos = data.num_videos == null ? "?" : data.num_videos

log(
`Processing "${data.title || photoset_id}" set ${i + 1}/${photosets.length};`,
`Fetching ${media} in "${data.title}" set ${i + 1}/${photosets.length};`,
`Flickr id: ${photoset_id};`,
`Total: ${data.num_photos || "?"} photos`
`Total: ${total_photos} photos & ${total_videos} videos`
)

// FOR EACH PHOTOSET, RETRIEVE PHOTOS

let photoset
let page = 0
do {
page++
if (photoset_id === "NotInSet") {
const { body } = await flickr.photos.getNotInSet({
// https://www.flickr.com/services/api/flickr.photos.getNotInSet.html
media: "photos",
extras: "url_o",
page,
media,
extras,
per_page,
page,
})
photoset = body.photos
photoset.title = "Photos not in a set"

if (!data.num_photos) {
data.num_photos = Number(photoset.total)
const totalNotInSet = Number(photoset.total)

if (!totalNotInSet) {
continue
}
if (media === "photos" && data.num_photos == null) {
data.num_photos = totalNotInSet
writeJson(path, data)
}
if (media === "videos" && data.num_videos == null) {
data.num_videos = totalNotInSet
writeJson(path, data)
}
if (media === "all" && data.total == null) {
data.total = totalNotInSet
writeJson(path, data)
}
} else {
const { body } = await flickr.photosets.getPhotos({
// https://www.flickr.com/services/api/flickr.photosets.getPhotos.html
photoset_id,
user_id,
media: "photos",
extras: "url_o",
page,
media,
extras,
per_page,
page,
})
photoset = body.photoset
}

const { title, photo: photos, pages } = photoset
const { photo: items, pages } = photoset

log(`Processing page ${page}/${pages} (${photos.length} photos);`, `Done ${data.done.length}`)
log(
`Processing page ${page}/${pages} (${items.length} ${media === "all" ? "items" : media});`,
media === "all" ? `Done ${data.done.length}` : ""
)

if (!data.google_album) {
const albumRequest = {
album: {
title,
title: data.title,
},
}
const { json: album } = await post("albums", albumRequest)
Expand All @@ -118,22 +154,34 @@ const main = async () => {
writeJson(path, data)
}

for (let j = 0; j < photos.length; j++) {
const photo = photos[j]
if (data.done.includes(photo.id)) {
for (let j = 0; j < items.length; j++) {
const item = items[j]
if (data.done.includes(item.id)) {
continue
}

let url
let ext = `.${item.originalformat}`
if (item.media === "video") {
url = `https://www.flickr.com/photos/${item.pathalias}/${item.id}/play/orig/${item.originalsecret}/`
if (item.originalformat === "jpg") {
// Flickr inconsistency
ext = ""
}
} else {
url = item.url_o
}

// FOR EACH PHOTO, UPLOAD TO GOOGLE PHOTOS

const uploadToken = await stream(photo.url_o, `flickr_${photo.id}.jpg`)
const uploadToken = await stream(url, `flickr_${item.id}${ext}`)

const media = {
const mediaItem = {
// https://developers.google.com/photos/library/reference/rest/v1/mediaItems/batchCreate
albumId: data.google_album,
newMediaItems: [
{
description: photo.title,
description: item.title,
simpleMediaItem: {
uploadToken,
},
Expand All @@ -143,18 +191,19 @@ const main = async () => {
const {
json: { newMediaItemResults: results },
status,
} = await post("mediaItems:batchCreate", media)
} = await post("mediaItems:batchCreate", mediaItem)

if (status === 200) {
if (results.length === 1 && results[0].mediaItem) {
data.done.push(photo.id)
if (results.length === 1 && results[0].mediaItem && !results[0].status.code) {
data.done.push(item.id)
data[`uploaded_${item.media}s`]++
writeJson(path, data)
log("Created media item @", results[0].mediaItem.productUrl)
} else {
logError("Media Item creation status 200 OK but wrong response:", results)
logError("Media Item creation status 200 OK but wrong response:", results[0].status)
}
} else {
logError("Could not create media item", results[0])
logError("Could not create media item", results[0].status)
}
}
} while (page < photoset.pages)
Expand Down

0 comments on commit 5724267

Please sign in to comment.