/* -- Imports -- */
// Transcription and LLM clients
import { createClient } from "@deepgram/sdk"; // Deepgram SDK
import { webvtt } from "@deepgram/captions"; // Deepgram WebVTT formatter
import OpenAI from "openai"; // OpenAI SDK
// Other clients
import { Client } from "@notionhq/client"; // Notion SDK
// Audio utils
import { parseFile } from "music-metadata"; // Audio duration parser
import ffmpegInstaller from "@ffmpeg-installer/ffmpeg"; // ffmpeg
// Text utils
import natural from "natural"; // Sentence tokenization
import { franc, francAll } from "franc"; // Language detection
import { encode, decode } from "gpt-3-encoder"; // GPT-3 encoder for ChatGPT-specific tokenization
// Rate limiting and error handling
import Bottleneck from "bottleneck"; // Concurrency handler
import retry from "async-retry"; // Retry handler
// Node.js utils
import stream from "stream"; // Stream handling
import { promisify } from "util"; // Promisify
import fs from "fs"; // File system
import got from "got"; // HTTP requests
import { inspect } from "util"; // Object inspection
import { join, extname } from "path"; // Path handling
import { exec } from "child_process"; // Shell commands
// Project utils
import lang from "./helpers/languages.mjs"; // Language codes
import common from "./helpers/common.mjs"; // Common functions
import translation from "./helpers/translate-transcript.mjs"; // Transcript translation
import openaiOptions from "./helpers/openai-options.mjs"; // OpenAI options
import EMOJI from "./helpers/emoji.mjs"; // Emoji list
const execAsync = promisify(exec);
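// API pricing in USD, used for cost estimates. Chat model rates are per
// 1,000 tokens; whisper and nova-2 rates are per minute of audio.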
const rates = {
"gpt-3.5-turbo": {
prompt: 0.001,
completion: 0.002,
},
"gpt-3.5-turbo-16k": {
prompt: 0.003,
completion: 0.004,
},
"gpt-4": {
prompt: 0.03,
completion: 0.06,
},
"gpt-4-32k": {
prompt: 0.06,
completion: 0.12,
},
"gpt-4-1106-preview": {
prompt: 0.01,
completion: 0.03,
},
"gpt-3.5-turbo-1106": {
prompt: 0.001,
completion: 0.002,
},
whisper: {
completion: 0.006, // $0.006 per minute
},
"nova-2": {
completion: 0.0043, // $0.0043 per minute
},
};
const config = {
filePath: "",
chunkDir: "",
supportedMimes: [".mp3", ".m4a", ".wav", ".mp4", ".mpeg", ".mpga", ".webm"],
no_duration_flag: false,
};
export default {
name: "Notion Voice Notes – Core",
description:
"Transcribes audio files, summarizes the transcript, and sends both transcript and summary to Notion.",
key: "notion-voice-notes",
version: "0.7.30",
type: "action",
props: {
notion: {
type: "app",
app: "notion",
description: `⬆ Don\'t forget to connect your Notion account! Additionally, be sure to give Pipedream access to your Notes database, or to a page that contains it.\n\n## Overview\n\nThis workflow lets you create perfectly-transcribed and summarized notes from voice recordings.\n\nIt also creates useful lists from the transcript, including:\n\n* Main points\n* Action items\n* Follow-up questions\n* Potential rebuttals\n\n**Need help with this workflow? [Check out the full instructions and FAQ here.](https://thomasjfrank.com/how-to-transcribe-audio-to-text-with-chatgpt-and-notion/)**\n\n## Compatibility\n\nThis workflow will work with any Notion database.\n\n### Upgrade Your Notion Experience\n\nWhile this workflow will work with any Notion database, it\'s even better with a template.\n\nFor general productivity use, you\'ll love [Ultimate Brain](https://thomasjfrank.com/brain/) – my all-in-one second brain template for Notion. \n\nUltimate Brain brings tasks, notes, projects, and goals all into one tool. Naturally, it works very well with this workflow.\n\n**Are you a creator?** \n\nMy [Creator\'s Companion](https://thomasjfrank.com/creators-companion/) template includes a ton of features that will help you make better-performing content and optimize your production process. There\'s even a version that includes Ultimate Brain, so you can easily use this workflow to create notes whenever you have an idea for a new video or piece of content.\n\n## Instructions\n\n[Click here for the full instructions on setting up this workflow.](https://thomasjfrank.com/how-to-transcribe-audio-to-text-with-chatgpt-and-notion/)\n\n## More Resources\n\n**More automations you may find useful:**\n\n* [Create Tasks in Notion with Your Voice](https://thomasjfrank.com/notion-chatgpt-voice-tasks/)\n* [Notion to Google Calendar Sync](https://thomasjfrank.com/notion-google-calendar-sync/)\n\n**All My Notion Automations:**\n\n* [Notion Automations Hub](https://thomasjfrank.com/notion-automations/)\n\n**Want to get notified about updates to this workflow (and about new Notion templates, automations, and tutorials)?**\n\n* [Join my Notion Tips newsletter](https://thomasjfrank.com/fundamentals/#get-the-newsletter)\n\n## Support My Work\n\nThis workflow is **100% free** – and it gets updates and improvements! *When there's an update, you'll see an **update** button in the top-right corner of this step.*\n\nIf you want to support my work, the best way to do so is buying one of my premium Notion Templates:\n\n* [Ultimate Brain](https://thomasjfrank.com/brain/) – the ultimate second-brain template for Notion\n* [Creator\'s Companion](https://thomasjfrank.com/creators-companion/) – my advanced template for serious content creators looking to publish better content more frequently\n\nBeyond that, sharing this automation\'s YouTube tutorial online or with friends is also helpful!`,
},
openai: {
type: "app",
app: "openai",
description: `**Important:** If you're currently using OpenAI's free trial credit, your API key will be subject to much lower [rate limits](https://platform.openai.com/account/rate-limits), and may not be able to handle longer files (approx. 1 hour+, but the actual limit is hard to determine). If you're looking to work with long files, I recommend [setting up your billing info at OpenAI now](https://platform.openai.com/account/billing/overview).\n\nAdditionally, you'll need to generate a new API key and enter it here once you enter your billing information at OpenAI; once you do that, trial keys stop working.\n\n`,
},
steps: common.props.steps,
summary_options: {
type: "string[]",
label: "Summary Options",
description: `Select the options you would like to include in your summary. You can select multiple options.\n\nYou can also de-select all options, which will cause the summary step to only run once in order to generate a title for your note.`,
options: [
"Summary",
"Main Points",
"Action Items",
"Follow-up Questions",
"Stories",
"References",
"Arguments",
"Related Topics",
"Sentiment",
],
default: ["Summary", "Main Points", "Action Items", "Follow-up Questions"],
optional: false,
},
databaseID: common.props.databaseID,
},
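// Builds dynamic props: fetches the available GPT models from the user's
// OpenAI account, then (once a database is selected) maps the Notion
// database's title, number, select, date, text, and URL properties to
// configurable options.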
async additionalProps() {
let results;
if (this.openai) {
try {
// Initialize OpenAI
const openai = new OpenAI({
apiKey: this.openai.$auth.api_key,
});
const response = await openai.models.list();
const initialResults = response.data.filter(
(model) =>
model.id.includes("gpt")
).sort((a, b) => a.id.localeCompare(b.id));
const preferredModels = ["gpt-3.5-turbo", "gpt-4o", "gpt-4-turbo"]
const preferredItems = []
for (const model of preferredModels) {
const index = initialResults.findIndex((result) => result.id === model)
if (index !== -1) {
preferredItems.push(initialResults.splice(index, 1)[0])
}
}
results = [...preferredItems, ...initialResults];
} catch (err) {
console.error(
`Encountered an error with OpenAI: ${err} – Please check that your API key is still valid.`
);
}
}
if (results === undefined || results.length === 0) {
throw new Error(
`No available ChatGPT models found. Please check that your OpenAI API key is still valid. If you have recently added billing information to your OpenAI account, you may need to generate a new API key. Keys generated during the trial credit period may not work once billing information is added.`
);
}
if (!this.databaseID) return {};
const notion = new Client({
auth: this.notion.$auth.oauth_access_token,
});
const database = await notion.databases.retrieve({
database_id: this.databaseID,
});
const properties = database.properties;
const titleProps = Object.keys(properties).filter(
(k) => properties[k].type === "title"
);
const numberProps = Object.keys(properties).filter(
(k) => properties[k].type === "number"
);
const selectProps = Object.keys(properties).filter(
(k) => properties[k].type === "select"
);
const dateProps = Object.keys(properties).filter(
(k) => properties[k].type === "date"
);
const textProps = Object.keys(properties).filter(
(k) => properties[k].type === "rich_text"
);
const urlProps = Object.keys(properties).filter(
(k) => properties[k].type === "url"
);
const props = {
noteTitle: {
type: "string",
label: "Note Title (Required)",
description: `Select the title property for your notes. By default, it is called **Name**.`,
options: titleProps.map((prop) => ({ label: prop, value: prop })),
optional: false,
reloadProps: true,
},
...(this.noteTitle && {
noteTitleValue: {
type: "string",
label: "Note Title Value",
description:
'Choose the value for your note title. Defaults to an AI-generated title based off of the first summarized chunk from your transcription. You can also choose to use the audio file name, or both. If you pick both, the title will be in the format "File Name – AI Title".\n\n**Advanced:** You can also construct a custom title by choosing the *Enter a custom expression* tab and building an expression that evaluates to a string.',
options: [
"AI Generated Title",
"Audio File Name",
'Both ("File Name – AI Title")',
],
default: "AI Generated Title",
optional: true,
},
}),
noteDuration: {
type: "string",
label: "Note Duration",
description:
"Select the duration property for your notes. This must be a Number-type property. Duration will be expressed in **seconds**.",
options: numberProps.map((prop) => ({ label: prop, value: prop })),
optional: true,
},
noteCost: {
type: "string",
label: "Note Cost",
description:
"Select the cost property for your notes. This will store the total cost of the run, including both the Whisper (transcription) and ChatGPT (summarization) costs. This must be a Number-type property.",
options: numberProps.map((prop) => ({ label: prop, value: prop })),
optional: true,
},
noteTag: {
type: "string",
label: "Note Tag",
description:
'Choose a Select-type property for tagging your note (e.g. tagging it as "AI Transcription").',
options: selectProps.map((prop) => ({ label: prop, value: prop })),
optional: true,
reloadProps: true,
},
noteIcon: {
type: "string",
label: "Note Page Icon",
description:
"Choose an emoji to use as the icon for your note page. Defaults to 🤖. If you don't see the emoji you want in the list, you can also simply type or paste it in the box below.",
options: EMOJI,
optional: true,
default: "🤖",
},
...(this.noteTag && {
noteTagValue: {
type: "string",
label: "Note Tag Value",
description: "Choose the value for your note tag.",
options: this.noteTag
? properties[this.noteTag].select.options.map((option) => ({
label: option.name,
value: option.name,
}))
: [],
default: "AI Transcription",
optional: true,
},
}),
noteDate: {
type: "string",
label: "Note Date",
description:
"Select a date property for your note. This property will be set to the date the audio file was created.",
options: dateProps.map((prop) => ({ label: prop, value: prop })),
optional: true,
},
noteFileName: {
type: "string",
label: "Note File Name",
description:
"Select a text-type property for your note's file name. This property will store the name of the audio file.",
options: textProps.map((prop) => ({ label: prop, value: prop })),
optional: true,
},
noteFileLink: {
type: "string",
label: "Note File Link",
description:
"Select a URL-type property for your note's file link. This property will store a link to the audio file.",
options: urlProps.map((prop) => ({ label: prop, value: prop })),
optional: true,
},
chat_model: {
type: "string",
label: "ChatGPT Model",
description: `Select the model you would like to use.\n\nDefaults to **gpt-3.5-turbo**, which is recommended for this workflow.\n\nSwitching to the gpt-3.5-turbo-16k model will allow you to set the **summary density** option below up to 5,000 tokens, rather than gpt-3.5-turbo's max of 2,750.\n\nYou can also use **gpt-4**, which may provide more insightful summaries and lists, but it will increase the cost of the summarization step by a factor of 20 (it won't increase the cost of transcription, which is typically about 90% of the cost).`,
default: "gpt-3.5-turbo",
options: results.map((model) => ({
label: model.id,
value: model.id,
})),
optional: true,
reloadProps: true,
},
transcript_language: translation.props.transcript_language,
transcription_service: {
type: "string",
label: "Transcription Service",
description:
"Choose the service to use for transcription. By default, OpenAI's Whisper service is used, which uses your OpenAI API key. If you choose to transcribe with [Deepgram](https://deepgram.com/), you'll need to provide a Deepgram API key in the property that appears after you select Deepgram. \n\n**Note: Deepgram transcription is in beta and may not work as expected.**",
options: ["OpenAI", "Deepgram"],
default: "OpenAI",
reloadProps: true,
},
...(this.transcription_service === "Deepgram" && {
deepgram: {
type: "app",
app: "deepgram",
},
deepgram_model: {
type: "string",
label: "Deepgram Model",
description:
"Select the model you would like to use. Defaults to **nova-2-general**.",
default: "nova-2-general",
options: [
"nova-2-general",
"nova-2-medical",
"nova-2-finance",
"nova-2-meeting",
"nova-2-phonecall",
"nova-2-voicemail",
"nova-2-video",
"nova-2-automotive",
"nova-general",
"whisper-tiny",
"whisper-base",
"whisper-small",
"whisper-medium",
"whisper-large",
],
},
}),
advanced_options: {
type: "boolean",
label: "Enable Advanced Options",
description: `Set this to **True** to enable advanced options for this workflow.`,
default: false,
optional: true,
reloadProps: true,
},
...(this.chat_model &&
this.advanced_options === true && {
summary_density: {
type: "integer",
label: "Summary Density (Advanced)",
description: `*It is recommended to leave this setting at its default unless you have a good understanding of how ChatGPT handles tokens.*\n\nSets the maximum number of tokens (word fragments) for each chunk of your transcript, and therefore the max number of user-prompt tokens that will be sent to ChatGPT in each summarization request.\n\nA smaller number will result in a more "dense" summary, as the same summarization prompt will be run for a smaller chunk of the transcript – hence, more requests will be made, as the transcript will be split into more chunks.\n\nThis will enable the script to handle longer files, as the script uses concurrent requests, and ChatGPT will take less time to process a chunk with fewer prompt tokens.\n\nThis does mean your summary and list will be longer, as you'll get them for each chunk. You can somewhat counteract this with the **Summary Verbosity** option.\n\n**Lowering the number here will also *slightly* increase the cost of the summarization step**, both because you're getting more summarization data and because the summarization prompt's system instructions will be sent more times.\n\nDefaults to 2,750 tokens. The maximum value is 5,000 tokens (2,750 for gpt-3.5-turbo, which has a 4,096-token limit that includes the completion and system instruction tokens), and the minimum value is 500 tokens.\n\nIf you're using an OpenAI trial account and haven't added your billing info yet, note that you may get rate-limited due to the low requests-per-minute (RPM) rate on trial accounts.`,
min: 500,
max:
this.chat_model.includes("gpt-4") ||
this.chat_model.includes("gpt-3.5-turbo-16k") ||
this.chat_model.includes("gpt-3.5-turbo-1106")
? 5000
: 2750,
default: 2750,
optional: true,
},
}),
...(this.advanced_options === true && {
whisper_prompt: openaiOptions.props.whisper_prompt,
verbosity: openaiOptions.props.verbosity,
summary_language: translation.props.summary_language,
...(this.summary_language && {
translate_transcript: translation.props.translate_transcript,
}),
temperature: openaiOptions.props.temperature,
chunk_size: openaiOptions.props.chunk_size,
disable_moderation_check: openaiOptions.props.disable_moderation_check,
fail_on_no_duration: openaiOptions.props.fail_on_no_duration,
}),
};
return props;
},
methods: {
...common.methods,
async checkSize(fileSize) {
if (fileSize > 200000000) {
throw new Error(
`File is too large. Files must be under 200mb and one of the following file types: ${config.supportedMimes.join(
", "
)}.
Note: If you upload a particularly large file and get an Out of Memory error, try setting your workflow's RAM setting higher. Learn how to do this here: https://pipedream.com/docs/workflows/settings/#memory`
);
} else {
// Log file size in MB to the nearest tenth
const readableFileSize = fileSize / 1000000;
console.log(
`File size is approximately ${readableFileSize.toFixed(1)}mb.`
);
}
},
setLanguages() {
if (this.transcript_language) {
console.log(`User set transcript language to ${this.transcript_language}.`);
config.transcriptLanguage = this.transcript_language;
}
if (this.summary_language) {
console.log(`User set summary language to ${this.summary_language}.`);
config.summaryLanguage = this.summary_language;
}
if (!this.transcript_language && !this.summary_language) {
console.log(
`No language set. Whisper will attempt to detect the language.`
);
}
},
...translation.methods,
async downloadToTmp(fileLink, filePath, fileName) {
try {
// Get the file extension
const mime = filePath.match(/\.\w+$/)[0];
// Check if the file extension is supported
if (config.supportedMimes.includes(mime) === false) {
throw new Error(
`Unsupported file type. Supported file types include ${config.supportedMimes.join(
", "
)}.`
);
}
// Define the tmp file path
const tmpPath = `/tmp/${filePath
.match(/[^\/]*\.\w+$/)[0]
.replace(/[\?$#&\{\}\[\]<>\*!@:\+\\\/]/g, "")}`;
// Download the audio recording from Dropbox to tmp file path
const pipeline = promisify(stream.pipeline);
await pipeline(got.stream(fileLink), fs.createWriteStream(tmpPath));
// Create a results object
const results = {
file_name: fileName,
path: tmpPath,
mime: mime,
};
console.log("Downloaded file to tmp storage:");
console.log(results);
return results;
} catch (error) {
throw new Error(`Failed to download file: ${error.message}`);
}
},
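// Parses the audio file's metadata with music-metadata and returns its
// duration, rounded to the nearest second. Cleans up temp storage before
// rethrowing if anything fails.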
async getDuration(filePath) {
try {
let dataPack;
try {
dataPack = await parseFile(filePath);
} catch (error) {
throw new Error(
"Failed to read audio file metadata. The file format might be unsupported or corrupted, or the file might no longer exist at the specified file path (which is in temp storage). If you are using the Google Drive or OneDrive versions of this workflow and are currently setting it up, please try testing your 'download' step again in order to re-download the file into temp storage. Then test this step again. Learn more here: https://thomasjfrank.com/how-to-transcribe-audio-to-text-with-chatgpt-and-notion/#error-failed-to-read-audio-file-metadata"
);
}
const duration = Math.round(dataPack.format.duration);
console.log(`Successfully got duration: ${duration} seconds`);
return duration;
} catch (error) {
console.error(error);
await this.cleanTmp(false);
throw new Error(
`An error occurred while processing the audio file: ${error.message}`
);
}
},
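// Creates a per-run chunk directory in /tmp, splits the audio file into
// chunks, and transcribes them all. Connection errors and invalid-format
// errors from OpenAI get tailored troubleshooting messages.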
async chunkFileAndTranscribe({ file }, openai) {
const chunkDirName = "chunks-" + this.steps.trigger.context.id;
const outputDir = join("/tmp", chunkDirName);
config.chunkDir = outputDir;
await execAsync(`mkdir -p "${outputDir}"`);
await execAsync(`rm -f "${outputDir}"/*`);
try {
console.log(`Chunking file: ${file}`);
await this.chunkFile({
file,
outputDir,
});
const files = await fs.promises.readdir(outputDir);
console.log(`Chunks created successfully. Transcribing chunks: ${files}`);
return await this.transcribeFiles(
{
files,
outputDir,
},
openai
);
} catch (error) {
await this.cleanTmp();
let errorText;
if (/connection error/i.test(error.message)) {
errorText = `PLEASE READ THIS ENTIRE ERROR MESSAGE.
An error occurred while attempting to split the file into chunks, or while sending the chunks to OpenAI.
If the full error below says "Unidentified connection error", please double-check that you have entered valid billing info in your OpenAI account. Afterward, generate a new API key and enter it in the OpenAI app here in Pipedream. Then, try running the workflow again.
IF THAT DOES NOT WORK, IT MEANS OPENAI'S SERVERS ARE OVERLOADED RIGHT NOW. "Connection error" means OpenAI's servers simply rejected the request. Please come back and retry the workflow later.
If retrying later does not work, please open an issue at this workflow's Github repo: https://github.com/TomFrankly/pipedream-notion-voice-notes/issues`;
} else if (/Invalid file format/i.test(error.message)) {
errorText = `An error occurred while attempting to split the file into chunks, or while sending the chunks to OpenAI.
Note: OpenAI officially supports .m4a files, but some apps create .m4a files that OpenAI can't read. If you're using an .m4a file, try converting it to .mp3 and running the workflow again.`;
} else {
errorText = `An error occurred while attempting to split the file into chunks, or while sending the chunks to OpenAI.`;
}
throw new Error(
`${errorText}
Full error from OpenAI: ${error.message}`
);
}
},
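// Splits the file into roughly equal segments no larger than chunk_size MB
// (default 24MB, under OpenAI's 25MB upload limit). ffmpeg's segment muxer
// is used with stream copy, so chunks are created without re-encoding.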
async chunkFile({ file, outputDir }) {
const ffmpegPath = ffmpegInstaller.path;
const ext = extname(file);
const fileSizeInMB = fs.statSync(file).size / (1024 * 1024);
const chunkSize = this.chunk_size ?? 24;
const numberOfChunks = Math.ceil(fileSizeInMB / chunkSize);
console.log(
`Full file size: ${fileSizeInMB}mb. Chunk size: ${chunkSize}mb. Expected number of chunks: ${numberOfChunks}. Commencing chunking...`
);
if (numberOfChunks === 1) {
await execAsync(`cp "${file}" "${outputDir}/chunk-000${ext}"`);
console.log(`Created 1 chunk: ${outputDir}/chunk-000${ext}`);
return;
}
const { stdout: durationOutput } = await execAsync(
`${ffmpegPath} -i "${file}" 2>&1 | grep "Duration"`
);
const duration = durationOutput.match(/\d{2}:\d{2}:\d{2}\.\d{2}/s)[0];
const [hours, minutes, seconds] = duration.split(":").map(parseFloat);
const totalSeconds = hours * 60 * 60 + minutes * 60 + seconds;
const segmentTime = Math.ceil(totalSeconds / numberOfChunks);
const command = `${ffmpegPath} -i "${file}" -f segment -segment_time ${segmentTime} -c copy -loglevel verbose "${outputDir}/chunk-%03d${ext}"`;
console.log(`Splitting file into chunks with ffmpeg command: ${command}`);
try {
const { stdout: chunkOutput, stderr: chunkError } = await execAsync(
command
);
if (chunkOutput) {
console.log(`stdout: ${chunkOutput}`);
}
if (chunkError) {
console.log(`stderr: ${chunkError}`);
}
const chunkFiles = await fs.promises.readdir(outputDir);
const chunkCount = chunkFiles.filter((file) =>
file.includes("chunk-")
).length;
console.log(`Created ${chunkCount} chunks.`);
} catch (error) {
console.error(
`An error occurred while splitting the file into chunks: ${error}`
);
throw error;
}
},
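// Transcribes all chunks in parallel, throttled to at most 30 concurrent
// requests and roughly 30 requests per second.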
transcribeFiles({ files, outputDir }, openai) {
const limiter = new Bottleneck({
maxConcurrent: 30,
minTime: 1000 / 30,
});
return Promise.all(
files.map((file) => {
return limiter.schedule(() =>
this.transcribe(
{
file,
outputDir,
},
openai
)
);
})
);
},
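// Transcribes a single chunk with Whisper, logging the API key's current
// rate-limit headers. Recoverable errors (connection resets, 5xx) are
// retried up to 3 times with backoff; all other errors bail immediately.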
transcribe({ file, outputDir }, openai) {
return retry(
async (bail, attempt) => {
const readStream = fs.createReadStream(join(outputDir, file));
console.log(`Transcribing file: ${file}`);
try {
const response = await openai.audio.transcriptions
.create(
{
model: "whisper-1",
...(config.transcriptLanguage &&
config.transcriptLanguage !== "" && {
language: config.transcriptLanguage,
}),
file: readStream,
prompt:
this.whisper_prompt && this.whisper_prompt !== ""
? this.whisper_prompt
: `Hello, welcome to my lecture.`,
},
{
maxRetries: 5,
}
)
.withResponse();
const limits = {
requestRate: response.response.headers.get("x-ratelimit-limit-requests"),
tokenRate: response.response.headers.get("x-ratelimit-limit-tokens"),
remainingRequests: response.response.headers.get(
"x-ratelimit-remaining-requests"
),
remainingTokens: response.response.headers.get(
"x-ratelimit-remaining-tokens"
),
rateResetTimeRemaining: response.response.headers.get(
"x-ratelimit-reset-requests"
),
tokenResetTimeRemaining: response.response.headers.get(
"x-ratelimit-reset-tokens"
),
};
console.log(
`Received response from OpenAI Whisper endpoint for ${file}. Your API key's current Audio endpoint limits (learn more at https://platform.openai.com/docs/guides/rate-limits/overview):`
);
console.table(limits);
if (limits.remainingRequests <= 1) {
console.log(
"WARNING: Only 1 request remaining in the current time period. Rate-limiting may occur after the next request. If so, this script will attempt to retry with exponential backoff, but the workflow run may hit your Timeout Settings (https://pipedream.com/docs/workflows/settings/#execution-timeout-limit) before completing. If you have not upgraded your OpenAI account to a paid account by adding your billing information (and generated a new API key afterwards, replacing your trial key here in Pipedream with that new one), your trial API key is subject to low rate limits. Learn more here: https://platform.openai.com/docs/guides/rate-limits/overview"
);
}
return response;
} catch (error) {
if (error instanceof OpenAI.APIError) {
console.log(`Encountered error from OpenAI: ${error.message}`);
console.log(`Status code: ${error.status}`);
console.log(`Error name: ${error.name}`);
console.log(`Error headers: ${JSON.stringify(error.headers)}`);
} else {
console.log(
`Encountered generic error, not described by OpenAI SDK error handler: ${error}`
);
}
if (
error.message.toLowerCase().includes("econnreset") ||
error.message.toLowerCase().includes("connection error") ||
(error.status && error.status >= 500)
) {
console.log(`Encountered a recoverable error. Retrying...`);
throw error;
} else {
console.log(
`Encountered an error that won't be helped by retrying. Bailing...`
);
bail(error);
}
} finally {
readStream.destroy();
}
},
{
retries: 3,
onRetry: (err) => {
console.log(`Retrying transcription for ${file} due to error: ${err}`);
},
}
);
},
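// Transcribes a file with Deepgram's prerecorded API (beta), returning the
// raw transcript, paragraphs, detected language, confidence scores, and a
// formatted WebVTT transcript.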
async transcribeDeepgram(file) {
const deepgram = createClient(this.deepgram.$auth.api_key);
const { result, error } = await deepgram.listen.prerecorded.transcribeFile(
fs.createReadStream(file),
{
model: this.deepgram_model ?? "nova-2-general",
smart_format: true,
punctuate: true,
detect_language: true,
// diarize: true,
numerals: false,
// keywords: [{ word: "Flylighter", boost: 1.5 }],
}
);
if (error) {
throw new Error(`Deepgram error: ${error.message}`);
}
const vttOutput = this.formatWebVTT(webvtt(result));
const output = {
metadata: result?.metadata ?? "No metadata available",
raw_transcript:
result?.results?.channels?.[0]?.alternatives?.[0]?.transcript ??
"Transcript not available",
raw_transcript_confidence:
result?.results?.channels?.[0]?.alternatives?.[0]?.confidence ??
"Confidence score not available",
paragraphs:
result?.results?.channels?.[0]?.alternatives?.[0]?.paragraphs
?.transcript ?? "No paragraphs available",
detected_language:
result?.results?.channels?.[0]?.detected_language ??
"Language not detected",
language_confidence:
result?.results?.channels?.[0]?.language_confidence ??
"Language confidence not available",
vttOutput: vttOutput ?? "VTT output not available",
};
return output;
},
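// Simplifies Deepgram's WebVTT output: keeps only each cue's start
// timestamp and rewrites <v Speaker> tags as "Speaker: text" lines.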
formatWebVTT(webVTTString) {
// Split the input into lines
const lines = webVTTString.split("\n");
let formattedLines = [];
for (let i = 0; i < lines.length; i++) {
const clearedLine = lines[i].trim();
if (clearedLine.match(/^\d{2}:\d{2}:\d{2}\.\d{3}.*/)) {
// Keep only the start timestamp
const timestampParts = clearedLine.split(" --> ");
formattedLines.push(timestampParts[0]);
}
// Check and format speaker lines
else if (clearedLine.match(/<v ([^>]+)>(.*)/)) {
const speakerMatch = clearedLine.match(/<v ([^>]+)>(.*)/);
// Adjust speaker format
if (speakerMatch) {
formattedLines.push(`${speakerMatch[1]}: ${speakerMatch[2].trim()}`);
}
} else {
// For lines that do not need formatting, push them as they are
formattedLines.push(clearedLine);
}
}
return formattedLines.join("\n");
},
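// Joins Whisper chunk transcripts into one string. If a chunk ends with a
// period but the next chunk starts with a lowercase character, the period
// is treated as a false sentence break at the chunk boundary and removed.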
async combineWhisperChunks(chunksArray) {
console.log(
`Combining ${chunksArray.length} transcript chunks into a single transcript...`
);
try {
let combinedText = "";
for (let i = 0; i < chunksArray.length; i++) {
let currentChunk = chunksArray[i].data.text;
let nextChunk =
i < chunksArray.length - 1 ? chunksArray[i + 1].data.text : null;
if (
nextChunk &&
currentChunk.endsWith(".") &&
nextChunk.charAt(0).toLowerCase() === nextChunk.charAt(0)
) {
currentChunk = currentChunk.slice(0, -1);
}
if (i < chunksArray.length - 1) {
currentChunk += " ";
}
combinedText += currentChunk;
}
console.log("Transcript combined successfully.");
return combinedText;
} catch (error) {
throw new Error(
`An error occurred while combining the transcript chunks: ${error.message}`
);
}
},
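// Scans the text for the longest gap between two periods and returns its
// character and token lengths; a longestGap of -1 signals that the text
// contains no periods at all. splitTranscript() uses this to decide
// whether chunk boundaries can be snapped to sentence ends.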
findLongestPeriodGap(text, maxTokens) {
let lastPeriodIndex = -1;
let longestGap = 0;
let longestGapText = "";
for (let i = 0; i < text.length; i++) {
if (text[i] === ".") {
if (lastPeriodIndex === -1) {
lastPeriodIndex = i;
continue;
}
let gap = i - lastPeriodIndex - 1;
let gapText = text.substring(lastPeriodIndex + 1, i);
if (gap > longestGap) {
longestGap = gap;
longestGapText = gapText;
}
lastPeriodIndex = i;
}
}
if (lastPeriodIndex === -1) {
return { longestGap: -1, longestGapText: "No period found" };
} else {
const encodedLongestGapText = encode(longestGapText);
return {
longestGap,
longestGapText,
maxTokens,
encodedGapLength: encodedLongestGapText.length,
};
}
},
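// Splits the token-encoded transcript into chunks of at most maxTokens
// tokens. When the transcript contains periods, each chunk boundary is
// nudged up to 100 tokens forward or backward (whichever is closer) to
// land on a period, keeping sentences whole.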
splitTranscript(encodedTranscript, maxTokens, periodInfo) {
console.log(`Splitting transcript into chunks of ${maxTokens} tokens...`);
const stringsArray = [];
let currentIndex = 0;
let round = 0;
while (currentIndex < encodedTranscript.length) {
console.log(`Round ${round++} of transcript splitting...`);
let endIndex = Math.min(currentIndex + maxTokens, encodedTranscript.length);
console.log(`Current endIndex: ${endIndex}`);
const nonPeriodEndIndex = endIndex;
if (periodInfo.longestGap !== -1) {
let forwardEndIndex = endIndex;
let backwardEndIndex = endIndex;
let maxForwardEndIndex = 100;
let maxBackwardEndIndex = 100;
while (
forwardEndIndex < encodedTranscript.length &&
maxForwardEndIndex > 0 &&
decode([encodedTranscript[forwardEndIndex]]) !== "."
) {
forwardEndIndex++;
maxForwardEndIndex--;
}
while (
backwardEndIndex > 0 &&
maxBackwardEndIndex > 0 &&
decode([encodedTranscript[backwardEndIndex]]) !== "."
) {
backwardEndIndex--;
maxBackwardEndIndex--;
}
if (
Math.abs(forwardEndIndex - nonPeriodEndIndex) <
Math.abs(backwardEndIndex - nonPeriodEndIndex)
) {
endIndex = forwardEndIndex;
} else {
endIndex = backwardEndIndex;
}
if (endIndex < encodedTranscript.length) {
endIndex++;
}
console.log(
`endIndex updated to ${endIndex} to keep sentences whole. Non-period endIndex was ${nonPeriodEndIndex}. Total added/removed tokens to account for this: ${
endIndex - nonPeriodEndIndex
}.`
);
}
const chunk = encodedTranscript.slice(currentIndex, endIndex);
stringsArray.push(decode(chunk));
currentIndex = endIndex;
}
console.log(`Split transcript into ${stringsArray.length} chunks.`);
return stringsArray;
},
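// Runs OpenAI's Moderation endpoint over the transcript in ~1,800-character
// chunks, throwing if any chunk is flagged as inappropriate.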
async moderationCheck(transcript, openai) {
console.log(`Initiating moderation check on the transcript.`);
const chunks = this.makeParagraphs(transcript, 1800);
console.log(
`Transcript split into ${chunks.length} chunks. Moderation check is most accurate on chunks of 2,000 characters or less. Moderation check will be performed on each chunk.`
);
try {
const limiter = new Bottleneck({
maxConcurrent: 500,
});
const moderationPromises = chunks.map((chunk, index) => {
return limiter.schedule(() => this.moderateChunk(index, chunk, openai));
});
await Promise.all(moderationPromises);
console.log(
`Moderation check completed successfully. No abusive content detected.`
);
} catch (error) {
throw new Error(
`An error occurred while performing a moderation check on the transcript: ${error.message}
Note that you can set Enable Advanced Settings to True, and then set Disable Moderation Check to True, to skip the moderation check. This will speed up the workflow run, but it will also increase the risk of inappropriate content being sent to ChatGPT.`
);
}
},
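// Moderates a single chunk, logging the offending content and the full
// moderation response if the chunk is flagged.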
async moderateChunk(index, chunk, openai) {
try {
const moderationResponse = await openai.moderations.create({
input: chunk,
});
const flagged = moderationResponse.results[0].flagged;
if (flagged === undefined || flagged === null) {
throw new Error(
`Moderation check failed. Request to OpenAI's Moderation endpoint could not be completed.
Note that you can set Enable Advanced Settings to True, and then set Disable Moderation Check to True, to skip the moderation check. This will speed up the workflow run, but it will also increase the risk of inappropriate content being sent to ChatGPT.`
);
}
if (flagged === true) {
console.log(
`Moderation check flagged inappropriate content in chunk ${index}.
The content of this chunk is as follows:
${chunk}
Contents of moderation check:`
);
console.dir(moderationResponse, { depth: null });
throw new Error(
`Detected inappropriate content in the transcript chunk. Summarization on this file cannot be completed.
The content of this chunk is as follows:
${chunk}
Note that you can set Enable Advanced Settings to True, and then set Disable Moderation Check to True, to skip the moderation check. This will speed up the workflow run, but it will also increase the risk of inappropriate content being sent to ChatGPT.
`
);
}
} catch (error) {
throw new Error(
`An error occurred while performing a moderation check on chunk ${index}.
The content of this chunk is as follows:
${chunk}