From 7b04cf199fe1c893bc3e9d532525af50de5419f8 Mon Sep 17 00:00:00 2001 From: raivisdejus Date: Tue, 4 Jun 2024 09:48:07 +0000 Subject: [PATCH] deploy: 045bd211b3387924bff4148a1fe6979590406090 --- 404.html | 4 ++-- assets/js/{0480b142.4cf7acba.js => 0480b142.b45e1b79.js} | 2 +- .../{runtime~main.a5cebb54.js => runtime~main.404564ed.js} | 2 +- docs.html | 4 ++-- docs/cli.html | 4 ++-- docs/faq.html | 7 +++---- docs/installation.html | 4 ++-- docs/preferences.html | 4 ++-- docs/usage/file_import.html | 4 ++-- docs/usage/live_recording.html | 4 ++-- index.html | 4 ++-- 11 files changed, 21 insertions(+), 22 deletions(-) rename assets/js/{0480b142.4cf7acba.js => 0480b142.b45e1b79.js} (56%) rename assets/js/{runtime~main.a5cebb54.js => runtime~main.404564ed.js} (98%) diff --git a/404.html b/404.html index 38d191209..4cb14ab3b 100644 --- a/404.html +++ b/404.html @@ -4,13 +4,13 @@ Page Not Found | Buzz - +
Skip to main content

Page Not Found

We could not find what you were looking for.

Please contact the owner of the site that linked you to the original URL and let them know their link is broken.

- + \ No newline at end of file diff --git a/assets/js/0480b142.4cf7acba.js b/assets/js/0480b142.b45e1b79.js similarity index 56% rename from assets/js/0480b142.4cf7acba.js rename to assets/js/0480b142.b45e1b79.js index 21808b6fe..0b7d8cc0b 100644 --- a/assets/js/0480b142.4cf7acba.js +++ b/assets/js/0480b142.b45e1b79.js @@ -1 +1 @@ -"use strict";(self.webpackChunkdocs=self.webpackChunkdocs||[]).push([[836],{3905:(e,t,r)=>{r.d(t,{Zo:()=>s,kt:()=>f});var n=r(7294);function a(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function o(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function i(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(a[r]=e[r])}return a}var p=n.createContext({}),c=function(e){var t=n.useContext(p),r=t;return e&&(r="function"==typeof e?e(t):i(i({},t),e)),r},s=function(e){var t=c(e.components);return n.createElement(p.Provider,{value:t},e.children)},u="mdxType",m={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},d=n.forwardRef((function(e,t){var r=e.components,a=e.mdxType,o=e.originalType,p=e.parentName,s=l(e,["components","mdxType","originalType","parentName"]),u=c(r),d=a,f=u["".concat(p,".").concat(d)]||u[d]||m[d]||o;return r?n.createElement(f,i(i({ref:t},s),{},{components:r})):n.createElement(f,i({ref:t},s))}));function f(e,t){var r=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var o=r.length,i=new Array(o);i[0]=d;var l={};for(var p in t)hasOwnProperty.call(t,p)&&(l[p]=t[p]);l.originalType=e,l[u]="string"==typeof e?e:a,i[1]=l;for(var c=2;c{r.r(t),r.d(t,{assets:()=>p,contentTitle:()=>i,default:()=>m,frontMatter:()=>o,metadata:()=>l,toc:()=>c});var n=r(7462),a=(r(7294),r(3905));const o={title:"FAQ",sidebar_position:5},i=void 0,l={unversionedId:"faq",id:"faq",title:"FAQ",description:"1. Where are the models stored?",source:"@site/docs/faq.md",sourceDirName:".",slug:"/faq",permalink:"/buzz/docs/faq",draft:!1,tags:[],version:"current",sidebarPosition:5,frontMatter:{title:"FAQ",sidebar_position:5},sidebar:"tutorialSidebar",previous:{title:"CLI",permalink:"/buzz/docs/cli"}},p={},c=[],s={toc:c},u="wrapper";function m(e){let{components:t,...r}=e;return(0,a.kt)(u,(0,n.Z)({},s,r,{components:t,mdxType:"MDXLayout"}),(0,a.kt)("ol",null,(0,a.kt)("li",{parentName:"ol"},(0,a.kt)("p",{parentName:"li"},(0,a.kt)("strong",{parentName:"p"},"Where are the models stored?")),(0,a.kt)("p",{parentName:"li"},"The Whisper models are stored in ",(0,a.kt)("inlineCode",{parentName:"p"},"~/.cache/whisper"),". The Whisper.cpp models are stored in ",(0,a.kt)("inlineCode",{parentName:"p"},"~/Library/Caches/Buzz"),"\n(Mac OS), ",(0,a.kt)("inlineCode",{parentName:"p"},"~/.cache/Buzz")," (Unix), or ",(0,a.kt)("inlineCode",{parentName:"p"},"C:\\Users\\\\AppData\\Local\\Buzz\\Buzz\\Cache")," (Windows). 
The Hugging Face\nmodels are stored in ",(0,a.kt)("inlineCode",{parentName:"p"},"~/.cache/huggingface/hub"),".")),(0,a.kt)("li",{parentName:"ol"},(0,a.kt)("p",{parentName:"li"},(0,a.kt)("strong",{parentName:"p"},"What can I try if the transcription runs too slowly?")),(0,a.kt)("p",{parentName:"li"},"Try using a lower Whisper model size or using a Whisper.cpp model.")),(0,a.kt)("li",{parentName:"ol"},(0,a.kt)("p",{parentName:"li"},(0,a.kt)("strong",{parentName:"p"},"How to record system audio?")),(0,a.kt)("p",{parentName:"li"},"To transcribe system audio you need to configure virtual audio device and connect output from the applications you want to transcribe to this virtual speaker. After that you can select it as source in the Buzz. See ",(0,a.kt)("a",{parentName:"p",href:"https://chidiwilliams.github.io/buzz/docs/usage#live-recording"},"Usage")," section for more details."),(0,a.kt)("p",{parentName:"li"},"Relevant tools:"),(0,a.kt)("ul",{parentName:"li"},(0,a.kt)("li",{parentName:"ul"},"Mac OS - ",(0,a.kt)("a",{parentName:"li",href:"https://github.com/ExistentialAudio/BlackHole"},"BlackHole"),"."),(0,a.kt)("li",{parentName:"ul"},"Windows - ",(0,a.kt)("a",{parentName:"li",href:"https://vb-audio.com/Cable/"},"VB CABLE")),(0,a.kt)("li",{parentName:"ul"},"Linux - ",(0,a.kt)("a",{parentName:"li",href:"https://wiki.ubuntu.com/record_system_sound"},"PulseAudio Volume Control"))))))}m.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunkdocs=self.webpackChunkdocs||[]).push([[836],{3905:(e,t,r)=>{r.d(t,{Zo:()=>c,kt:()=>f});var n=r(7294);function a(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function o(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function i(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(a[r]=e[r])}return a}var p=n.createContext({}),s=function(e){var t=n.useContext(p),r=t;return e&&(r="function"==typeof e?e(t):i(i({},t),e)),r},c=function(e){var t=s(e.components);return n.createElement(p.Provider,{value:t},e.children)},u="mdxType",m={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},d=n.forwardRef((function(e,t){var r=e.components,a=e.mdxType,o=e.originalType,p=e.parentName,c=l(e,["components","mdxType","originalType","parentName"]),u=s(r),d=a,f=u["".concat(p,".").concat(d)]||u[d]||m[d]||o;return r?n.createElement(f,i(i({ref:t},c),{},{components:r})):n.createElement(f,i({ref:t},c))}));function f(e,t){var r=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var o=r.length,i=new Array(o);i[0]=d;var l={};for(var p in t)hasOwnProperty.call(t,p)&&(l[p]=t[p]);l.originalType=e,l[u]="string"==typeof e?e:a,i[1]=l;for(var s=2;s{r.r(t),r.d(t,{assets:()=>p,contentTitle:()=>i,default:()=>m,frontMatter:()=>o,metadata:()=>l,toc:()=>s});var n=r(7462),a=(r(7294),r(3905));const o={title:"FAQ",sidebar_position:5},i=void 0,l={unversionedId:"faq",id:"faq",title:"FAQ",description:"1. 
Where are the models stored?",source:"@site/docs/faq.md",sourceDirName:".",slug:"/faq",permalink:"/buzz/docs/faq",draft:!1,tags:[],version:"current",sidebarPosition:5,frontMatter:{title:"FAQ",sidebar_position:5},sidebar:"tutorialSidebar",previous:{title:"CLI",permalink:"/buzz/docs/cli"}},p={},s=[],c={toc:s},u="wrapper";function m(e){let{components:t,...r}=e;return(0,a.kt)(u,(0,n.Z)({},c,r,{components:t,mdxType:"MDXLayout"}),(0,a.kt)("ol",null,(0,a.kt)("li",{parentName:"ol"},(0,a.kt)("p",{parentName:"li"},(0,a.kt)("strong",{parentName:"p"},"Where are the models stored?")),(0,a.kt)("p",{parentName:"li"},"The Whisper models are stored in ",(0,a.kt)("inlineCode",{parentName:"p"},"~/.cache/whisper"),". The Whisper.cpp models are stored in ",(0,a.kt)("inlineCode",{parentName:"p"},"~/Library/Caches/Buzz"),"\n(Mac OS), ",(0,a.kt)("inlineCode",{parentName:"p"},"~/.cache/Buzz")," (Unix), or ",(0,a.kt)("inlineCode",{parentName:"p"},"C:\\Users\\\\AppData\\Local\\Buzz\\Buzz\\Cache")," (Windows).")),(0,a.kt)("li",{parentName:"ol"},(0,a.kt)("p",{parentName:"li"},(0,a.kt)("strong",{parentName:"p"},"What can I try if the transcription runs too slowly?")),(0,a.kt)("p",{parentName:"li"},"Try using a lower Whisper model size or using a Whisper.cpp model.")),(0,a.kt)("li",{parentName:"ol"},(0,a.kt)("p",{parentName:"li"},(0,a.kt)("strong",{parentName:"p"},"How to record system audio?")),(0,a.kt)("p",{parentName:"li"},"To transcribe system audio you need to configure virtual audio device and connect output from the applications you want to transcribe to this virtual speaker. After that you can select it as source in the Buzz. See ",(0,a.kt)("a",{parentName:"p",href:"https://chidiwilliams.github.io/buzz/docs/usage#live-recording"},"Usage")," section for more details."),(0,a.kt)("p",{parentName:"li"},"Relevant tools:"),(0,a.kt)("ul",{parentName:"li"},(0,a.kt)("li",{parentName:"ul"},"Mac OS - ",(0,a.kt)("a",{parentName:"li",href:"https://github.com/ExistentialAudio/BlackHole"},"BlackHole"),"."),(0,a.kt)("li",{parentName:"ul"},"Windows - ",(0,a.kt)("a",{parentName:"li",href:"https://vb-audio.com/Cable/"},"VB CABLE")),(0,a.kt)("li",{parentName:"ul"},"Linux - ",(0,a.kt)("a",{parentName:"li",href:"https://wiki.ubuntu.com/record_system_sound"},"PulseAudio Volume Control"))))))}m.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/runtime~main.a5cebb54.js b/assets/js/runtime~main.404564ed.js similarity index 98% rename from assets/js/runtime~main.a5cebb54.js rename to assets/js/runtime~main.404564ed.js index 0d208d2ba..a5913c086 100644 --- a/assets/js/runtime~main.a5cebb54.js +++ b/assets/js/runtime~main.404564ed.js @@ -1 +1 @@ -(()=>{"use strict";var e,t,r,o,a,n={},f={};function i(e){var t=f[e];if(void 0!==t)return t.exports;var r=f[e]={id:e,loaded:!1,exports:{}};return n[e].call(r.exports,r,r.exports,i),r.loaded=!0,r.exports}i.m=n,i.c=f,e=[],i.O=(t,r,o,a)=>{if(!r){var n=1/0;for(b=0;b=a)&&Object.keys(i.O).every((e=>i.O[e](r[c])))?r.splice(c--,1):(f=!1,a0&&e[b-1][2]>a;b--)e[b]=e[b-1];e[b]=[r,o,a]},i.n=e=>{var t=e&&e.__esModule?()=>e.default:()=>e;return i.d(t,{a:t}),t},r=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,i.t=function(e,o){if(1&o&&(e=this(e)),8&o)return e;if("object"==typeof e&&e){if(4&o&&e.__esModule)return e;if(16&o&&"function"==typeof e.then)return e}var a=Object.create(null);i.r(a);var n={};t=t||[null,r({}),r([]),r(r)];for(var f=2&o&&e;"object"==typeof f&&!~t.indexOf(f);f=r(f))Object.getOwnPropertyNames(f).forEach((t=>n[t]=()=>e[t]));return 
n.default=()=>e,i.d(a,n),a},i.d=(e,t)=>{for(var r in t)i.o(t,r)&&!i.o(e,r)&&Object.defineProperty(e,r,{enumerable:!0,get:t[r]})},i.f={},i.e=e=>Promise.all(Object.keys(i.f).reduce(((t,r)=>(i.f[r](e,t),t)),[])),i.u=e=>"assets/js/"+({34:"871e3331",53:"935f2afb",217:"3b8c55ea",237:"1df93b7f",355:"e53fa2b7",382:"1102fda7",468:"1a20bc57",514:"1be78505",836:"0480b142",860:"3e407b54",918:"17896441",958:"6dbc2e00",971:"c377a04b"}[e]||e)+"."+{34:"b7e0063b",53:"26bf1532",217:"2386d6f2",237:"71d7b441",355:"28463cc8",382:"25cda54b",468:"23883b0e",514:"131974b6",836:"4cf7acba",860:"987a7018",918:"0aaaf3b3",958:"24fcb0eb",971:"b0b8fe80",972:"d5cc17b1"}[e]+".js",i.miniCssF=e=>{},i.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),i.o=(e,t)=>Object.prototype.hasOwnProperty.call(e,t),o={},a="docs:",i.l=(e,t,r,n)=>{if(o[e])o[e].push(t);else{var f,c;if(void 0!==r)for(var u=document.getElementsByTagName("script"),b=0;b{f.onerror=f.onload=null,clearTimeout(s);var a=o[e];if(delete o[e],f.parentNode&&f.parentNode.removeChild(f),a&&a.forEach((e=>e(r))),t)return t(r)},s=setTimeout(l.bind(null,void 0,{type:"timeout",target:f}),12e4);f.onerror=l.bind(null,f.onerror),f.onload=l.bind(null,f.onload),c&&document.head.appendChild(f)}},i.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.p="/buzz/",i.gca=function(e){return e={17896441:"918","871e3331":"34","935f2afb":"53","3b8c55ea":"217","1df93b7f":"237",e53fa2b7:"355","1102fda7":"382","1a20bc57":"468","1be78505":"514","0480b142":"836","3e407b54":"860","6dbc2e00":"958",c377a04b:"971"}[e]||e,i.p+i.u(e)},(()=>{var e={303:0,532:0};i.f.j=(t,r)=>{var o=i.o(e,t)?e[t]:void 0;if(0!==o)if(o)r.push(o[2]);else if(/^(303|532)$/.test(t))e[t]=0;else{var a=new Promise(((r,a)=>o=e[t]=[r,a]));r.push(o[2]=a);var n=i.p+i.u(t),f=new Error;i.l(n,(r=>{if(i.o(e,t)&&(0!==(o=e[t])&&(e[t]=void 0),o)){var a=r&&("load"===r.type?"missing":r.type),n=r&&r.target&&r.target.src;f.message="Loading chunk "+t+" failed.\n("+a+": "+n+")",f.name="ChunkLoadError",f.type=a,f.request=n,o[1](f)}}),"chunk-"+t,t)}},i.O.j=t=>0===e[t];var t=(t,r)=>{var o,a,n=r[0],f=r[1],c=r[2],u=0;if(n.some((t=>0!==e[t]))){for(o in f)i.o(f,o)&&(i.m[o]=f[o]);if(c)var b=c(i)}for(t&&t(r);u{"use strict";var e,t,r,o,a,n={},f={};function i(e){var t=f[e];if(void 0!==t)return t.exports;var r=f[e]={id:e,loaded:!1,exports:{}};return n[e].call(r.exports,r,r.exports,i),r.loaded=!0,r.exports}i.m=n,i.c=f,e=[],i.O=(t,r,o,a)=>{if(!r){var n=1/0;for(b=0;b=a)&&Object.keys(i.O).every((e=>i.O[e](r[c])))?r.splice(c--,1):(f=!1,a0&&e[b-1][2]>a;b--)e[b]=e[b-1];e[b]=[r,o,a]},i.n=e=>{var t=e&&e.__esModule?()=>e.default:()=>e;return i.d(t,{a:t}),t},r=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,i.t=function(e,o){if(1&o&&(e=this(e)),8&o)return e;if("object"==typeof e&&e){if(4&o&&e.__esModule)return e;if(16&o&&"function"==typeof e.then)return e}var a=Object.create(null);i.r(a);var n={};t=t||[null,r({}),r([]),r(r)];for(var f=2&o&&e;"object"==typeof f&&!~t.indexOf(f);f=r(f))Object.getOwnPropertyNames(f).forEach((t=>n[t]=()=>e[t]));return n.default=()=>e,i.d(a,n),a},i.d=(e,t)=>{for(var r in 
t)i.o(t,r)&&!i.o(e,r)&&Object.defineProperty(e,r,{enumerable:!0,get:t[r]})},i.f={},i.e=e=>Promise.all(Object.keys(i.f).reduce(((t,r)=>(i.f[r](e,t),t)),[])),i.u=e=>"assets/js/"+({34:"871e3331",53:"935f2afb",217:"3b8c55ea",237:"1df93b7f",355:"e53fa2b7",382:"1102fda7",468:"1a20bc57",514:"1be78505",836:"0480b142",860:"3e407b54",918:"17896441",958:"6dbc2e00",971:"c377a04b"}[e]||e)+"."+{34:"b7e0063b",53:"26bf1532",217:"2386d6f2",237:"71d7b441",355:"28463cc8",382:"25cda54b",468:"23883b0e",514:"131974b6",836:"b45e1b79",860:"987a7018",918:"0aaaf3b3",958:"24fcb0eb",971:"b0b8fe80",972:"d5cc17b1"}[e]+".js",i.miniCssF=e=>{},i.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),i.o=(e,t)=>Object.prototype.hasOwnProperty.call(e,t),o={},a="docs:",i.l=(e,t,r,n)=>{if(o[e])o[e].push(t);else{var f,c;if(void 0!==r)for(var u=document.getElementsByTagName("script"),b=0;b{f.onerror=f.onload=null,clearTimeout(s);var a=o[e];if(delete o[e],f.parentNode&&f.parentNode.removeChild(f),a&&a.forEach((e=>e(r))),t)return t(r)},s=setTimeout(l.bind(null,void 0,{type:"timeout",target:f}),12e4);f.onerror=l.bind(null,f.onerror),f.onload=l.bind(null,f.onload),c&&document.head.appendChild(f)}},i.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.p="/buzz/",i.gca=function(e){return e={17896441:"918","871e3331":"34","935f2afb":"53","3b8c55ea":"217","1df93b7f":"237",e53fa2b7:"355","1102fda7":"382","1a20bc57":"468","1be78505":"514","0480b142":"836","3e407b54":"860","6dbc2e00":"958",c377a04b:"971"}[e]||e,i.p+i.u(e)},(()=>{var e={303:0,532:0};i.f.j=(t,r)=>{var o=i.o(e,t)?e[t]:void 0;if(0!==o)if(o)r.push(o[2]);else if(/^(303|532)$/.test(t))e[t]=0;else{var a=new Promise(((r,a)=>o=e[t]=[r,a]));r.push(o[2]=a);var n=i.p+i.u(t),f=new Error;i.l(n,(r=>{if(i.o(e,t)&&(0!==(o=e[t])&&(e[t]=void 0),o)){var a=r&&("load"===r.type?"missing":r.type),n=r&&r.target&&r.target.src;f.message="Loading chunk "+t+" failed.\n("+a+": "+n+")",f.name="ChunkLoadError",f.type=a,f.request=n,o[1](f)}}),"chunk-"+t,t)}},i.O.j=t=>0===e[t];var t=(t,r)=>{var o,a,n=r[0],f=r[1],c=r[2],u=0;if(n.some((t=>0!==e[t]))){for(o in f)i.o(f,o)&&(i.m[o]=f[o]);if(c)var b=c(i)}for(t&&t(r);u Introduction | Buzz - + @@ -20,7 +20,7 @@ Whisper.cpp, Faster Whisper, Whisper-compatible Hugging Face models, and the OpenAI Whisper API
  • Command-Line Interface
  • Available on Mac, Windows, and Linux
  • - + \ No newline at end of file diff --git a/docs/cli.html b/docs/cli.html index 5bb08d268..e89e6dcf3 100644 --- a/docs/cli.html +++ b/docs/cli.html @@ -4,13 +4,13 @@ CLI | Buzz - +

    CLI

    Commands

    add

    Start a new transcription task.

    Usage: buzz add [options] [file file file...]

    Options:
    -t, --task <task> The task to perform. Allowed: translate,
    transcribe. Default: transcribe.
    -m, --model-type <model-type> Model type. Allowed: whisper, whispercpp,
    huggingface, fasterwhisper, openaiapi. Default:
    whisper.
    -s, --model-size <model-size> Model size. Use only when --model-type is
    whisper, whispercpp, or fasterwhisper. Allowed:
    tiny, base, small, medium, large. Default:
    tiny.
    --hfid <id> Hugging Face model ID. Use only when
    --model-type is huggingface. Example:
    "openai/whisper-tiny"
    -l, --language <code> Language code. Allowed: af (Afrikaans), am
    (Amharic), ar (Arabic), as (Assamese), az
    (Azerbaijani), ba (Bashkir), be (Belarusian),
    bg (Bulgarian), bn (Bengali), bo (Tibetan), br
    (Breton), bs (Bosnian), ca (Catalan), cs
    (Czech), cy (Welsh), da (Danish), de (German),
    el (Greek), en (English), es (Spanish), et
    (Estonian), eu (Basque), fa (Persian), fi
    (Finnish), fo (Faroese), fr (French), gl
    (Galician), gu (Gujarati), ha (Hausa), haw
    (Hawaiian), he (Hebrew), hi (Hindi), hr
    (Croatian), ht (Haitian Creole), hu
    (Hungarian), hy (Armenian), id (Indonesian), is
    (Icelandic), it (Italian), ja (Japanese), jw
    (Javanese), ka (Georgian), kk (Kazakh), km
    (Khmer), kn (Kannada), ko (Korean), la (Latin),
    lb (Luxembourgish), ln (Lingala), lo (Lao), lt
    (Lithuanian), lv (Latvian), mg (Malagasy), mi
    (Maori), mk (Macedonian), ml (Malayalam), mn
    (Mongolian), mr (Marathi), ms (Malay), mt
    (Maltese), my (Myanmar), ne (Nepali), nl
    (Dutch), nn (Nynorsk), no (Norwegian), oc
    (Occitan), pa (Punjabi), pl (Polish), ps
    (Pashto), pt (Portuguese), ro (Romanian), ru
    (Russian), sa (Sanskrit), sd (Sindhi), si
    (Sinhala), sk (Slovak), sl (Slovenian), sn
    (Shona), so (Somali), sq (Albanian), sr
    (Serbian), su (Sundanese), sv (Swedish), sw
    (Swahili), ta (Tamil), te (Telugu), tg (Tajik),
    th (Thai), tk (Turkmen), tl (Tagalog), tr
    (Turkish), tt (Tatar), uk (Ukrainian), ur
    (Urdu), uz (Uzbek), vi (Vietnamese), yi
    (Yiddish), yo (Yoruba), zh (Chinese). Leave
    empty to detect language.
    -p, --prompt <prompt> Initial prompt
    --openai-token <token> OpenAI access token. Use only when
    --model-type is openaiapi. Defaults to your
    previously saved access token, if one exists.
    --srt Output result in an SRT file.
    --vtt Output result in a VTT file.
    --txt Output result in a TXT file.
    -h, --help Displays help on commandline options.
    --help-all Displays help including Qt specific options.
    -v, --version Displays version information.

    Arguments:
    files Input file paths

    Examples:

    # Translate two MP3 files from French to English using OpenAI Whisper API
    buzz add --task translate --language fr --model-type openaiapi /Users/user/Downloads/1b3b03e4-8db5-ea2c-ace5-b71ff32e3304.mp3 /Users/user/Downloads/koaf9083k1lkpsfdi0.mp3

    # Transcribe an MP4 using Whisper.cpp "small" model and immediately export to SRT and VTT files
    buzz add --task transcribe --model-type whispercpp --model-size small --prompt "My initial prompt" --srt --vtt /Users/user/Downloads/buzz/1b3b03e4-8db5-ea2c-ace5-b71ff32e3304.mp4
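    # A further sketch (the file path is a placeholder): transcribe an MP3 with a Whisper-compatible Hugging Face model selected via --hfid, and export to TXT
    buzz add --task transcribe --model-type huggingface --hfid "openai/whisper-tiny" --txt /path/to/audio.mp3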
    - + \ No newline at end of file diff --git a/docs/faq.html b/docs/faq.html index c1b388983..40cb9f60b 100644 --- a/docs/faq.html +++ b/docs/faq.html @@ -4,15 +4,14 @@ FAQ | Buzz - +

    FAQ

    1. Where are the models stored?

      The Whisper models are stored in ~/.cache/whisper. The Whisper.cpp models are stored in ~/Library/Caches/Buzz -(Mac OS), ~/.cache/Buzz (Unix), or C:\Users\<username>\AppData\Local\Buzz\Buzz\Cache (Windows). The Hugging Face -models are stored in ~/.cache/huggingface/hub.

    2. What can I try if the transcription runs too slowly?

      Try using a smaller Whisper model size or a Whisper.cpp model.

    3. How to record system audio?

      To transcribe system audio you need to configure a virtual audio device and connect the output from the applications you want to transcribe to this virtual speaker. After that you can select it as the source in Buzz. See the Usage section for more details.

      Relevant tools:

    - +(Mac OS), ~/.cache/Buzz (Unix), or C:\Users\<username>\AppData\Local\Buzz\Buzz\Cache (Windows).

  • What can I try if the transcription runs too slowly?

    Try using a smaller Whisper model size or a Whisper.cpp model.

  • How to record system audio?

    To transcribe system audio you need to configure a virtual audio device and connect the output from the applications you want to transcribe to this virtual speaker. After that you can select it as the source in Buzz. See the Usage section for more details.

    Relevant tools:

  • + \ No newline at end of file diff --git a/docs/installation.html b/docs/installation.html index 45dee9065..0eed590da 100644 --- a/docs/installation.html +++ b/docs/installation.html @@ -4,7 +4,7 @@ Installation | Buzz - + @@ -14,7 +14,7 @@ the App Store version.)

    macOS (Intel, macOS 11.7 and later)

    Install via brew:

    brew install --cask buzz

    Alternatively, download and run the Buzz-x.y.z.dmg file.

    For Apple Silicon (and for a better experience on Intel Macs), download Buzz Captions from the App Store.

    Windows (Windows 10 and later)

    Download and run the Buzz-x.y.z.exe file.

    Linux

    sudo apt-get install libportaudio2
    sudo snap install buzz

    Get it from the Snap Store

    Alternatively, on Ubuntu 20.04 and later, install the dependencies:

    sudo apt-get install libportaudio2

    Then, download and extract the Buzz-x.y.z-unix.tar.gz file
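
    A minimal sketch of the extraction step (substitute the actual release version for x.y.z):

    tar -xzf Buzz-x.y.z-unix.tar.gz

    Then launch the Buzz executable from the extracted folder.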

    PyPI

    pip install buzz-captions
    python -m buzz
    - + \ No newline at end of file diff --git a/docs/preferences.html b/docs/preferences.html index 0ab76b64e..6fd0922fe 100644 --- a/docs/preferences.html +++ b/docs/preferences.html @@ -4,7 +4,7 @@ Preferences | Buzz - + @@ -12,7 +12,7 @@

    Preferences

    Open the Preferences window from the menu bar, or press Ctrl/Cmd + ,.

    General Preferences

    Default export file name

    Sets the default export file name for file transcriptions. For example, a value of {{ input_file_name }} ({{ task }}d on {{ date_time }}) will save TXT exports as Input Filename (transcribed on 19-Sep-2023 20-39-25).txt by default.

    Available variables:

    Key | Description | Example
    input_file_name | File name of the imported file | audio (e.g. if the imported file path was /path/to/audio.wav)
    task | Transcription task | transcribe, translate
    language | Language code | en, fr, yo, etc.
    model_type | Model type | Whisper, Whisper.cpp, Faster Whisper, etc.
    model_size | Model size | tiny, base, small, medium, large, etc.
    date_time | Export time (format: %d-%b-%Y %H-%M-%S) | 19-Sep-2023 20-39-25
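
    As a further illustration (the resulting file name is hypothetical), a template combining several of the variables above, such as {{ input_file_name }} - {{ model_type }} {{ model_size }} ({{ language }}), would save TXT exports under a name like audio - Whisper tiny (en).txt.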
    - + \ No newline at end of file diff --git a/docs/usage/file_import.html b/docs/usage/file_import.html index 5f4fa1634..8be44e8d7 100644 --- a/docs/usage/file_import.html +++ b/docs/usage/file_import.html @@ -4,14 +4,14 @@ File Import | Buzz - +

    File Import

    To import a file:

    • Click Import Media File on the File menu (or the '+' icon on the toolbar, or Command/Ctrl + O).
    • Choose an audio or video file.
    • Select a task, language, and the model settings.
    • Click Run.
    • When the transcription status shows 'Completed', double-click on the row (or select the row and click the '⤢' icon) to open the transcription.
    Field | Options | Default | Description
    Export As | "TXT", "SRT", "VTT" | "TXT" | Export file format
    Word-Level Timings | Off / On | Off | If checked, the transcription will generate a separate subtitle line for each word in the audio. Enabled only when "Export As" is set to "SRT" or "VTT".

    (See the Live Recording section for more information about the task, language, and quality settings.)
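
    The same import can also be scripted; a minimal sketch using the CLI documented on the CLI page (the file path is a placeholder):

    buzz add --task transcribe --model-type whispercpp --model-size small --srt /path/to/video.mp4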

    Media File Import on Buzz

    - + \ No newline at end of file diff --git a/docs/usage/live_recording.html b/docs/usage/live_recording.html index f612059b2..52726da2a 100644 --- a/docs/usage/live_recording.html +++ b/docs/usage/live_recording.html @@ -4,7 +4,7 @@ Live Recording | Buzz - + @@ -17,7 +17,7 @@ see LoopBeAudio, LoopBack, and Virtual Audio Cable).

    1. Install BlackHole via Homebrew

      brew install blackhole-2ch
    2. Open Audio MIDI Setup from Spotlight or from /Applications/Utilities/Audio MIDI Setup.app.

      Open Audio MIDI Setup from Spotlight

    3. Click the '+' icon at the lower left corner and select 'Create Multi-Output Device'.

      Create multi-output device

    4. Add your default speaker and BlackHole to the multi-output device.

      Screenshot of multi-output device

    5. Select this multi-output device as your speaker (application or system-wide) to play audio into BlackHole.

    6. Open Buzz, select BlackHole as your microphone, and record as before to see transcriptions from the audio playing through BlackHole.

    Record audio playing from computer (Windows)

    To transcribe system audio you need to configure a virtual audio device and connect the output from the applications you want to transcribe to this virtual speaker. After that you can select it as the source in Buzz.

    1. Install VB CABLE as a virtual audio device.

    2. Configure it using the Windows Sound settings. Right-click the speaker icon in the system tray and select "Open Sound settings". In the "Choose your output device" dropdown, select "CABLE Input" to send all system sound to the virtual device, or use "Advanced sound options" to select the applications that will output their sound to this device.

    Record audio playing from computer (Linux)

    As described on the Ubuntu Wiki, on any Linux system with PulseAudio you can redirect application audio to a virtual speaker. After that you can select it as the source in Buzz.

    Overall steps:

    1. Launch the application that will produce the sound you want to transcribe and start playback. For example, start a video in a media player.
    2. Launch Buzz and open the Live Recording screen so that you can see the settings.
    3. Configure sound routing from the application you want to transcribe to Buzz in the Recording tab of PulseAudio Volume Control (pavucontrol); a terminal-based sketch of the virtual-speaker setup is shown after this list.
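
    One common way to create the virtual speaker from a terminal (a sketch, not the only approach; the sink name virtual_speaker is an arbitrary placeholder) is to load a PulseAudio null sink:

    # Create a virtual sink (speaker) that applications can play into
    pactl load-module module-null-sink sink_name=virtual_speaker sink_properties=device.description=VirtualSpeaker
    # Optionally loop the virtual sink back to your normal output so you can still hear the audio
    pactl load-module module-loopback source=virtual_speaker.monitor

    In Buzz, select the monitor source of the virtual sink (typically shown as "Monitor of VirtualSpeaker") as the recording source.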
    - + \ No newline at end of file diff --git a/index.html b/index.html index a0ca4142c..591192f7a 100644 --- a/index.html +++ b/index.html @@ -4,13 +4,13 @@ Buzz - +
    - + \ No newline at end of file