From b3148139ec23cd22042117aa1f7b3e14dc3978aa Mon Sep 17 00:00:00 2001 From: raivisdejus Date: Sun, 4 Aug 2024 16:32:47 +0000 Subject: [PATCH] deploy: 38e1254adf98b5ebcf2e16591e662ad61639a883 --- 404.html | 4 ++-- assets/js/{0480b142.61b1ce51.js => 0480b142.0adbc5c7.js} | 2 +- .../{runtime~main.da1429f8.js => runtime~main.92eff68d.js} | 2 +- docs.html | 4 ++-- docs/cli.html | 4 ++-- docs/faq.html | 6 +++--- docs/installation.html | 4 ++-- docs/preferences.html | 4 ++-- docs/usage/file_import.html | 4 ++-- docs/usage/live_recording.html | 4 ++-- docs/usage/translations.html | 4 ++-- index.html | 4 ++-- 12 files changed, 23 insertions(+), 23 deletions(-) rename assets/js/{0480b142.61b1ce51.js => 0480b142.0adbc5c7.js} (91%) rename assets/js/{runtime~main.da1429f8.js => runtime~main.92eff68d.js} (75%) diff --git a/404.html b/404.html index ae49009f3..20e10e6d7 100644 --- a/404.html +++ b/404.html @@ -4,13 +4,13 @@ Page Not Found | Buzz - +
Skip to main content

Page Not Found

We could not find what you were looking for.

Please contact the owner of the site that linked you to the original URL and let them know their link is broken.

- + \ No newline at end of file diff --git a/assets/js/0480b142.61b1ce51.js b/assets/js/0480b142.0adbc5c7.js similarity index 91% rename from assets/js/0480b142.61b1ce51.js rename to assets/js/0480b142.0adbc5c7.js index a4dd49672..9e826653a 100644 --- a/assets/js/0480b142.61b1ce51.js +++ b/assets/js/0480b142.0adbc5c7.js @@ -1 +1 @@ -"use strict";(self.webpackChunkdocs=self.webpackChunkdocs||[]).push([[836],{3905:(e,t,r)=>{r.d(t,{Zo:()=>c,kt:()=>h});var a=r(7294);function n(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function o(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,a)}return r}function i(e){for(var t=1;t=0||(n[r]=e[r]);return n}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(n[r]=e[r])}return n}var l=a.createContext({}),p=function(e){var t=a.useContext(l),r=t;return e&&(r="function"==typeof e?e(t):i(i({},t),e)),r},c=function(e){var t=p(e.components);return a.createElement(l.Provider,{value:t},e.children)},u="mdxType",m={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},d=a.forwardRef((function(e,t){var r=e.components,n=e.mdxType,o=e.originalType,l=e.parentName,c=s(e,["components","mdxType","originalType","parentName"]),u=p(r),d=n,h=u["".concat(l,".").concat(d)]||u[d]||m[d]||o;return r?a.createElement(h,i(i({ref:t},c),{},{components:r})):a.createElement(h,i({ref:t},c))}));function h(e,t){var r=arguments,n=t&&t.mdxType;if("string"==typeof e||n){var o=r.length,i=new Array(o);i[0]=d;var s={};for(var l in t)hasOwnProperty.call(t,l)&&(s[l]=t[l]);s.originalType=e,s[u]="string"==typeof e?e:n,i[1]=s;for(var p=2;p{r.r(t),r.d(t,{assets:()=>l,contentTitle:()=>i,default:()=>m,frontMatter:()=>o,metadata:()=>s,toc:()=>p});var a=r(7462),n=(r(7294),r(3905));const o={title:"FAQ",sidebar_position:5},i=void 0,s={unversionedId:"faq",id:"faq",title:"FAQ",description:"1. Where are the models stored?",source:"@site/docs/faq.md",sourceDirName:".",slug:"/faq",permalink:"/buzz/docs/faq",draft:!1,tags:[],version:"current",sidebarPosition:5,frontMatter:{title:"FAQ",sidebar_position:5},sidebar:"tutorialSidebar",previous:{title:"CLI",permalink:"/buzz/docs/cli"}},l={},p=[],c={toc:p},u="wrapper";function m(e){let{components:t,...r}=e;return(0,n.kt)(u,(0,a.Z)({},c,r,{components:t,mdxType:"MDXLayout"}),(0,n.kt)("ol",null,(0,n.kt)("li",{parentName:"ol"},(0,n.kt)("p",{parentName:"li"},(0,n.kt)("strong",{parentName:"p"},"Where are the models stored?")),(0,n.kt)("p",{parentName:"li"},"The Whisper models are stored in ",(0,n.kt)("inlineCode",{parentName:"p"},"~/.cache/whisper"),". The Whisper.cpp models are stored in ",(0,n.kt)("inlineCode",{parentName:"p"},"~/Library/Caches/Buzz"),"\n(Mac OS), ",(0,n.kt)("inlineCode",{parentName:"p"},"~/.cache/Buzz")," (Unix), or ",(0,n.kt)("inlineCode",{parentName:"p"},"C:\\Users\\\\AppData\\Local\\Buzz\\Buzz\\Cache")," (Windows).")),(0,n.kt)("li",{parentName:"ol"},(0,n.kt)("p",{parentName:"li"},(0,n.kt)("strong",{parentName:"p"},"What can I try if the transcription runs too slowly?")),(0,n.kt)("p",{parentName:"li"},"Speech recognition requires large amount of computation, so one option is to try using a lower Whisper model size or using a Whisper.cpp model to run speech recognition of your computer. 
If you have access to a computer with GPU that has at least 6GB of VRAM you can try using the Faster Whisper model."),(0,n.kt)("p",{parentName:"li"},"Buzz also supports using OpenAI API to do speech recognition on a remote server. To use this feature you need to set OpenAI API key in Preferences. See ",(0,n.kt)("a",{parentName:"p",href:"https://chidiwilliams.github.io/buzz/docs/preferences"},"Preferences")," section for more details.")),(0,n.kt)("li",{parentName:"ol"},(0,n.kt)("p",{parentName:"li"},(0,n.kt)("strong",{parentName:"p"},"How to record system audio?")),(0,n.kt)("p",{parentName:"li"},"To transcribe system audio you need to configure virtual audio device and connect output from the applications you want to transcribe to this virtual speaker. After that you can select it as source in the Buzz. See ",(0,n.kt)("a",{parentName:"p",href:"https://chidiwilliams.github.io/buzz/docs/usage/live_recording"},"Usage")," section for more details."),(0,n.kt)("p",{parentName:"li"},"Relevant tools:"),(0,n.kt)("ul",{parentName:"li"},(0,n.kt)("li",{parentName:"ul"},"Mac OS - ",(0,n.kt)("a",{parentName:"li",href:"https://github.com/ExistentialAudio/BlackHole"},"BlackHole"),"."),(0,n.kt)("li",{parentName:"ul"},"Windows - ",(0,n.kt)("a",{parentName:"li",href:"https://vb-audio.com/Cable/"},"VB CABLE")),(0,n.kt)("li",{parentName:"ul"},"Linux - ",(0,n.kt)("a",{parentName:"li",href:"https://wiki.ubuntu.com/record_system_sound"},"PulseAudio Volume Control")))),(0,n.kt)("li",{parentName:"ol"},(0,n.kt)("p",{parentName:"li"},(0,n.kt)("strong",{parentName:"p"},"What model should I use?")),(0,n.kt)("p",{parentName:"li"},"Model size to use will depend on your hardware and use case. Smaller models will work faster but will have more inaccuracies. Larger models will be more accurate but will require more powerful hardware or longer time to transcribe. "),(0,n.kt)("p",{parentName:"li"},'When choosing among large models consider the following. "Large" is the first released older model, "Large-V2" is later updated model with better accuracy, for some languages considered the most robust and stable. "Large-V3" is the latest model with the best accuracy in many cases, but some times can hallucinate or invent words that were never in the audio. The only sure way to know what model best suits your needs is to test them all in your language. 
')),(0,n.kt)("li",{parentName:"ol"},(0,n.kt)("p",{parentName:"li"},(0,n.kt)("strong",{parentName:"p"},"How to get GPU acceleration for faster transcription?")),(0,n.kt)("p",{parentName:"li"},"On Linux GPU acceleration is supported out of the box on Nvidia GPUs with ",(0,n.kt)("a",{parentName:"p",href:"https://developer.nvidia.com/cuda-downloads"},"CUDA installed"),"."),(0,n.kt)("p",{parentName:"li"},"On Windows see ",(0,n.kt)("a",{parentName:"p",href:"https://github.com/chidiwilliams/buzz/blob/main/CONTRIBUTING.md#gpu-support"},"this note")," on enabling CUDA GPU support."))))}m.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunkdocs=self.webpackChunkdocs||[]).push([[836],{3905:(e,t,r)=>{r.d(t,{Zo:()=>c,kt:()=>h});var a=r(7294);function n(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function o(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,a)}return r}function i(e){for(var t=1;t=0||(n[r]=e[r]);return n}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(n[r]=e[r])}return n}var l=a.createContext({}),p=function(e){var t=a.useContext(l),r=t;return e&&(r="function"==typeof e?e(t):i(i({},t),e)),r},c=function(e){var t=p(e.components);return a.createElement(l.Provider,{value:t},e.children)},u="mdxType",m={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},d=a.forwardRef((function(e,t){var r=e.components,n=e.mdxType,o=e.originalType,l=e.parentName,c=s(e,["components","mdxType","originalType","parentName"]),u=p(r),d=n,h=u["".concat(l,".").concat(d)]||u[d]||m[d]||o;return r?a.createElement(h,i(i({ref:t},c),{},{components:r})):a.createElement(h,i({ref:t},c))}));function h(e,t){var r=arguments,n=t&&t.mdxType;if("string"==typeof e||n){var o=r.length,i=new Array(o);i[0]=d;var s={};for(var l in t)hasOwnProperty.call(t,l)&&(s[l]=t[l]);s.originalType=e,s[u]="string"==typeof e?e:n,i[1]=s;for(var p=2;p{r.r(t),r.d(t,{assets:()=>l,contentTitle:()=>i,default:()=>m,frontMatter:()=>o,metadata:()=>s,toc:()=>p});var a=r(7462),n=(r(7294),r(3905));const o={title:"FAQ",sidebar_position:5},i=void 0,s={unversionedId:"faq",id:"faq",title:"FAQ",description:"1. Where are the models stored?",source:"@site/docs/faq.md",sourceDirName:".",slug:"/faq",permalink:"/buzz/docs/faq",draft:!1,tags:[],version:"current",sidebarPosition:5,frontMatter:{title:"FAQ",sidebar_position:5},sidebar:"tutorialSidebar",previous:{title:"CLI",permalink:"/buzz/docs/cli"}},l={},p=[],c={toc:p},u="wrapper";function m(e){let{components:t,...r}=e;return(0,n.kt)(u,(0,a.Z)({},c,r,{components:t,mdxType:"MDXLayout"}),(0,n.kt)("ol",null,(0,n.kt)("li",{parentName:"ol"},(0,n.kt)("p",{parentName:"li"},(0,n.kt)("strong",{parentName:"p"},"Where are the models stored?")),(0,n.kt)("p",{parentName:"li"},"The Whisper models are stored in ",(0,n.kt)("inlineCode",{parentName:"p"},"~/.cache/whisper"),". 
The Whisper.cpp models are stored in ",(0,n.kt)("inlineCode",{parentName:"p"},"~/Library/Caches/Buzz"),"\n(Mac OS), ",(0,n.kt)("inlineCode",{parentName:"p"},"~/.cache/Buzz")," (Unix), or ",(0,n.kt)("inlineCode",{parentName:"p"},"C:\\Users\\\\AppData\\Local\\Buzz\\Buzz\\Cache")," (Windows).")),(0,n.kt)("li",{parentName:"ol"},(0,n.kt)("p",{parentName:"li"},(0,n.kt)("strong",{parentName:"p"},"What can I try if the transcription runs too slowly?")),(0,n.kt)("p",{parentName:"li"},"Speech recognition requires large amount of computation, so one option is to try using a lower Whisper model size or using a Whisper.cpp model to run speech recognition of your computer. If you have access to a computer with GPU that has at least 6GB of VRAM you can try using the Faster Whisper model."),(0,n.kt)("p",{parentName:"li"},"Buzz also supports using OpenAI API to do speech recognition on a remote server. To use this feature you need to set OpenAI API key in Preferences. See ",(0,n.kt)("a",{parentName:"p",href:"https://chidiwilliams.github.io/buzz/docs/preferences"},"Preferences")," section for more details.")),(0,n.kt)("li",{parentName:"ol"},(0,n.kt)("p",{parentName:"li"},(0,n.kt)("strong",{parentName:"p"},"How to record system audio?")),(0,n.kt)("p",{parentName:"li"},"To transcribe system audio you need to configure virtual audio device and connect output from the applications you want to transcribe to this virtual speaker. After that you can select it as source in the Buzz. See ",(0,n.kt)("a",{parentName:"p",href:"https://chidiwilliams.github.io/buzz/docs/usage/live_recording"},"Usage")," section for more details."),(0,n.kt)("p",{parentName:"li"},"Relevant tools:"),(0,n.kt)("ul",{parentName:"li"},(0,n.kt)("li",{parentName:"ul"},"Mac OS - ",(0,n.kt)("a",{parentName:"li",href:"https://github.com/ExistentialAudio/BlackHole"},"BlackHole"),"."),(0,n.kt)("li",{parentName:"ul"},"Windows - ",(0,n.kt)("a",{parentName:"li",href:"https://vb-audio.com/Cable/"},"VB CABLE")),(0,n.kt)("li",{parentName:"ul"},"Linux - ",(0,n.kt)("a",{parentName:"li",href:"https://wiki.ubuntu.com/record_system_sound"},"PulseAudio Volume Control")))),(0,n.kt)("li",{parentName:"ol"},(0,n.kt)("p",{parentName:"li"},(0,n.kt)("strong",{parentName:"p"},"What model should I use?")),(0,n.kt)("p",{parentName:"li"},"Model size to use will depend on your hardware and use case. Smaller models will work faster but will have more inaccuracies. Larger models will be more accurate but will require more powerful hardware or longer time to transcribe. "),(0,n.kt)("p",{parentName:"li"},'When choosing among large models consider the following. "Large" is the first released older model, "Large-V2" is later updated model with better accuracy, for some languages considered the most robust and stable. "Large-V3" is the latest model with the best accuracy in many cases, but some times can hallucinate or invent words that were never in the audio. The only sure way to know what model best suits your needs is to test them all in your language. ')),(0,n.kt)("li",{parentName:"ol"},(0,n.kt)("p",{parentName:"li"},(0,n.kt)("strong",{parentName:"p"},"How to get GPU acceleration for faster transcription?")),(0,n.kt)("p",{parentName:"li"},"On Linux GPU acceleration is supported out of the box on Nvidia GPUs. 
If you still get any issues install ",(0,n.kt)("a",{parentName:"p",href:"https://developer.nvidia.com/cuda-downloads"},"CUDA 12"),", ",(0,n.kt)("a",{parentName:"p",href:"https://developer.nvidia.com/cublas"},"cuBLASS")," and ",(0,n.kt)("a",{parentName:"p",href:"https://developer.nvidia.com/cudnn"},"cuDNN"),"."),(0,n.kt)("p",{parentName:"li"},"On Windows see ",(0,n.kt)("a",{parentName:"p",href:"https://github.com/chidiwilliams/buzz/blob/main/CONTRIBUTING.md#gpu-support"},"this note")," on enabling CUDA GPU support."))))}m.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/runtime~main.da1429f8.js b/assets/js/runtime~main.92eff68d.js similarity index 75% rename from assets/js/runtime~main.da1429f8.js rename to assets/js/runtime~main.92eff68d.js index db3852686..c3d3c4895 100644 --- a/assets/js/runtime~main.da1429f8.js +++ b/assets/js/runtime~main.92eff68d.js @@ -1 +1 @@ -(()=>{"use strict";var e,t,r,o,a,n={},f={};function i(e){var t=f[e];if(void 0!==t)return t.exports;var r=f[e]={id:e,loaded:!1,exports:{}};return n[e].call(r.exports,r,r.exports,i),r.loaded=!0,r.exports}i.m=n,i.c=f,e=[],i.O=(t,r,o,a)=>{if(!r){var n=1/0;for(c=0;c=a)&&Object.keys(i.O).every((e=>i.O[e](r[d])))?r.splice(d--,1):(f=!1,a0&&e[c-1][2]>a;c--)e[c]=e[c-1];e[c]=[r,o,a]},i.n=e=>{var t=e&&e.__esModule?()=>e.default:()=>e;return i.d(t,{a:t}),t},r=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,i.t=function(e,o){if(1&o&&(e=this(e)),8&o)return e;if("object"==typeof e&&e){if(4&o&&e.__esModule)return e;if(16&o&&"function"==typeof e.then)return e}var a=Object.create(null);i.r(a);var n={};t=t||[null,r({}),r([]),r(r)];for(var f=2&o&&e;"object"==typeof f&&!~t.indexOf(f);f=r(f))Object.getOwnPropertyNames(f).forEach((t=>n[t]=()=>e[t]));return n.default=()=>e,i.d(a,n),a},i.d=(e,t)=>{for(var r in t)i.o(t,r)&&!i.o(e,r)&&Object.defineProperty(e,r,{enumerable:!0,get:t[r]})},i.f={},i.e=e=>Promise.all(Object.keys(i.f).reduce(((t,r)=>(i.f[r](e,t),t)),[])),i.u=e=>"assets/js/"+({34:"871e3331",53:"935f2afb",217:"3b8c55ea",237:"1df93b7f",355:"e53fa2b7",382:"1102fda7",468:"1a20bc57",514:"1be78505",542:"2d9f39b3",836:"0480b142",860:"3e407b54",918:"17896441",958:"6dbc2e00",971:"c377a04b"}[e]||e)+"."+{34:"b7e0063b",53:"ee4c5b72",217:"a80ecf58",237:"71d7b441",355:"270fcee2",382:"0e12deaa",468:"23883b0e",514:"131974b6",542:"0a265eb2",836:"61b1ce51",860:"987a7018",918:"0aaaf3b3",958:"24fcb0eb",971:"b0b8fe80",972:"d5cc17b1"}[e]+".js",i.miniCssF=e=>{},i.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),i.o=(e,t)=>Object.prototype.hasOwnProperty.call(e,t),o={},a="docs:",i.l=(e,t,r,n)=>{if(o[e])o[e].push(t);else{var f,d;if(void 0!==r)for(var b=document.getElementsByTagName("script"),c=0;c{f.onerror=f.onload=null,clearTimeout(s);var a=o[e];if(delete o[e],f.parentNode&&f.parentNode.removeChild(f),a&&a.forEach((e=>e(r))),t)return t(r)},s=setTimeout(l.bind(null,void 0,{type:"timeout",target:f}),12e4);f.onerror=l.bind(null,f.onerror),f.onload=l.bind(null,f.onload),d&&document.head.appendChild(f)}},i.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.p="/buzz/",i.gca=function(e){return 
e={17896441:"918","871e3331":"34","935f2afb":"53","3b8c55ea":"217","1df93b7f":"237",e53fa2b7:"355","1102fda7":"382","1a20bc57":"468","1be78505":"514","2d9f39b3":"542","0480b142":"836","3e407b54":"860","6dbc2e00":"958",c377a04b:"971"}[e]||e,i.p+i.u(e)},(()=>{var e={303:0,532:0};i.f.j=(t,r)=>{var o=i.o(e,t)?e[t]:void 0;if(0!==o)if(o)r.push(o[2]);else if(/^(303|532)$/.test(t))e[t]=0;else{var a=new Promise(((r,a)=>o=e[t]=[r,a]));r.push(o[2]=a);var n=i.p+i.u(t),f=new Error;i.l(n,(r=>{if(i.o(e,t)&&(0!==(o=e[t])&&(e[t]=void 0),o)){var a=r&&("load"===r.type?"missing":r.type),n=r&&r.target&&r.target.src;f.message="Loading chunk "+t+" failed.\n("+a+": "+n+")",f.name="ChunkLoadError",f.type=a,f.request=n,o[1](f)}}),"chunk-"+t,t)}},i.O.j=t=>0===e[t];var t=(t,r)=>{var o,a,n=r[0],f=r[1],d=r[2],b=0;if(n.some((t=>0!==e[t]))){for(o in f)i.o(f,o)&&(i.m[o]=f[o]);if(d)var c=d(i)}for(t&&t(r);b{"use strict";var e,t,r,o,a,n={},f={};function i(e){var t=f[e];if(void 0!==t)return t.exports;var r=f[e]={id:e,loaded:!1,exports:{}};return n[e].call(r.exports,r,r.exports,i),r.loaded=!0,r.exports}i.m=n,i.c=f,e=[],i.O=(t,r,o,a)=>{if(!r){var n=1/0;for(b=0;b=a)&&Object.keys(i.O).every((e=>i.O[e](r[c])))?r.splice(c--,1):(f=!1,a0&&e[b-1][2]>a;b--)e[b]=e[b-1];e[b]=[r,o,a]},i.n=e=>{var t=e&&e.__esModule?()=>e.default:()=>e;return i.d(t,{a:t}),t},r=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,i.t=function(e,o){if(1&o&&(e=this(e)),8&o)return e;if("object"==typeof e&&e){if(4&o&&e.__esModule)return e;if(16&o&&"function"==typeof e.then)return e}var a=Object.create(null);i.r(a);var n={};t=t||[null,r({}),r([]),r(r)];for(var f=2&o&&e;"object"==typeof f&&!~t.indexOf(f);f=r(f))Object.getOwnPropertyNames(f).forEach((t=>n[t]=()=>e[t]));return n.default=()=>e,i.d(a,n),a},i.d=(e,t)=>{for(var r in t)i.o(t,r)&&!i.o(e,r)&&Object.defineProperty(e,r,{enumerable:!0,get:t[r]})},i.f={},i.e=e=>Promise.all(Object.keys(i.f).reduce(((t,r)=>(i.f[r](e,t),t)),[])),i.u=e=>"assets/js/"+({34:"871e3331",53:"935f2afb",217:"3b8c55ea",237:"1df93b7f",355:"e53fa2b7",382:"1102fda7",468:"1a20bc57",514:"1be78505",542:"2d9f39b3",836:"0480b142",860:"3e407b54",918:"17896441",958:"6dbc2e00",971:"c377a04b"}[e]||e)+"."+{34:"b7e0063b",53:"ee4c5b72",217:"a80ecf58",237:"71d7b441",355:"270fcee2",382:"0e12deaa",468:"23883b0e",514:"131974b6",542:"0a265eb2",836:"0adbc5c7",860:"987a7018",918:"0aaaf3b3",958:"24fcb0eb",971:"b0b8fe80",972:"d5cc17b1"}[e]+".js",i.miniCssF=e=>{},i.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),i.o=(e,t)=>Object.prototype.hasOwnProperty.call(e,t),o={},a="docs:",i.l=(e,t,r,n)=>{if(o[e])o[e].push(t);else{var f,c;if(void 0!==r)for(var d=document.getElementsByTagName("script"),b=0;b{f.onerror=f.onload=null,clearTimeout(s);var a=o[e];if(delete o[e],f.parentNode&&f.parentNode.removeChild(f),a&&a.forEach((e=>e(r))),t)return t(r)},s=setTimeout(l.bind(null,void 0,{type:"timeout",target:f}),12e4);f.onerror=l.bind(null,f.onerror),f.onload=l.bind(null,f.onload),c&&document.head.appendChild(f)}},i.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.p="/buzz/",i.gca=function(e){return 
e={17896441:"918","871e3331":"34","935f2afb":"53","3b8c55ea":"217","1df93b7f":"237",e53fa2b7:"355","1102fda7":"382","1a20bc57":"468","1be78505":"514","2d9f39b3":"542","0480b142":"836","3e407b54":"860","6dbc2e00":"958",c377a04b:"971"}[e]||e,i.p+i.u(e)},(()=>{var e={303:0,532:0};i.f.j=(t,r)=>{var o=i.o(e,t)?e[t]:void 0;if(0!==o)if(o)r.push(o[2]);else if(/^(303|532)$/.test(t))e[t]=0;else{var a=new Promise(((r,a)=>o=e[t]=[r,a]));r.push(o[2]=a);var n=i.p+i.u(t),f=new Error;i.l(n,(r=>{if(i.o(e,t)&&(0!==(o=e[t])&&(e[t]=void 0),o)){var a=r&&("load"===r.type?"missing":r.type),n=r&&r.target&&r.target.src;f.message="Loading chunk "+t+" failed.\n("+a+": "+n+")",f.name="ChunkLoadError",f.type=a,f.request=n,o[1](f)}}),"chunk-"+t,t)}},i.O.j=t=>0===e[t];var t=(t,r)=>{var o,a,n=r[0],f=r[1],c=r[2],d=0;if(n.some((t=>0!==e[t]))){for(o in f)i.o(f,o)&&(i.m[o]=f[o]);if(c)var b=c(i)}for(t&&t(r);d Introduction | Buzz - + @@ -20,7 +20,7 @@ Whisper.cpp, Faster Whisper, Whisper-compatible Hugging Face models, and the OpenAI Whisper API
  • Command-Line Interface
  • Available on Mac, Windows, and Linux
  • - + \ No newline at end of file diff --git a/docs/cli.html b/docs/cli.html index ac008fdbd..ede91f749 100644 --- a/docs/cli.html +++ b/docs/cli.html @@ -4,13 +4,13 @@ CLI | Buzz - +

    CLI

    Commands

    add

    Start a new transcription task.

    Usage: buzz add [options] [file file file...]

    Options:
    -t, --task <task> The task to perform. Allowed: translate,
    transcribe. Default: transcribe.
    -m, --model-type <model-type> Model type. Allowed: whisper, whispercpp,
    huggingface, fasterwhisper, openaiapi. Default:
    whisper.
    -s, --model-size <model-size> Model size. Use only when --model-type is
    whisper, whispercpp, or fasterwhisper. Allowed:
    tiny, base, small, medium, large. Default:
    tiny.
    --hfid <id> Hugging Face model ID. Use only when
    --model-type is huggingface. Example:
    "openai/whisper-tiny"
    -l, --language <code> Language code. Allowed: af (Afrikaans), am
    (Amharic), ar (Arabic), as (Assamese), az
    (Azerbaijani), ba (Bashkir), be (Belarusian),
    bg (Bulgarian), bn (Bengali), bo (Tibetan), br
    (Breton), bs (Bosnian), ca (Catalan), cs
    (Czech), cy (Welsh), da (Danish), de (German),
    el (Greek), en (English), es (Spanish), et
    (Estonian), eu (Basque), fa (Persian), fi
    (Finnish), fo (Faroese), fr (French), gl
    (Galician), gu (Gujarati), ha (Hausa), haw
    (Hawaiian), he (Hebrew), hi (Hindi), hr
    (Croatian), ht (Haitian Creole), hu
    (Hungarian), hy (Armenian), id (Indonesian), is
    (Icelandic), it (Italian), ja (Japanese), jw
    (Javanese), ka (Georgian), kk (Kazakh), km
    (Khmer), kn (Kannada), ko (Korean), la (Latin),
    lb (Luxembourgish), ln (Lingala), lo (Lao), lt
    (Lithuanian), lv (Latvian), mg (Malagasy), mi
    (Maori), mk (Macedonian), ml (Malayalam), mn
    (Mongolian), mr (Marathi), ms (Malay), mt
    (Maltese), my (Myanmar), ne (Nepali), nl
    (Dutch), nn (Nynorsk), no (Norwegian), oc
    (Occitan), pa (Punjabi), pl (Polish), ps
    (Pashto), pt (Portuguese), ro (Romanian), ru
    (Russian), sa (Sanskrit), sd (Sindhi), si
    (Sinhala), sk (Slovak), sl (Slovenian), sn
    (Shona), so (Somali), sq (Albanian), sr
    (Serbian), su (Sundanese), sv (Swedish), sw
    (Swahili), ta (Tamil), te (Telugu), tg (Tajik),
    th (Thai), tk (Turkmen), tl (Tagalog), tr
    (Turkish), tt (Tatar), uk (Ukrainian), ur
    (Urdu), uz (Uzbek), vi (Vietnamese), yi
    (Yiddish), yo (Yoruba), zh (Chinese). Leave
    empty to detect language.
    -p, --prompt <prompt> Initial prompt
    --openai-token <token> OpenAI access token. Use only when
    --model-type is openaiapi. Defaults to your
    previously saved access token, if one exists.
    --srt Output result in an SRT file.
    --vtt Output result in a VTT file.
    --txt Output result in a TXT file.
    -h, --help Displays help on commandline options.
    --help-all Displays help including Qt specific options.
    -v, --version Displays version information.

    Arguments:
    files Input file paths

    Examples:

    # Translate two MP3 files from French to English using OpenAI Whisper API
    buzz add --task translate --language fr --model-type openaiapi /Users/user/Downloads/1b3b03e4-8db5-ea2c-ace5-b71ff32e3304.mp3 /Users/user/Downloads/koaf9083k1lkpsfdi0.mp3

    # Transcribe an MP4 using Whisper.cpp "small" model and immediately export to SRT and VTT files
    buzz add --task transcribe --model-type whispercpp --model-size small --prompt "My initial prompt" --srt --vtt /Users/user/Downloads/buzz/1b3b03e4-8db5-ea2c-ace5-b71ff32e3304.mp4
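    One more sketch along the same lines (the file path is illustrative):

    # Transcribe an MP3 with the Faster Whisper "medium" model, auto-detect the language, and export to TXT
    buzz add --task transcribe --model-type fasterwhisper --model-size medium --txt /Users/user/Downloads/podcast.mp3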
    - + \ No newline at end of file diff --git a/docs/faq.html b/docs/faq.html index 05ba9dc00..74067de99 100644 --- a/docs/faq.html +++ b/docs/faq.html @@ -4,14 +4,14 @@ FAQ | Buzz - +

    FAQ

    1. Where are the models stored?

      The Whisper models are stored in ~/.cache/whisper. The Whisper.cpp models are stored in ~/Library/Caches/Buzz -(Mac OS), ~/.cache/Buzz (Unix), or C:\Users\<username>\AppData\Local\Buzz\Buzz\Cache (Windows).

    2. What can I try if the transcription runs too slowly?

      Speech recognition requires a large amount of computation, so one option is to try a smaller Whisper model size or a Whisper.cpp model to run speech recognition on your computer. If you have access to a computer with a GPU that has at least 6 GB of VRAM, you can try the Faster Whisper model.

      Buzz also supports using the OpenAI API to run speech recognition on a remote server. To use this feature, you need to set an OpenAI API key in Preferences. See the Preferences section for more details.

    3. How to record system audio?

      To transcribe system audio, you need to configure a virtual audio device and connect the output of the applications you want to transcribe to this virtual speaker. After that, you can select it as the source in Buzz. See the Usage section for more details.

      Relevant tools:

      • Mac OS - BlackHole
      • Windows - VB CABLE
      • Linux - PulseAudio Volume Control

    4. What model should I use?

      The model size to use will depend on your hardware and use case. Smaller models work faster but make more mistakes; larger models are more accurate but require more powerful hardware or more time to transcribe.

      When choosing among the large models, consider the following: "Large" is the first-released, older model; "Large-V2" is a later, updated model with better accuracy, considered the most robust and stable for some languages; "Large-V3" is the latest model with the best accuracy in many cases, but it can sometimes hallucinate or invent words that were never in the audio. The only sure way to know which model best suits your needs is to test them all in your language.

    5. How to get GPU acceleration for faster transcription?

      On Linux, GPU acceleration is supported out of the box on Nvidia GPUs with CUDA installed.

      On Windows, see this note on enabling CUDA GPU support.

    - +(Mac OS), ~/.cache/Buzz (Unix), or C:\Users\<username>\AppData\Local\Buzz\Buzz\Cache (Windows).

  • What can I try if the transcription runs too slowly?

    Speech recognition requires a large amount of computation, so one option is to try a smaller Whisper model size or a Whisper.cpp model to run speech recognition on your computer. If you have access to a computer with a GPU that has at least 6 GB of VRAM, you can try the Faster Whisper model.

    Buzz also supports using the OpenAI API to run speech recognition on a remote server. To use this feature, you need to set an OpenAI API key in Preferences. See the Preferences section for more details.

  • How to record system audio?

    To transcribe system audio, you need to configure a virtual audio device and connect the output of the applications you want to transcribe to this virtual speaker. After that, you can select it as the source in Buzz. See the Usage section for more details.

    Relevant tools:

    • Mac OS - BlackHole
    • Windows - VB CABLE
    • Linux - PulseAudio Volume Control

  • What model should I use?

    The model size to use will depend on your hardware and use case. Smaller models work faster but make more mistakes; larger models are more accurate but require more powerful hardware or more time to transcribe.

    When choosing among the large models, consider the following: "Large" is the first-released, older model; "Large-V2" is a later, updated model with better accuracy, considered the most robust and stable for some languages; "Large-V3" is the latest model with the best accuracy in many cases, but it can sometimes hallucinate or invent words that were never in the audio. The only sure way to know which model best suits your needs is to test them all in your language.

  • How to get GPU acceleration for faster transcription?

    On Linux, GPU acceleration is supported out of the box on Nvidia GPUs. If you still run into issues, install CUDA 12, cuBLAS, and cuDNN (a quick sanity check is sketched after this item).

    On Windows, see this note on enabling CUDA GPU support.
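    On Linux, a quick sanity check, sketched under the assumption that Buzz's Python environment ships with torch (the usual Whisper backend):

    # The driver should list your GPU
    nvidia-smi
    # Should print True when CUDA acceleration is available to torch
    python -c "import torch; print(torch.cuda.is_available())"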

  • + \ No newline at end of file diff --git a/docs/installation.html b/docs/installation.html index 27f3f0218..ecb8055e7 100644 --- a/docs/installation.html +++ b/docs/installation.html @@ -4,7 +4,7 @@ Installation | Buzz - + @@ -14,7 +14,7 @@ the App Store version.)

    macOS (Intel, macOS 11.7 and later)

    Install via brew:

    brew install --cask buzz

    Alternatively, download and run the Buzz-x.y.z.dmg file.

    For Apple Silicon Macs (and for a better experience on Intel Macs), download Buzz Captions on the App Store.

    Windows (Windows 10 and later)

    Download and run the Buzz-x.y.z.exe file.

    Linux

    sudo apt-get install libportaudio2 libcanberra-gtk-module libcanberra-gtk3-module
    sudo snap install buzz
    sudo snap connect buzz:audio-record
    sudo snap connect buzz:password-manager-service
    sudo snap connect buzz:pulseaudio
    sudo snap connect buzz:removable-media

    Get it from the Snap Store

    Alternatively, on Ubuntu 20.04 and later, install the dependencies:

    sudo apt-get install libportaudio2

    Then, download and extract the Buzz-x.y.z-unix.tar.gz file.

    PyPI

    pip install buzz-captions
    python -m buzz
    - + \ No newline at end of file diff --git a/docs/preferences.html b/docs/preferences.html index ab68b07ca..ff3793688 100644 --- a/docs/preferences.html +++ b/docs/preferences.html @@ -4,7 +4,7 @@ Preferences | Buzz - + @@ -12,7 +12,7 @@

    Preferences

    Open the Preferences window from the menu bar, or press Ctrl/Cmd + ,.

    General Preferences

    OpenAI API preferences

    API Key - the key used to authenticate your requests to the OpenAI API. To get an API key from OpenAI, see this article.

    Base Url - By default, all requests are sent to the API provided by OpenAI; its API URL is https://api.openai.com/v1/. Compatible APIs are also provided by other companies; a list of available API URLs can be found on the discussion page.

    Default export file name

    Sets the default export file name for file transcriptions. For example, a value of {{ input_file_name }} ({{ task }}d on {{ date_time }}) will save TXT exports as Input Filename (transcribed on 19-Sep-2023 20-39-25).txt by default.

    Available variables:

    Key               Description                                Example
    input_file_name   File name of the imported file             audio (e.g. if the imported file path was /path/to/audio.wav)
    task              Transcription task                         transcribe, translate
    language          Language code                              en, fr, yo, etc.
    model_type        Model type                                 Whisper, Whisper.cpp, Faster Whisper, etc.
    model_size        Model size                                 tiny, base, small, medium, large, etc.
    date_time         Export time (format: %d-%b-%Y %H-%M-%S)    19-Sep-2023 20-39-25
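    For instance, a hypothetical template combining several of these variables:

    {{ input_file_name }} {{ language }} ({{ model_type }} {{ model_size }})

    would save an English Whisper "tiny" transcription of audio.wav as audio en (Whisper tiny).txt.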

    Live transcript exports

    Live transcription export can be used to integrate Buzz with other applications like OBS Studio. When enabled, live text transcripts will be exported to a text file as they are generated and translated.

    If AI translation is enabled for live recordings, the translated text will also be exported to a text file. The filename for the translated text will end with .translated.txt.
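    To watch the live export as it grows, one minimal sketch (the file path is illustrative; use the export path configured in your preferences):

    tail -f ~/Documents/live_transcript.txt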

    - + \ No newline at end of file diff --git a/docs/usage/file_import.html b/docs/usage/file_import.html index 8b4ffaf7a..b40fffa03 100644 --- a/docs/usage/file_import.html +++ b/docs/usage/file_import.html @@ -4,14 +4,14 @@ File Import | Buzz - +

    File Import

    To import a file:

    • Click Import Media File on the File menu (or the '+' icon on the toolbar, or Command/Ctrl + O).
    • Choose an audio or video file.
    • Select a task, language, and the model settings.
    • Click Run.
    • When the transcription status shows 'Completed', double-click on the row (or select the row and click the '⤢' icon) to open the transcription.
    Field                Options               Default   Description
    Export As            "TXT", "SRT", "VTT"   "TXT"     Export file format
    Word-Level Timings   Off / On              Off       If checked, the transcription will generate a separate subtitle line for each word in the audio. Enabled only when "Export As" is set to "SRT" or "VTT".

    (See the Live Recording section for more information about the task, language, and quality settings.)

    Media File Import on Buzz

    - + \ No newline at end of file diff --git a/docs/usage/live_recording.html b/docs/usage/live_recording.html index fcb76d8c9..2a738538f 100644 --- a/docs/usage/live_recording.html +++ b/docs/usage/live_recording.html @@ -4,7 +4,7 @@ Live Recording | Buzz - + @@ -17,7 +17,7 @@ see LoopBeAudio, LoopBack, and Virtual Audio Cable).

    1. Install BlackHole via Homebrew

      brew install blackhole-2ch
    2. Open Audio MIDI Setup from Spotlight or from /Applications/Utilities/Audio MIDI Setup.app.

      Open Audio MIDI Setup from Spotlight

    3. Click the '+' icon at the lower left corner and select 'Create Multi-Output Device'.

      Create multi-output device

    4. Add your default speaker and BlackHole to the multi-output device.

      Screenshot of multi-output device

    5. Select this multi-output device as your speaker (application or system-wide) to play audio into BlackHole.

    6. Open Buzz, select BlackHole as your microphone, and record as before to see transcriptions from the audio playing through BlackHole.

    Record audio playing from computer (Windows)

    To transcribe system audio, you need to configure a virtual audio device and connect the output of the applications you want to transcribe to this virtual speaker. After that, you can select it as the source in Buzz.

    1. Install VB CABLE as a virtual audio device.

    2. Configure it using the Windows Sound settings. Right-click the speaker icon in the system tray and select "Open Sound settings". In the "Choose your output device" dropdown, select "CABLE Input" to send all system sound to the virtual device, or use "Advanced sound options" to pick the applications that will output their sound to this device.

    Record audio playing from computer (Linux)

    As described on the Ubuntu Wiki, on any Linux system with PulseAudio you can redirect application audio to a virtual speaker. After that, you can select it as the source in Buzz.

    Overall steps:

    1. Launch the application that will produce the sound you want to transcribe and start playback. For example, start a video in a media player.
    2. Launch Buzz and open the Live Recording screen so that you can see the settings.
    3. Configure sound routing from the application you want to transcribe to Buzz in the Recording tab of PulseAudio Volume Control (pavucontrol); a command-line alternative is sketched below.
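    If you prefer the command line to pavucontrol, a minimal sketch with pactl (the sink name virtual_speaker is illustrative; assumes a standard PulseAudio setup):

    # Create a virtual sink to route application audio into
    pactl load-module module-null-sink sink_name=virtual_speaker
    # Find the playback stream of the application you want to transcribe
    pactl list short sink-inputs
    # Move that stream to the virtual sink; Buzz can then record from its monitor source
    pactl move-sink-input <sink-input-id> virtual_speaker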
    - + \ No newline at end of file diff --git a/docs/usage/translations.html b/docs/usage/translations.html index 16df5a88f..44dd541b6 100644 --- a/docs/usage/translations.html +++ b/docs/usage/translations.html @@ -4,13 +4,13 @@ Translations | Buzz - +

    Translations

    The default Translation task uses the Whisper model's built-in ability to translate to English. Since version 1.0.0, Buzz supports additional AI translations to any other language.

    To use the translation feature, you will need to configure an OpenAI API key and the translation settings. Set the OpenAI API key in Preferences. Buzz also supports custom locally running translation AIs that support the OpenAI API. For more information on locally running AIs, see ollama or LM Studio.

    To configure translation for live recordings, enable it in the Advanced settings dialog of the Live Recording settings. Enter the AI model to use and a prompt with instructions for the AI on how to translate. The translation option is also available for files that already have speech recognized; use the Translate button on the transcription viewer toolbar.

    For the AI to know how to translate, enter translation instructions in the "Instructions for AI" section. In your instructions, describe the language you want the text translated into. You may also need to add instructions not to add any notes or comments, as AIs tend to add them. Example instructions to translate English subtitles to Spanish:

    You are a professional translator, skilled in translating English to Spanish. You will only translate each sentence sent to you into Spanish and not add any notes or comments.
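    Under the hood this is a standard OpenAI-style chat completion. As an illustration only (Buzz sends the request for you, against whatever endpoint and model you configured in Preferences), such a request looks roughly like:

    curl https://api.openai.com/v1/chat/completions \
      -H "Content-Type: application/json" \
      -H "Authorization: Bearer $OPENAI_API_KEY" \
      -d '{
        "model": "gpt-4o",
        "messages": [
          {"role": "system", "content": "You are a professional translator, skilled in translating English to Spanish. You will only translate each sentence sent to you into Spanish and not add any notes or comments."},
          {"role": "user", "content": "Good morning, everyone."}
        ]
      }'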

    If you enable "Enable live recording transcription export" in Preferences, live text transcripts will be exported to a text file as they are generated and translated. This file can be used to integrate live transcripts with other applications like OBS Studio.

    The approximate cost of translating one hour of audio with the ChatGPT gpt-4o model is around $0.50.

    - + \ No newline at end of file diff --git a/index.html b/index.html index fe23ff151..45e6c82b0 100644 --- a/index.html +++ b/index.html @@ -4,13 +4,13 @@ Buzz - +
    - + \ No newline at end of file