Ft/speech to txt #741 (Draft)

wants to merge 2 commits into base: development
203 changes: 203 additions & 0 deletions renderer/src/components/AudioRecorder/SpeechRecognitionWebApi/SpeechRecognition.js
@@ -0,0 +1,203 @@
// eslint-disable-next-line no-unused-vars
import React, { useEffect, useRef, useState } from 'react';

// Speech-to-text built on the browser Web Speech API (webkit-prefixed in
// Chromium-based browsers). The trigger prop mirrors the recorder
// transport actions: 'record' | 'recPause' | 'recResume' | 'recStop'.
export default function SpeechRecognition({ trigger }) {
  const Speech = window.SpeechRecognition || window.webkitSpeechRecognition;
  // Hold a single recognizer across renders instead of constructing a
  // new instance on every render.
  const srRef = useRef(null);
  const [islistening, setIslistening] = useState(false);

  // Lazily create and configure the recognizer on first use.
  function getRecognizer() {
    if (!Speech) {
      console.log('Web Speech API is not available in this browser');
      return null;
    }
    if (!srRef.current) {
      const sr = new Speech();
      sr.continuous = true; // keep listening across pauses in speech
      sr.interimResults = true; // emit partial transcripts while speaking
      sr.lang = 'en-IN';
      sr.onstart = () => console.log('mic on');
      sr.onend = () => console.log('mic off');
      sr.onerror = (e) => console.log('error : ', e.error);
      // Results arrive while recognition is running, so the handler is
      // attached once here, before any start() call, not after stop().
      sr.onresult = (e) => {
        const transcript = Array.from(e.results)
          .map((result) => result[0])
          .map((result) => result.transcript)
          .join('');
        console.log('out : ', transcript);
      };
      srRef.current = sr;
    }
    return srRef.current;
  }

  function startfunc() {
    const sr = getRecognizer();
    if (sr && !islistening) {
      // start() throws InvalidStateError if called while already
      // listening, hence the islistening guard.
      sr.start();
      setIslistening(true);
    }
  }

  function stopfunc() {
    const sr = getRecognizer();
    if (sr && islistening) {
      sr.stop();
      setIslistening(false);
    }
  }

  // Follow the recorder's own record/stop actions. The Web Speech API
  // has no pause, so 'recPause' and 'recResume' are not handled yet.
  useEffect(() => {
    if (trigger === 'record') startfunc();
    if (trigger === 'recStop') stopfunc();
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [trigger]);

  return (
    <div className="p-2 bg-dark rounded-md hover:bg-error">
      <button type="button" onClick={startfunc}>
        <span>🎙️</span>
      </button>
      <button type="button" onClick={stopfunc}>
        <span>🛑</span>
      </button>
    </div>
  );
}
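
Aside: with interimResults enabled, each onresult event interleaves partial and final hypotheses, and the handler above joins them without distinguishing the two. A minimal sketch of separating them using the standard isFinal flag on each SpeechRecognitionResult:

// Sketch only: split interim text from final text in an onresult handler.
const Speech = window.SpeechRecognition || window.webkitSpeechRecognition;
const sr = new Speech();
sr.continuous = true;
sr.interimResults = true;
sr.onresult = (e) => {
  let finalText = '';
  let interimText = '';
  Array.from(e.results).forEach((result) => {
    // isFinal marks hypotheses the engine will no longer revise.
    if (result.isFinal) finalText += result[0].transcript;
    else interimText += result[0].transcript;
  });
  console.log({ finalText, interimText });
};
sr.start();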
11 changes: 11 additions & 0 deletions renderer/src/components/AudioRecorder/components/Player.js
@@ -15,6 +15,7 @@ import PropTypes from 'prop-types';
import { useState } from 'react';
import PlayIcon from '@/icons/basil/Outline/Media/Play.svg';
import PauseIcon from '@/icons/basil/Outline/Media/Pause.svg';
import SpeechRecognition from '../SpeechRecognitionWebApi/SpeechRecognition';

const AudioWaveform = dynamic(() => import('./WaveForm'), { ssr: false });

@@ -375,6 +376,16 @@ const Player = ({
</button>
</div>
</div>
{/* speech section */}
<div className="flex flex-col px-10 items-center border-l border-l-gray-800">
<div className="text-xxs text-gray-300 uppercase tracking-wider mb-2">
Speech
</div>
<div className="flex flex-col items-center">
<SpeechRecognition trigger={trigger} />
</div>
</div>

</div>
<div className="border-t border-gray-800 bg-black text-white">
<AudioWaveform
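
The trigger value consumed above is not defined in this hunk; it presumably comes from the Player's existing transport state. A hypothetical sketch of that wiring (component and state names are illustrative, not from this PR):

// Hypothetical parent: transport buttons set a trigger string that
// SpeechRecognition reacts to through its useEffect on the trigger prop.
import { useState } from 'react';
import SpeechRecognition from '../SpeechRecognitionWebApi/SpeechRecognition';

export default function TransportWithSpeech() {
  const [trigger, setTrigger] = useState('');
  return (
    <div>
      <button type="button" onClick={() => setTrigger('record')}>Record</button>
      <button type="button" onClick={() => setTrigger('recStop')}>Stop</button>
      <SpeechRecognition trigger={trigger} />
    </div>
  );
}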
4 changes: 4 additions & 0 deletions renderer/src/layouts/projects/SideBar.js
@@ -2,6 +2,7 @@ import Link from 'next/link';
import { useEffect, useState } from 'react';
import * as localForage from 'localforage';
import { useTranslation } from 'react-i18next';
// import SpeechRecognition from '@/components/AudioRecorder/SpeechRecognitionWebApi/SpeechRecognition';
import LogoIcon from '@/icons/logo.svg';
import ProjectsIcon from '@/icons/projects.svg';
import NewProjectIcon from '@/icons/new.svg';
@@ -26,6 +27,9 @@ export default function SideBar() {
/>
</div>
<ul>
{/* <li>
<SpeechRecognition true />
</li> */}
<li className="text-gray-900 font-medium hover:text-white hover:bg-primary cursor-pointer py-5 group">
<Link
href="/projects"