diff --git a/.github/workflows/develop.yaml b/.github/workflows/develop.yaml
index 6d86c472..e9b27125 100644
--- a/.github/workflows/develop.yaml
+++ b/.github/workflows/develop.yaml
@@ -5,13 +5,13 @@ on:
- 'develop'
jobs:
- build:
- name: Build
+ trigger-circleci:
runs-on: ubuntu-latest
steps:
- - name: Repository Dispatch
- uses: peter-evans/repository-dispatch@v1
- with:
- token: ${{ secrets.REPO_ACCESS_TOKEN }}
- repository: ${{ secrets.REPO_NAME }}
- event-type: build-staging
+ - name: Trigger existing circleci job
+ uses: zivkaziv/circleci-trigger-github-action@master
+ with:
+ token: ${{ secrets.CCI_DOCS_TOKEN }}
+ branch: develop
+ org: rammerai
+ repo: docs-v1
diff --git a/.github/workflows/master.yaml b/.github/workflows/master.yaml
index a4db8826..829bb2da 100644
--- a/.github/workflows/master.yaml
+++ b/.github/workflows/master.yaml
@@ -1,17 +1,16 @@
-
on:
push:
branches:
- 'master'
jobs:
- build:
- name: Build
+ trigger-circleci:
runs-on: ubuntu-latest
steps:
- - name: Repository Dispatch
- uses: peter-evans/repository-dispatch@v1
- with:
- token: ${{ secrets.REPO_ACCESS_TOKEN }}
- repository: ${{ secrets.REPO_NAME }}
- event-type: build-production
+ - name: Trigger existing circleci job
+ uses: zivkaziv/circleci-trigger-github-action@master
+ with:
+ token: ${{ secrets.CCI_DOCS_TOKEN }}
+ branch: master
+ org: rammerai
+ repo: docs-v1
diff --git a/.gitignore b/.gitignore
index 03b7cc31..cf529359 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
+
# Dependencies
/node_modules
@@ -18,5 +19,4 @@ config.json
.env.production.local
npm-debug.log*
-yarn-debug.log*
-yarn-error.log*
+yarn-debug.log*
\ No newline at end of file
diff --git a/docs/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
similarity index 100%
rename from docs/CODE_OF_CONDUCT.md
rename to CODE_OF_CONDUCT.md
diff --git a/docs/Contributing.md b/Contributing.md
similarity index 100%
rename from docs/Contributing.md
rename to Contributing.md
diff --git a/Dockerfile.dev b/Dockerfile.dev
deleted file mode 100755
index 5c5e465c..00000000
--- a/Dockerfile.dev
+++ /dev/null
@@ -1,30 +0,0 @@
-FROM gcr.io/self-serve-dev-261801/docs-v1:base AS base
-
-WORKDIR /opt/services/docs-v1/
-
-COPY . .
-
-COPY gitconfigfile .git/config
-
-RUN git submodule update docs/
-
-RUN cd docs/ && git config user.email "service-account@symbl.ai" && git config user.name "service-account" && git checkout develop && git pull origin develop && cd .. && rm -rf node_modules && npm i
-
-RUN rm -rf src/theme/DocPage
-
-RUN npm audit fix
-
-RUN npm run build-staging
-
-RUN mkdir docs-build && mv build docs-build && mv docs-build build && mv build/build build/docs
-
-FROM nginx:alpine
-
-RUN apk update && apk del curl && apk add curl-doc && apk --no-cache add curl
-
-COPY default-dev /etc/nginx/conf.d/default.conf
-
-COPY --chown=0:0 --from=base /opt/services/docs-v1 /opt/services/docs-v1
-
-EXPOSE 80
-
diff --git a/Dockerfile.prod b/Dockerfile.prod
deleted file mode 100755
index 3e300978..00000000
--- a/Dockerfile.prod
+++ /dev/null
@@ -1,28 +0,0 @@
-FROM gcr.io/prod-temp-265113/docs-v1:base AS base
-
-WORKDIR /opt/services/docs-v1/
-
-COPY . .
-
-COPY gitconfigfile .git/config
-
-RUN git submodule update docs/
-
-RUN cd docs/ && git config user.email "service-account@symbl.ai" && git config user.name "service-account" && git checkout master && git pull origin master && cd .. && rm -rf node_modules && npm i
-
-RUN npm audit fix
-
-RUN npm run build
-
-RUN mkdir docs-build && mv build docs-build && mv docs-build build && mv build/build build/docs
-
-FROM nginx:alpine
-
-RUN apk update && apk del curl && apk add curl-doc && apk --no-cache add curl
-
-COPY default-prod /etc/nginx/conf.d/default.conf
-
-COPY --chown=0:0 --from=base /opt/services/docs-v1 /opt/services/docs-v1
-
-EXPOSE 80
-
diff --git a/Dockerfile.stage b/Dockerfile.stage
deleted file mode 100755
index dfe9988f..00000000
--- a/Dockerfile.stage
+++ /dev/null
@@ -1,28 +0,0 @@
-FROM gcr.io/self-serve-dev-261801/docs-v1:base AS base
-
-WORKDIR /opt/services/docs-v1/
-
-COPY . .
-
-COPY gitconfigfile .git/config
-
-RUN git submodule update docs/
-
-RUN cd docs/ && git config user.email "service-account@symbl.ai" && git config user.name "service-account" && git checkout experiment && git pull origin experiment && cd .. && rm -rf node_modules && npm i
-
-RUN npm audit fix
-
-RUN npm run build-staging
-
-RUN mkdir docs-build && mv build docs-build && mv docs-build build && mv build/build build/docs
-
-FROM nginx:alpine
-
-RUN apk update && apk del curl && apk add curl-doc && apk --no-cache add curl
-
-COPY default-stage /etc/nginx/conf.d/default.conf
-
-COPY --chown=0:0 --from=base /opt/services/docs-v1 /opt/services/docs-v1
-
-EXPOSE 80
-
diff --git a/Makefile b/Makefile
deleted file mode 100644
index b9c5daa8..00000000
--- a/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-.PHONY: build clean push
-
-IMAGE_TAG:=$(shell git rev-parse --short HEAD)
-ENV:=$(shell git branch --show-current)
-
-build-prod: Dockerfile.prod
- docker build -f Dockerfile.prod . -t ${COMPONENT}:${IMAGE_TAG}
-
-
-build-dev: Dockerfile.dev
- docker build -f Dockerfile.dev . -t ${COMPONENT}:${IMAGE_TAG}
-
-
-build-stage: Dockerfile.stage
- docker build -f Dockerfile.stage . -t ${COMPONENT}:${IMAGE_TAG}
diff --git a/docs/README.md b/README.md
similarity index 100%
rename from docs/README.md
rename to README.md
diff --git a/default b/default
deleted file mode 100644
index a7fdc77b..00000000
--- a/default
+++ /dev/null
@@ -1,20 +0,0 @@
-server {
-
- listen 80;
-
- server_name _;
- add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
-
- location / {
- return 301 http://$host$request_uri;
- }
- location ~ /docs(.*) {
- root /opt/services/docs-v1/build;
- index index.html;
- }
-
- location /nginx-health {
- return 200;
- }
-
-}
diff --git a/default-dev b/default-dev
deleted file mode 100644
index 56ce58b4..00000000
--- a/default-dev
+++ /dev/null
@@ -1,20 +0,0 @@
-server {
-
- listen 80;
-
- server_name _;
- add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
-
- location / {
- return 301 https://docs-testing.symbl.ai/docs;
- }
- location ~ /docs(.*) {
- root /opt/services/docs-v1/build;
- index index.html;
- }
-
- location /nginx-health {
- return 200;
- }
-
-}
diff --git a/default-prod b/default-prod
deleted file mode 100644
index ebe593fc..00000000
--- a/default-prod
+++ /dev/null
@@ -1,20 +0,0 @@
-server {
-
- listen 80;
-
- server_name _;
- add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
-
- location / {
- return 301 https://docs.symbl.ai/docs;
- }
- location ~ /docs(.*) {
- root /opt/services/docs-v1/build;
- index index.html;
- }
-
- location /nginx-health {
- return 200;
- }
-
-}
diff --git a/default-stage b/default-stage
deleted file mode 100644
index 403e48d8..00000000
--- a/default-stage
+++ /dev/null
@@ -1,20 +0,0 @@
-server {
-
- listen 80;
-
- server_name _;
- add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
-
- location / {
- return 301 https://docs-staging.symbl.ai/docs;
- }
- location ~ /docs(.*) {
- root /opt/services/docs-v1/build;
- index index.html;
- }
-
- location /nginx-health {
- return 200;
- }
-
-}
diff --git a/docs/api-reference/experience-api/post-trackers-and-analytics-ui.md b/docs/api-reference/experience-api/post-trackers-and-analytics-ui.md
index fb51b7fd..25eb59e7 100644
--- a/docs/api-reference/experience-api/post-trackers-and-analytics-ui.md
+++ b/docs/api-reference/experience-api/post-trackers-and-analytics-ui.md
@@ -38,7 +38,7 @@ curl --location --request POST "https://api.symbl.ai/v1/conversations/$CONVERSAT
--header "Authorization: Bearer $AUTH_TOKEN" \
--data-raw '{
"name": "audio-summary",
- "audioUrl": "https://storage.googleapis.com/rammer-transcription-bucket/small.mp3",
+ "audioUrl": "https://symbl-test-conversation.s3.amazonaws.com/4_comcast_customer_service_9min03sec.mp3",
}'
```
@@ -47,7 +47,7 @@ curl --location --request POST "https://api.symbl.ai/v1/conversations/$CONVERSAT
```javascript
{
"name": "audio-summary",
- "url": "https://meetinginsights.symbl.ai/meeting/#/eyJzZXNzaW9uSWQiOiI1ODU5NjczMDg1MzEzMDI0IiwidmlkZW9VcmwiOiJodHRwczovL3N0b3JhZ2UuZ29vZ2xlYXBpcy5jb20vcmFtbWVyLXRyYW5zY3JpcHRpb24tYnVja2V0L3NtYWxsLm1wNCJ9?showVideoSummary=true"
+ "url": "https://meetinginsights-experience.symbl.ai/#/eyJjb252ZXJzYXRpb25JZCI6IjU5NDg0ODUwNDUwNTk1ODQiLCJhdWRpb1VybCI6Imh0dHBzOi8vc3ltYmwtdGVzdC1jb252ZXJzYXRpb24uczMuYW1hem9uYXdzLmNvbS80X2NvbWNhc3RfY3VzdG9tZXJfc2VydmljZV85bWluMDNzZWMubXAzIn0.?o=fb5a99d192b2821a40639c5c7af86021db2ed6c7e32b3a8fccf6967b7e126c4ed6bd1e4636082ba3fc3a3da3980e5b99272c241e9d44c518715bf5c9772fe3bc405efb43e2cd11ef9c6e106215034ee3ac91c8dda4c09263032103519e56c690980c1c3f07604c183b1a4ddbcfca5df6cee1f7841492017eb2bb28b761cf57f218f05e233a2f34d223d4e0e4d8615fb2fca9c31fa534237c82e276ef4c4ec2c77f4fa320a7c00cded9e897d879b0f77d819475c0383f677214fa366d85bd6b99b10e1b7f56410d1c5813fd71d8f7f441de040f0bddfe2253c6161cb9990ca47f69e052ae5553a33b3cb0fd9dff80c009b466953f671d0ddefcf4534a17b56b2a89b671c07f0bc51daa85939494423b394ada8fabd44b91efc1817e77566ead15ab69e61fe2773a4eb4086d3ae0ca6bceda3274c5361e5ad389"
}
```
diff --git a/docs/api-reference/getting-conversation-intelligence.md b/docs/api-reference/getting-conversation-intelligence.md
index a3f34fbe..cdbbb2de 100644
--- a/docs/api-reference/getting-conversation-intelligence.md
+++ b/docs/api-reference/getting-conversation-intelligence.md
@@ -14,7 +14,7 @@ To get Conversation Intelligence,
[Step 2: Get Conversation Intelligence with Conversation API.](#2-get-conversation-intelligence-with-conversation-api)
-### 1. Process conversation with Real-time OR Async API
+### Step 1: Process conversation with Real-time OR Async API
---
You can process your text, audio or video conversation with Symbl using:
- [Real-time APIs](#real-time-apis) for connecting Symbl on live audio and video conversations
@@ -54,7 +54,7 @@ The Async API provides a REST interface that helps you submit any recorded or sa
- [Audio API](/docs/async-api/overview/audio/post-audio) for processing recorded conversations via VoIP, voice mails, and audio calls.
- [Video API](/docs/async-api/overview/video/post-video) for processing recorded conversations via video calls.
-### 2. Get Conversation Intelligence with Conversation API
+### Step 2: Get Conversation Intelligence with Conversation API
---
In this step, you will use the **Conversation API** to fetch Transcripts, Topics, Action Items and all the supported insights.
diff --git a/docs/api-reference/getting-started.md b/docs/api-reference/getting-started.md
index 84f028c1..e2cb5d8c 100644
--- a/docs/api-reference/getting-started.md
+++ b/docs/api-reference/getting-started.md
@@ -13,10 +13,10 @@ Our REST APIs support all HTTP verbs (or methods, as they are referred to in RES
Symbl provides a suite of APIs for different usecases. Some of them are listed below:
- 👉 [Async APIs](/docs/async-api/reference/reference) allow you to send text, audio or video conversations in recorded format.
+ 👉 [Async APIs](/docs/async-api/introduction) allow you to send text, audio or video conversations in recorded format.
👉 [Streaming APIs](/docs/streamingapi/introduction) allow you to connect Symbl on a live call via WebSocket protocol.
👉 [Telephony APIs](/docs/telephony/introduction) allow you to connect Symbl on an live audio conversation via SIP and PSTN.
- 👉 [Conversation API](/docs/async-api/reference/reference) allows you to get Conversation Intelligence such as Sentiment Analysis, Action Items, Topics, Trackers, Summary and more.
+ 👉 [Conversation API](/docs/conversation-api/introduction) allows you to get Conversation Intelligence such as Sentiment Analysis, Action Items, Topics, Trackers, Summary and more.
[![Run in Postman](https://run.pstmn.io/button.svg)](https://god.gw.postman.com/run-collection/13497402-108cafc3-da45-4b00-97fe-4819894f58bb?action=collection%2Ffork&collection-url=entityId%3D13497402-108cafc3-da45-4b00-97fe-4819894f58bb%26entityType%3Dcollection%26workspaceId%3D5f563cfe-42ef-4344-a98a-eae13183fb7c)
diff --git a/docs/async-api/overview/audio/post-audio-url.md b/docs/async-api/overview/audio/post-audio-url.md
index 5cd23400..1dad2930 100644
--- a/docs/async-api/overview/audio/post-audio-url.md
+++ b/docs/async-api/overview/audio/post-audio-url.md
@@ -229,7 +229,7 @@ responses = {
500: 'Something went wrong! Please contact support@symbl.ai'
}
-response = requests.request("POST", url, headers=headers, data=json.dumps(payload), params=json.dumps(params)))
+response = requests.request("POST", url, headers=headers, data=json.dumps(payload), params=json.dumps(params))
if response.status_code == 201:
# Successful API execution
diff --git a/docs/async-api/overview/audio/put-audio-url.md b/docs/async-api/overview/audio/put-audio-url.md
index 3865a933..992652a1 100644
--- a/docs/async-api/overview/audio/put-audio-url.md
+++ b/docs/async-api/overview/audio/put-audio-url.md
@@ -239,7 +239,7 @@ responses = {
500: 'Something went wrong! Please contact support@symbl.ai'
}
-response = requests.request("PUT", url, headers=headers, data=json.dumps(payload), params=json.dumps(params)))
+response = requests.request("PUT", url, headers=headers, data=json.dumps(payload), params=json.dumps(params))
if response.status_code == 201:
# Successful API execution
@@ -442,4 +442,4 @@ Here value of `X` can be found in [FAQ](/docs/faq).
:::caution
You must wait for the job to complete processing before you proceed with getting the Conversation Intelligence. If you immediately make a GET request to Conversation API, it is possible that you'll receive incomplete insights. Therefore, ensure that you wait for the job to complete.
-:::
\ No newline at end of file
+:::
diff --git a/docs/async-api/overview/text/post-text.md b/docs/async-api/overview/text/post-text.md
index e6e794f2..9610938b 100644
--- a/docs/async-api/overview/text/post-text.md
+++ b/docs/async-api/overview/text/post-text.md
@@ -36,7 +36,7 @@ curl --location --request POST 'https://api.symbl.ai/v1/process/text' \
--header 'Content-Type: application/json' \
--data-raw '{
"name": "Business Meeting",
- "detectPhrases": "True",
+ "detectPhrases": "true",
"confidenceThreshold": 0.6,
"messages": [
{
@@ -199,7 +199,7 @@ payload = {
"confidenceThreshold": 0.6,
#
- "detectPhrases": True,
+ "detectPhrases": True,
#
"messages": [
diff --git a/docs/async-api/overview/text/put-text.md b/docs/async-api/overview/text/put-text.md
index 9f1592a2..fd51f45c 100644
--- a/docs/async-api/overview/text/put-text.md
+++ b/docs/async-api/overview/text/put-text.md
@@ -40,8 +40,7 @@ curl --location --request PUT "https://api.symbl.ai/v1/process/text/$CONVERSATIO
--header "Authorization: Bearer $AUTH_TOKEN" \
--header 'Content-Type: application/json' \
--data-raw '{
- "customEntities": [{"customType": "Hiring Process", "text": "internships"}],
- "detectPhrases": "True",
+ "detectPhrases": "true",
"messages": [
{
"payload": {
@@ -218,7 +217,7 @@ payload = {
"confidenceThreshold": 0.6, #
- "detectPhrases": True, #
+ "detectPhrases": True, #
"messages": [
{
"payload": {
diff --git a/docs/async-api/overview/video/post-video-url.md b/docs/async-api/overview/video/post-video-url.md
index d89a1f0d..1f50a8e9 100644
--- a/docs/async-api/overview/video/post-video-url.md
+++ b/docs/async-api/overview/video/post-video-url.md
@@ -193,7 +193,7 @@ payload = {
#
# 'customVocabulary': ['Platform', 'Discussion', 'Targets'],
# |Contains a list of words and phrases that provide hints to the speech recognition task.
- # 'detectPhrases': True,
+ # 'detectPhrases': True,
#
# 'languageCode': "en-US"
# |code of language of recording.>
diff --git a/docs/async-api/overview/video/post-video.md b/docs/async-api/overview/video/post-video.md
index f6cd7b17..5ec191b0 100644
--- a/docs/async-api/overview/video/post-video.md
+++ b/docs/async-api/overview/video/post-video.md
@@ -65,7 +65,7 @@ const params = {
// 'customVocabulary': ['Platform', 'Discussion', 'Targets'],
// |Contains a list of words and phrases that provide hints to the speech recognition task.
- // 'detectPhrases': True,
+ // 'detectPhrases': true,
// |Accepted values are true & false. It shows Actionable Phrases in each sentence of conversation. These sentences can be found in the Conversation's Messages API.
// 'languageCode': "en-US"
@@ -139,7 +139,7 @@ params = {
# 'customVocabulary': ['Platform', 'Discussion', 'Targets'],
# |Contains a list of words and phrases that provide hints to the speech recognition task.
- # 'detectPhrases': True,
+ # 'detectPhrases': True,
# |Accepted values are true & false. It shows Actionable Phrases in each sentence of conversation. These sentences can be found in the Conversation's Messages API.
# 'languageCode': "en-US"
diff --git a/docs/async-api/overview/video/put-video-url.md b/docs/async-api/overview/video/put-video-url.md
index cb9c9af2..b0e1b7f5 100644
--- a/docs/async-api/overview/video/put-video-url.md
+++ b/docs/async-api/overview/video/put-video-url.md
@@ -203,7 +203,7 @@ payload = {
# 'customVocabulary': ['Platform', 'Discussion', 'Targets'],
# |Contains a list of words and phrases that provide hints to the speech recognition task.
- # 'detectPhrases': True,
+ # 'detectPhrases': True,
# |Accepted values are true & false. It shows Actionable Phrases in each sentence of conversation. These sentences can be found in the Conversation's Messages API.
# 'languageCode': "en-US"
diff --git a/docs/async-api/overview/video/put-video.md b/docs/async-api/overview/video/put-video.md
index dc3118ee..1f56b8b0 100644
--- a/docs/async-api/overview/video/put-video.md
+++ b/docs/async-api/overview/video/put-video.md
@@ -74,7 +74,7 @@ const params = {
// 'customVocabulary': ['Platform', 'Discussion', 'Targets'],
// |Contains a list of words and phrases that provide hints to the speech recognition task.
- // 'detectPhrases': True,
+ // 'detectPhrases': true,
// " |Accepted values are true & false. It shows Actionable Phrases in each sentence of conversation. These sentences can be found in the Conversation's Messages API.>
// 'languageCode': "en-US"
@@ -151,7 +151,7 @@ params = {
# 'customVocabulary': ['Platform', 'Discussion', 'Targets'],
# |Contains a list of words and phrases that provide hints to the speech recognition task.
- # 'detectPhrases': True,
+ # 'detectPhrases': True,
# " |Accepted values are true & false. It shows Actionable Phrases in each sentence of conversation. These sentences can be found in the Conversation's Messages API.>
# 'languageCode': "en-US"
diff --git a/docs/changelog.md b/docs/changelog.md
index 4a32f5e2..35c2d5ac 100644
--- a/docs/changelog.md
+++ b/docs/changelog.md
@@ -11,6 +11,30 @@ import TabItem from '@theme/TabItem';
We continuously add new features and enhancements, fix critical bugs, and regularly deploy changes to improve performance. Keep a tab of our latest updates on this page.
+### 31 Jan 2022
+
+![api update](/img/api-update.png)
+- **Trackers Management UI**: You can create, view, edit, and delete Trackers via the Trackers Management UI. To access this feature, log in to [Symbl Platform](https://platform.symbl.ai/#/login).
+[Read more here](/docs/concepts/trackers#trackers-management-ui).
+
+### 11 Jan 2022
+
+![api update](/img/api-update.png)
+- **Streaming API Logs**: You can view the log details of all your Streaming API requests. To access this feature, log in to [Symbl Platform](https://platform.symbl.ai/#/login).
+[Read more here](/docs/streaming-api/api-reference#streaming-api-logs).
+
+### 10 Jan 2022
+
+![api update](/img/api-update.png)
+- **Word level confidence score**: You can get word-level confidence score in messages API.
+[Read more here](/docs/conversation-api/messages#word-level-confidence-score--labs).
+
+### 28 Dec 2021
+
+![api update](/img/api-update.png)
+- **Added custom vocabulary support in Topics API.** (LABS)
+[Read more here](/docs/conversation-api/get-topics#custom-vocabulary-for-topics-labs).
+
### 26 Nov 2021
![api update](/img/api-update.png)
diff --git a/docs/conversation-api/api-reference/messages.md b/docs/conversation-api/api-reference/messages.md
index c11de52d..46fbc10b 100644
--- a/docs/conversation-api/api-reference/messages.md
+++ b/docs/conversation-api/api-reference/messages.md
@@ -10,16 +10,15 @@ import TabItem from '@theme/TabItem';
---
-The Messages API returns a list of all the messages in a conversation. You can use this for providing **Speech to Text data (also known as transcription sometimes)** for video conference, meeting or telephone call.
+The Messages API returns a list of all the messages in a conversation. You can use this for getting **Speech to Text** data (also known as transcription) for video conference, meeting or a telephone call.
-Here message refer to a continuous sentence spoken by a speaker.
+Here, the message refers to a continuous sentence by a speaker.
-### Sentiment Analysis in messages BETA
+#### Sentiment Analysis in messages BETA
-Here you can enable sentiment analysis over each message which is being spoken in the conversation.
-
-All you need to do is pass `sentiment=true` as a query parameter. [Read more about it](/docs/concepts/sentiment-analysis).
+You can enable sentiment analysis over each message being spoken in the conversation.
+To do this, pass the query parameter `sentiment=true`. Read more about Sentiment Analysis [here](/docs/concepts/sentiment-analysis).
### HTTP Request
@@ -27,8 +26,6 @@ All you need to do is pass `sentiment=true` as a query parameter. [Read more abo
### Example API Call
-
-
:::info
Before using the Conversation API you must get the authentication token (`AUTH_TOKEN`) from [our authentication process](/docs/developer-tools/authentication).
:::
@@ -121,10 +118,10 @@ Header Name | Required | Description
### Query Params
-Parameter | Required | Value |Description|
+Parameter | Required | Value |Description |
--------- | --------- | ------- | -------
-```verbose``` | No | true |Gives you word level timestamps of each sentence.
-```sentiment```| No | true | Give you [Sentiment Analysis](/docs/concepts/sentiment-analysis) on each message.
+```verbose``` | Optional | true | Gives you word level timestamps and score of each sentence.
+```sentiment```| Optional | true | Give you [Sentiment Analysis](/docs/concepts/sentiment-analysis) on each message.
### Response
@@ -140,11 +137,13 @@ Parameter | Required | Value |Description|
},
"startTime": "2020-07-10T11:16:21.024Z",
"endTime": "2020-07-10T11:16:26.724Z",
+ "timeOffset": 5.9,
+ "duration": 1,
"conversationId": "6749556955938816",
"phrases": [
{
"type": "action_phrase",
- "text": "$69.99 per month"
+ "text": "$69.99 per month"
}
],
"sentiment": {
@@ -158,41 +157,69 @@ Parameter | Required | Value |Description|
"word": "Best",
"startTime": "2020-08-18T11:10:14.536Z",
"endTime": "2020-08-18T11:10:15.536Z",
+ "score": 0.91,
+ "timeOffset": 5.9,
+ "duration": 0.2
+
},
{
"word": "package",
"startTime": "2020-08-18T11:10:16.536Z",
"endTime": "2020-08-18T11:10:17.536Z",
+ "score": 0.80,
+ "timeOffset": 6.1,
+ "duration": 0.1
+
},
{
"word": "for",
"startTime": "2020-08-18T11:10:18.536Z",
"endTime": "2020-08-18T11:10:19.536Z",
+ "score": 0.68,
+ "timeOffset": 6.2,
+ "duration": 0.1
+
},
{
"word": "you",
"startTime": "2020-08-18T11:10:20.536Z",
"endTime": "2020-08-18T11:10:22.536Z",
+ "score": 0.68,
+ "timeOffset": 6.3,
+ "duration": 0.3
+
},
{
"word": "is",
"startTime": "2020-08-18T11:10:22.536Z",
"endTime": "2020-08-18T11:10:25.536Z",
+ "score": 0.68,
+ "timeOffset": 6.6,
+ "duration": 0.3
},
{
"word": "$69.99",
"startTime": "2020-08-18T11:10:25.536Z",
"endTime": "2020-08-18T11:10:27.536Z",
+ "score": 0.68,
+ "timeOffset": 6.67,
+ "duration": 0.3
},
{
"word": "per",
"startTime": "2020-08-18T11:10:27.536Z",
"endTime": "2020-08-18T11:10:29.536Z",
+ "score": 0.67,
+ "timeOffset": 6.6,
+ "duration": 0.4
},
{
"word": "month.",
"startTime": "2020-08-18T11:10:30.536Z",
"endTime": "2020-08-18T11:10:32.536Z",
+ "score": 0.67,
+ "timeOffset": 6.8,
+ "duration": 0.5
}]
},
{
@@ -204,11 +231,13 @@ Parameter | Required | Value |Description|
}
"startTime": "2020-08-18T11:11:14.536Z",
"endTime": "2020-08-18T11:11:18.536Z",
+ "timeOffset": 15.27,
+ "duration": 1.23,
"conversationId": "5139780136337408",
"phrases": [],
"sentiment": {
"polarity": {
- "score": 0.2
+ "score": 0.2
},
"suggested": "neutral"
},
@@ -216,27 +245,43 @@ Parameter | Required | Value |Description|
{
"word": "Okay,",
"startTime": "2020-08-18T11:11:14.536Z",
- "endTime": "2020-08-18T11:11:14.936Z"
+ "endTime": "2020-08-18T11:11:14.936Z",
+ "score": 0.91,
+ "timeOffset": 15.25,
+ "duration": 0.59
+
},
{
"word": "Where",
"startTime": "2020-08-18T11:11:14.936Z",
- "endTime": "2020-08-18T11:11:15.436Z"
+ "endTime": "2020-08-18T11:11:15.436Z",
+ "score": 0.91,
+ "timeOffset": 15.25,
+ "duration": 0.59
},
{
"word": "is",
"startTime": "2020-08-18T11:11:16.236Z",
- "endTime": "2020-08-18T11:11:16.536Z"
+ "endTime": "2020-08-18T11:11:16.536Z",
+ "score": 0.88,
+ "timeOffset": 15.25,
+ "duration": 0.58
},
{
"word": "the",
"startTime": "2020-08-18T11:11:16.536Z",
- "endTime": "2020-08-18T11:11:16.936Z"
+ "endTime": "2020-08-18T11:11:16.936Z",
+ "score": 0.85,
+ "timeOffset": 15.25,
+ "duration": 0.58
},
{
"word": "file?",
"startTime": "2020-08-18T11:11:16.936Z",
- "endTime": "2020-08-18T11:11:17.236Z"
+ "endTime": "2020-08-18T11:11:17.236Z",
+ "score": 0.89,
+ "timeOffset": 15.25,
+ "duration": 0.59
}
]
}
@@ -246,12 +291,14 @@ Parameter | Required | Value |Description|
Field | Description
---------- | ------- |
-```id``` | Unique message identifier.
-```text``` | Message text.
-```from``` | User object with name and email.
-```startTime``` | DateTime value.
-```endTime``` | DateTime value.
-```conversationId``` | Unique conversation identifier.
-```words``` | Words object with properties `word`, `startTime` and `endTime`.
-```phrases``` | It shows the most important action phrases in each sentence. It's enabled when you pass `detectPhrases=true` during submiting the request in Async and Websocket API.
-```sentiment```| Shows the sentiment polarity(intensity of negativity or positivity of a sentence) and suggested sentiment type (positive, negative and neutral).
+```id``` | Unique message identifier.|
+```text``` | Message text.|
+```from``` | User object with name and email.|
+```startTime``` | DateTime value.|
+```endTime``` | DateTime value.|
+```timeOffset``` | Returned as a float value measuring in seconds, up to 2 decimal points. It indicates the seconds elapsed since the start of the conversation. It is returned at the sentence level as well as the word level. timeOffset= startTime (of current sentence/ word) - startTime (of the very first sentence/ word in the conversation). This variable is currently in Labs.|
+```duration``` | Returned as a float value measuring in seconds, up to 2 decimal points. It indicates for how long the sentence or word was spoken. It is returned at the sentence level as well as the word level. `duration= endTime (of current sentence/ word) - startTime (of current sentence/ word)`. This variable is currently in Labs.
+```conversationId``` | Unique conversation identifier. Read more about the Conversation ID [here](/docs/api-reference/getting-conversation-intelligence#what-is-a-conversation-id). |
+```words``` | Words object with properties `word`, `startTime`, `endTime` and `score`. The `score` is the word level confidence score that represents the confidence level of individual words within the transcript. The `score` shows the relevancy of the word in the transcript. Higher the word-level confidence score, the more relevant it is to the transcript message. When you pass `verbose=true`, the word-level confidence score is by default returned. Note that a processed `text` conversation will not return any confidence score since it is already in the transcript form. `words` also return the `timeOffset` and `duration` variables. The word level confidence score is currently in Labs. |
+```phrases``` | It shows the most important action phrases in each sentence. It's enabled when you pass `detectPhrases=true` during submitting the request in Async and Websocket API.|
+```sentiment```| Shows the sentiment polarity(intensity of negativity or positivity of a sentence) and suggested sentiment type (positive, negative and neutral). |
diff --git a/docs/conversation-api/api-reference/summary.md b/docs/conversation-api/api-reference/summary.md
index 235e5cd0..60ad7c71 100644
--- a/docs/conversation-api/api-reference/summary.md
+++ b/docs/conversation-api/api-reference/summary.md
@@ -1,7 +1,7 @@
---
id: summary
-title: GET Summary (Beta)
-sidebar_label: GET Summary (Beta)
+title: GET Summary
+sidebar_label: GET Summary
slug: /conversation-api/summary
---
import Tabs from '@theme/Tabs';
@@ -9,8 +9,15 @@ import TabItem from '@theme/TabItem';
---
-:::note In Beta Phase
-This feature is in the Beta phase. If you have any questions, ideas or suggestions please reach out to us at devrelations@symbl.ai.
+:::info Symbl Labs
+This feature is a part of Symbl Labs. Symbl Labs is our experimental wing designed to share our bleeding edge AI research on human conversations with anyone who wants to explore its limits.
+
+
+You can access the Labs features using your Symbl App Id and Secret. If you don't already have it, sign up on [platform](https://platform.symbl.ai/#/login) to get your credentials.
+
+**Note**: The usage of data for Labs projects is stored for enhancing our research. We may continue to build, iterate, mutate or discontinue any of the below given features on the sole discretion of our team as deemed necessary.
+
+For any queries or feedback, please contact us at labs@symbl.ai.
:::
This API allows you to get a [Summary](/docs/concepts/summarization) of important contextual messages in a conversation.
@@ -25,7 +32,7 @@ If the number of words in a conversation is below 50 or the number of sentences
### API Endpoint
-**GET `https://api.symbl.ai/v1/conversations/{conversationId}/summary`**
+**GET `https://api-labs.symbl.ai/v1/conversations/{conversationId}/summary`**
### Request Headers
@@ -50,7 +57,7 @@ If you are using `x-api-key` we recommend that you use `Authorization` header in
```shell
-curl --location --request GET 'https://api.symbl.ai/v1/conversations/{conversationId}/summary' \
+curl --location --request GET 'https://api-labs.symbl.ai/v1/conversations/{conversationId}/summary' \
--header "Authorization: Bearer $AUTH_TOKEN" \
# Set your access token here. See https://docs.symbl.ai/docs/developer-tools/authentication
--header 'Content-Type: application/json' \
@@ -65,7 +72,7 @@ const request = require('request');
const authToken = AUTH_TOKEN;;
request.get({
- url: `https://api.symbl.ai/v1/conversations/{conversationId}/summary`,
+ url: `https://api-labs.symbl.ai/v1/conversations/{conversationId}/summary`,
headers: { 'Authorization': `Bearer ${authToken}` },
json: true
}, (err, response, body) => {
@@ -75,6 +82,11 @@ request.get({
+:::important
+
+The Conversation ID you receive after processing conversations with production Endpoint can be used to process Summary using the Labs Endpoint.
+:::
+
### Using Refresh Parameter
You can use the `refresh=true` as query parameter in the Summary API for any of the following use-cases:
diff --git a/docs/conversation-api/api-reference/topics.md b/docs/conversation-api/api-reference/topics.md
index 580b063a..61c65ca8 100644
--- a/docs/conversation-api/api-reference/topics.md
+++ b/docs/conversation-api/api-reference/topics.md
@@ -33,6 +33,10 @@ in parent and child topics which helps outline the entire conversation faster. R
Topics can be generated again when you have new discussion items. Use `refresh=true` in the Topics API as a query param. This will delete the previous Topics and will create a new one.
+#### Custom Vocabulary for Topics LABS
+
+You can enable custom vocabulary in Topics API by passing the query parameter `customVocabulary=true`.
+
### HTTP Request
`GET https://api.symbl.ai/v1/conversations/{conversationId}/topics`
@@ -126,11 +130,11 @@ exit()
### Query Params
-Parameter | Required | Value |Description|
---------- | --------- | ------- | -------
-```sentiment```| No | true | Give you sentiment analysis on each topic in conversation.
-```parentRefs```| No | true | Gives you [topic hierarchy](/docs/concepts/topic-hierarchy).
-
+Parameter | Required | Type | Value | Description|
+--------- | --------- | ------- | ------- | ------- |
+```sentiment```| Optional | Boolean | `true` or `false` | Give you sentiment analysis on each topic in conversation.
+```parentRefs```| Optional | Boolean | `true` or `false` | Gives you [topic hierarchy](/docs/concepts/topic-hierarchy).
+```customVocabulary``` | Optional | String | | Gives you topics that contain the custom vocabulary keywords you provided.
@@ -254,6 +258,61 @@ Parameter | Required | Value |Description|
]
}
```
+> Custom Vocabulary Sample Response (`customVocabulary`):
+
+```javascript
+{
+ "topics": [
+ {
+ "id": "5907389282779136",
+ "text": "interns",
+ "type": "topic",
+ "score": 0.7178597920690242,
+ "messageIds": [
+ "4600982711304192",
+ "5487363432120320",
+ "6109794119188480"
+ ],
+ "parentRefs": [
+ {
+ "type": "topic",
+ "text": "company-wise hiring"
+ }
+ ]
+ },
+ {
+ "id": "5776859730018304",
+ "text": "company-wise hiring",
+ "type": "topic",
+ "score": 0.788856914361565,
+ "messageIds": [
+ "6298570346987520",
+ "6330577953226752"
+ ],
+ "parentRefs": []
+ },
+ {
+ "id": "6697188878974976",
+ "text": "new regulations",
+ "type": "topic",
+ "score": 0.6968750176932417,
+ "messageIds": [
+ "5356560840654848",
+ "5663440783802368",
+ "5263998490509312",
+ "6082396449406976",
+ "4925138187321344"
+ ],
+ "parentRefs": [
+ {
+ "type": "topic",
+ "text": "company-wise hiring"
+ }
+ ]
+ }
+ ]
+}
+```
### Response Object
@@ -265,4 +324,4 @@ Field | Description
```score``` | Confidence score of the generated topic. value from 0 - 1.
```messageIds``` | Unique message identifiers of the corresponding messages.
```parentRefs``` | This is enabled when `parentRefs` is set to true in request. Object containing type (as topic) and text of parent topic.
-```sentiment```| Shows the [sentiment](/docs/concepts/sentiment-analysis) polarity (the intensity of negativity or positivity of a sentence) and suggested sentiment type (positive, negative and neutral).
+```sentiment```| Shows the [sentiment](/docs/concepts/sentiment-analysis) polarity (the intensity of negativity or positivity of a sentence) and suggested sentiment type (positive, negative and neutral).
\ No newline at end of file
diff --git a/docs/conversation-api/api-reference/trackers.md b/docs/conversation-api/api-reference/trackers.md
index c8ba9b40..11a619b4 100644
--- a/docs/conversation-api/api-reference/trackers.md
+++ b/docs/conversation-api/api-reference/trackers.md
@@ -118,30 +118,44 @@ exit()
### Response
```json
-{
- "type": "vocabulary",
- "value": "Can you reiterate that one more time",
- "messageRefs": [
- {
- "id": "6428676305453056",
- "text": "So I am not showing that here but you can have that, you know, for particular sentence and, you know, then aggregate based on the whole conversation.",
- "offset": -1
- },
- {
- "id": "6035928066818048",
- "text": "Give that intent and name and that's it.",
- "offset": -1
- }
- ],
- "insightRefs": [
- {
- "text": "Yeah, and you So from sentiment analysis perspective, right?",
- "offset": -1,
- "type": "question",
- "id": "5794360651153408"
- }
- ]
-}
+[
+ {
+ "id": "5237067555536896",
+ "name": "PricingMention",
+ "matches": [
+ {
+ "type": "vocabulary",
+ "value": "What is the price",
+ "messageRefs": [
+ {
+ "id": "4667009028587520",
+ "text": "How much does it cost?",
+ "offset": -1
+ }
+ ],
+ "insightRefs": [
+ {
+ "text": "How much does it cost?",
+ "offset": -1,
+ "type": "question",
+ "id": "5420651570528256"
+ }
+ ]
+ },
+ {
+ "type": "vocabulary",
+ "value": "Subscription",
+ "messageRefs": [
+ {
+ "id": "4527958187311104",
+ "text": "Our subscription plan which includes the premium suite of services is $500 per month.",
+ "offset": 4
+ }
+ ],
+ "insightRefs": []
+ }
+ ]
+  }
+]
```
Let’s go over the members of the response body which contains the detected tracker objects:
diff --git a/docs/conversation-api/concepts/summarization.md b/docs/conversation-api/concepts/summarization.md
index 59b741ba..385f982a 100644
--- a/docs/conversation-api/concepts/summarization.md
+++ b/docs/conversation-api/concepts/summarization.md
@@ -1,6 +1,6 @@
---
id: summarization
-title: Summarization API- Capturing Key Points (Beta)
+title: Summarization API- Capturing Key Points (Labs)
description: Use Symbl.ai’s summarization API to capture key points in a conversation and create succinct summaries. Learn more.
sidebar_label: Introduction
slug: /concepts/summarization
@@ -11,8 +11,15 @@ import TabItem from '@theme/TabItem';
---
-:::note In Beta Phase
-This feature is in the Beta phase. If you have any questions, ideas or suggestions please reach out to us at devrelations@symbl.ai.
+:::info Symbl Labs
+This feature is a part of Symbl Labs. Symbl Labs is our experimental wing designed to share our bleeding edge AI research on human conversations with anyone who wants to explore its limits.
+
+
+You can access the Labs features using your Symbl App Id and Secret. If you don't already have it, sign up on [platform](https://platform.symbl.ai/#/login) to get your credentials.
+
+**Note**: The usage of data for Labs projects is stored for enhancing our research. We may continue to build, iterate, mutate or discontinue any of the below given features on the sole discretion of our team as deemed necessary.
+
+For any queries or feedback, please contact us at labs@symbl.ai.
:::
Symbl distills important messages and creates succinct Summaries for long conversations. You can get these Summaries using the [Summary API](/docs/conversation-api/summary).
@@ -39,13 +46,15 @@ If the number of words in a conversation is below 50 or the number of sentences
You can enable the Summary API for Async APIs using the following endpoints:
+Note that the base URL for Symbl Labs is always `https://api-labs.symbl.ai`
+
API | Summary Endpoint
---------- | -------
-[Async Text API (POST/PUT)](/docs/async-api/overview/text/post-text)| ```https://api.symbl.ai/v1/process/text?enableSummary=true ```
-[Async Audio API (POST/PUT)](/docs/async-api/overview/audio/post-audio)| ```https://api.symbl.ai/v1/process/audio?enableSummary=true```
-[Async Audio URL API (POST/PUT)](/docs/async-api/overview/audio/post-audio-url)| ```https://api.symbl.ai/v1/process/audio/url?enableSummary=true```
-[Async Video API (POST/PUT)](/docs/async-api/overview/video/post-video)| ```https://api.symbl.ai/v1/process/video?enableSummary=true```
-[Async Video URL API (POST/PUT)](/docs/async-api/overview/video/post-video-url)| ```https://api.symbl.ai/v1/process/video/url?enableSummary=true```
+[Async Text API (POST/PUT)](/docs/async-api/overview/text/post-text)| ```https://api-labs.symbl.ai/v1/process/text?enableSummary=true ```
+[Async Audio API (POST/PUT)](/docs/async-api/overview/audio/post-audio)| ```https://api-labs.symbl.ai/v1/process/audio?enableSummary=true```
+[Async Audio URL API (POST/PUT)](/docs/async-api/overview/audio/post-audio-url)| ```https://api-labs.symbl.ai/v1/process/audio/url?enableSummary=true```
+[Async Video API (POST/PUT)](/docs/async-api/overview/video/post-video)| ```https://api-labs.symbl.ai/v1/process/video?enableSummary=true```
+[Async Video URL API (POST/PUT)](/docs/async-api/overview/video/post-video-url)| ```https://api-labs.symbl.ai/v1/process/video/url?enableSummary=true```
Once the above API job is complete, the corresponding Summary can be obtained by sending a GET request to the Summary API. See the [**Summary API Documentation**](/docs/conversation-api/summary) for details.
diff --git a/docs/conversation-api/concepts/trackers.md b/docs/conversation-api/concepts/trackers.md
index 4e9f55e8..b3a1511b 100644
--- a/docs/conversation-api/concepts/trackers.md
+++ b/docs/conversation-api/concepts/trackers.md
@@ -16,7 +16,7 @@ This feature is in the Beta phase. If you have any questions, ideas or suggestio
## What is a Tracker?
-Trackers are user-defined enities that allow you to track the occurrences of any characteristics or events in a conversation with just a few examples. You can track critical moments in a conversation across several use cases in both real-time as the conversation is in-progress as well as asynchronously after the conversation is over from recordings. Some use cases for Trackers are when a customer is unhappy, when someone is rude, potential sales opportunity so you can identify emerging trends and gauge the nature of interactions.
+Trackers are user-defined entities that allow you to track the occurrences of any characteristics or events in a conversation with just a few examples. You can track critical moments in a conversation across several use cases in both real-time as the conversation is in-progress as well as asynchronously after the conversation is over from recordings. Some use cases for Trackers are when a customer is unhappy, when someone is rude, potential sales opportunity so you can identify emerging trends and gauge the nature of interactions.
## How Trackers Work
@@ -80,7 +80,7 @@ Operation | Endpoint
---------- | -------
Create Tracker | [`POST` v1/manage/tracker](/management-api/trackers/create-tracker)
Create Trackers in Bulk | [`POST` v1/manage/trackers](/management-api/trackers/create-tracker#bulk-create-trackers-api)
-Get Tracker with ID| [`GET`v1/manage/tracker/{trackerId}](/management-api/trackers/get-tracker#get-tracker-by-id)
+Get Tracker with ID| [`GET` v1/manage/tracker/{trackerId}](/management-api/trackers/get-tracker#get-tracker-by-id)
Get Tracker with name | [`GET` v1/manage/trackers?&name={trackerName}](/management-api/trackers/get-tracker#get-tracker)
Update Tracker| [`PUT`v1/manage/tracker/{trackerId}](/management-api/trackers/update-tracker)
Delete Tracker| [`DELETE`v1/manage/tracker/{trackerId}](/management-api/trackers/delete-tracker)
@@ -89,6 +89,36 @@ Delete Tracker| [`DELETE`v1/manage/tracker/{trackerId}](/management-api/trackers
Currently, Trackers is supported with Symbl’s Async APIs and Streaming APIs.
:::
+### Trackers Management UI
+
+You can create, view, edit, and delete Trackers via the Trackers Management UI. To access this feature, log in to the [Symbl Platform](https://platform.symbl.ai/#/login).
+
+The following capabilities are supported in the Trackers Management UI:
+
+- Create Trackers
+- View Trackers
+- Edit Trackers
+- Delete Trackers
+
+![img-tracker-ui](/img/tracker-ui-1.png)
+
+:::tip
+The Trackers Management UI allows you to copy the Tracker ID with the copy button and use it directly in your code.
+
+1. Go to the created Tracker and click on the select option. The edit options show up.
+
+![copy-tracker-id](/img/copy-tracker-id.png)
+2. Click **Copy ID**. This copies the Tracker ID in the following format:
+
+`{"trackers":[{"id":"4807227589263360"}]}`
+:::
+
+:::important
+**Using punctuations**: You can only pass periods `.`, apostrophes `'` and dashes `-` in the Trackers vocabulary. Other punctuations like `?`, `,`, `!`, `:` are not allowed.
+**Vocabulary terms**: We recommend that you add at least 5 and a maximum of 50 vocabulary terms per Tracker.
+**Trackers limitation**: You can create up to 200 Trackers per account.
+:::
+
## Tutorials
- [How to create a Tracker](/docs/management-api/trackers/overview#consuming-trackers-with-management-api)
diff --git a/docs/developer-tools/errors.md b/docs/developer-tools/errors.md
index e06c00d1..9fcfa6fb 100644
--- a/docs/developer-tools/errors.md
+++ b/docs/developer-tools/errors.md
@@ -49,7 +49,7 @@ Error Code | Description | Resolution
Error Code | Description | Resolution
---------- | ------- | ---------
-404 - Not Found | The 409 response code specifies that the Tracker with that specific trackerId does not exist. | Check the trackerId and ensure that it is valid and exists.
+404 - Not Found | The 404 response code specifies that the Tracker with that specific trackerId does not exist. | Check the trackerId and ensure that it is valid and exists.
```bash
diff --git a/docs/faq.md b/docs/faq.md
index bbf37ed3..499e2953 100644
--- a/docs/faq.md
+++ b/docs/faq.md
@@ -49,4 +49,20 @@ If the Diarization feature is set to `true`, it will take priority over Speaker
No, Trackers cannot be run one at a time.
-If you have not processed your conversation with the parameter `enableAllTracker=true` in the Async API, Trackers will not be detected. To learn why and understand how to use this parameter while processing your conversation, see [Consuming Trackers with Management API](/docs/management-api/trackers/overview#step-2-submit-files-using-async-api-with-enablealltrackers-flag) section.
\ No newline at end of file
+If you have not processed your conversation with the parameter `enableAllTracker=true` in the Async API, Trackers will not be detected. To learn why and understand how to use this parameter while processing your conversation, see [Consuming Trackers with Management API](/docs/management-api/trackers/overview#step-2-submit-files-using-async-api-with-enablealltrackers-flag) section.
+
+### Are punctuations allowed in the trackers vocabulary?
+
+You can only pass periods `.`, apostrophes `'` and dashes `-` in the trackers vocabulary. Other punctuations like `?`, `,`, `!`, `:` are not allowed.
+
+### How do I create Trackers?
+You can create Trackers using the Management API. You can also use the Tracker UI via the [Symbl Platform](https://platform.symbl.ai/#/login) to create, view, edit and delete Trackers.
+
+### How many vocabulary terms can I add in a Tracker?
+We recommend that you add at least 5 vocabulary terms and a maximum of 50 per Tracker.
+
+### How many Trackers can I create?
+You can create up to 200 Trackers per account.
+
+### Can I view the logs of Streaming API?
+Yes, you can view the Streaming API logs via the [Symbl Platform](https://platform.symbl.ai/#/login).
diff --git a/docs/integrations/agora-sdk-plugin.md b/docs/integrations/agora-sdk-plugin.md
index 6bbb2806..ffa65ca2 100644
--- a/docs/integrations/agora-sdk-plugin.md
+++ b/docs/integrations/agora-sdk-plugin.md
@@ -782,7 +782,13 @@ public class MainActivity extends AppCompatActivity implements io.agora.rtc2.IMe
}
}
```
-### API Reference
+
+## Sample Project
+---
+The following sample project provides an Android mobile app that uses the Agora Video SDK and the Symbl.ai Extension, and can be used as a reference. Follow the instructions in the README file to set up, configure, and run the sample mobile app on your own device.
+[Sample Android App Project](https://github.com/symblai/symbl-agora-Android-app).
+
+## API Reference
---
-Find comprehensive information about our REST APIs in the [API Reference](https://docs.symbl.ai/docs/api-reference/getting-started) section.
+Find comprehensive information about our REST APIs in the [API Reference](/docs/api-reference/getting-started) section.
diff --git a/docs/introduction.md b/docs/introduction.md
index 5da19026..11a6ca7f 100644
--- a/docs/introduction.md
+++ b/docs/introduction.md
@@ -48,7 +48,7 @@ Using the Symbl credentials, you can [generate the authentication token](/docs/d
Using the following APIs, send conversation data in real-time or after the conversation has taken place (async).
- 👉 [Async APIs](/docs/async-api/reference/reference) allow you to send text, audio, or video conversations in recorded format.
+ 👉 [Async APIs](/docs/async-api/introduction) allow you to send text, audio, or video conversations in recorded format.
👉 [Streaming APIs](/docs/streamingapi/introduction) allow you to connect Symbl on a live call via WebSocket protocol.
👉 [Telephony APIs](/docs/telephony/introduction) allow you to connect Symbl on a live audio conversation via SIP and PSTN.
diff --git a/docs/javascript-sdk/reference/reference.md b/docs/javascript-sdk/reference/reference.md
index 6b4a1016..4dd53ba2 100644
--- a/docs/javascript-sdk/reference/reference.md
+++ b/docs/javascript-sdk/reference/reference.md
@@ -54,7 +54,7 @@ Connects to the [Telephony API](/docs/telephony/introduction) endpoint using the
Name | Description
-----|------------
-`config` | Options specified for the [Telephony API Configuration Object](http://docs.symbl.ai/docs/telephony-api/api-reference#request-parameters).
+`config` | Options specified for the [Telephony API Configuration Object](/docs/telephony-api/api-reference#request-parameters).
#### Returns
@@ -124,13 +124,13 @@ sdk.stopEndpoint({
```startRealtimeRequest ( options)```
-Connects to a [Streaming API](/docs/streamingapi/overview/introduction) Web Socket endpoint using the provided configuration options.
+Connects to a [Streaming API](/docs/streamingapi/introduction) Web Socket endpoint using the provided configuration options.
#### Parameters
Name | Description
-----|------------
-`options` | Options specified for the [Streaming API Configuration Object](https://docs.symbl.ai/docs/streaming-api/api-reference#request-parameters).
+`options` | Options specified for the [Streaming API Configuration Object](/docs/streaming-api/api-reference#request-parameters).
#### Returns
@@ -138,7 +138,7 @@ A Promise which is resolved once real-time request has been established.
#### Event Handlers
-View the [Event Handlers](##event-handlers-1) section below to view which event handlers can be passed to the real-time connection.
+View the [Event Handlers](#event-handlers-1) section below to view which event handlers can be passed to the real-time connection.
#### Code Example
@@ -174,7 +174,7 @@ Subscribes to an existing connection which will fire a callback for every event
Name | Description
-----|------------
-`connectionId` | You receive the connection ID after connecting with [startRealtimeRequest](#startRealtimeRequest) or [startEndpoint](#startendpoint).
+`connectionId` | You receive the connection ID after connecting with [startRealtimeRequest](#startrealtimerequest) or [startEndpoint](#startendpoint).
`callback` | A callback method which will be called on for every new event.
#### Code Example
@@ -232,7 +232,7 @@ SpeakerEvent is a type of event Symbl can accept that provides information about
Name | Description
-----|------------
-`connectionId` | You receive the connection ID after connecting with [startRealtimeRequest](#startRealtimeRequest) or [startEndpoint](#startendpoint).
+`connectionId` | You receive the connection ID after connecting with [startRealtimeRequest](#startrealtimerequest) or [startEndpoint](#startendpoint).
`event` | An event (such as a [SpeakerEvent](/docs/javascript-sdk/code-snippets/active-speaker-events/#speaker-event)) which is the event to be pushed onto the connection.
`callback` | A callback method which will be called on for every new event.
@@ -262,7 +262,7 @@ sdk.pushEventOnConnection(
## Event Handlers
-When connecting using [`startRealtimeRequest`](#startRealtimeRequest), you can pass various handlers in the configuration options which be called if the specific event attached to the handler is fired.
+When connecting using [`startRealtimeRequest`](#startrealtimerequest), you can pass various handlers in the configuration options which be called if the specific event attached to the handler is fired.
#### Code Example
@@ -484,7 +484,7 @@ This callback provides you with any of the detected topics in real-time as they
### onTrackerResponse
-This callback provides you with any of the detected trackers in real-time as they are detected. As with the [`onMessageCallback`](#onmessagecallback) this would also return every tracker in case of multiple streams.
+This callback provides you with any of the detected trackers in real-time as they are detected. As with the [`onMessageCallback`](#onMessageCallback) this would also return every tracker in case of multiple streams.
#### onTrackerResponse JSON Response Example
diff --git a/docs/javascript-sdk/tutorials/get-realtime-transcription-js-sdk.md b/docs/javascript-sdk/tutorials/get-realtime-transcription-js-sdk.md
index e5b5b8a3..f51aeaa4 100644
--- a/docs/javascript-sdk/tutorials/get-realtime-transcription-js-sdk.md
+++ b/docs/javascript-sdk/tutorials/get-realtime-transcription-js-sdk.md
@@ -246,7 +246,7 @@ sdk.pushEventOnConnection(connectionId, speakerEvent.toJSON(), (err) => {
});
```
-This example just touches the surface of what you can do with our Streaming API. If you would like to learn more about it you can visit the [Streaming API documentation](/docs/streamingapi/overview/introduction).
+This example just touches the surface of what you can do with our Streaming API. If you would like to learn more about it you can visit the [Streaming API documentation](/docs/streamingapi/introduction).
## Full Code Example
diff --git a/docs/javascript-sdk/tutorials/push-audio-get-realtime-data.md b/docs/javascript-sdk/tutorials/push-audio-get-realtime-data.md
index 5934842f..34ab878e 100644
--- a/docs/javascript-sdk/tutorials/push-audio-get-realtime-data.md
+++ b/docs/javascript-sdk/tutorials/push-audio-get-realtime-data.md
@@ -36,7 +36,7 @@ In this guide you will learn the following:
* [Handlers (handlers)](#handlers-handlers)
* [Full Configuration Object](#full-configuration-object)
* [Handle the audio stream](#handle-the-audio-stream)
-* [Process speech using device's microphone](#process-speech-using-devices-microphone)
+* [Process speech using device's microphone](#process-speech-using-the-devices-microphone)
* [Test](#test)
* [Grabbing the Conversation ID](#grabbing-the-conversation-id)
* [Full Code Sample](#full-code-sample)
diff --git a/docs/labs.md b/docs/labs.md
index d2b2c7bb..5b80a73f 100644
--- a/docs/labs.md
+++ b/docs/labs.md
@@ -9,6 +9,8 @@ slug: /labs
Symbl Labs is our experimental wing designed to share our bleeding edge AI research on human conversations with anyone who wants to explore its limits.
You can access the Labs features using your Symbl App Id and Secret. If you don't already have it, sign up on the platform to get your credentials.
+
+Note that the base URL for all Symbl Labs features is always `https://api-labs.symbl.ai`.
:::note
The usage of data for Labs projects is stored for enhancing our research. We may continue to build, iterate, mutate or discontinue any of the below given features on the sole discretion of our team as deemed necessary.
diff --git a/docs/management-api/conversation-groups/get-conversation-groups.md b/docs/management-api/conversation-groups/get-conversation-groups.md
index fe322c93..e04049a4 100644
--- a/docs/management-api/conversation-groups/get-conversation-groups.md
+++ b/docs/management-api/conversation-groups/get-conversation-groups.md
@@ -20,7 +20,7 @@ To fetch multiple Conversation Groups, go to [Get Multiple Conversation Groups](
### API Endpoint
-Make a PUT request to the following API:
+Make a GET request to the following API:
**GET `https://api.symbl.ai/v1/manage/group/{groupId}`**
diff --git a/docs/management-api/introduction.md b/docs/management-api/introduction.md
index dfeb3648..bbed2705 100644
--- a/docs/management-api/introduction.md
+++ b/docs/management-api/introduction.md
@@ -33,6 +33,10 @@ Get Tracker with name | [`GET` v1/manage/trackers?name={trackerName}](/managemen
Update Tracker| [`PUT`v1/manage/tracker/{trackerId}](/management-api/trackers/update-tracker)
Delete Tracker| [`DELETE`v1/manage/tracker/{trackerId}](/management-api/trackers/delete-tracker)
+:::info Trackers Management UI
+You can create, view, edit and delete Trackers via the Trackers Management UI as well. To access this feature, log in to the [Symbl Platform](https://platform.symbl.ai/#/login).
+:::
+
### Conversation Groups Management APIs
Following are the API endpoints supported for managing Conversation Groups entity:
diff --git a/docs/management-api/trackers/create-tracker.md b/docs/management-api/trackers/create-tracker.md
index fb49f28c..95d13361 100644
--- a/docs/management-api/trackers/create-tracker.md
+++ b/docs/management-api/trackers/create-tracker.md
@@ -20,6 +20,10 @@ You can create Trackers in the following ways:
- [Using Async APIs](#create-trackers-using-async-apis)
- [Using Streaming API](#create-trackers-using-streaming-api)
+:::info Trackers Management UI
+You can create, view, edit and delete Trackers via the Trackers Management UI as well. To access this feature, log in to the [Symbl Platform](https://platform.symbl.ai/#/login).
+:::
+
You can also add several Trackers at the same time as a bulk operation. To learn how, see [**Bulk Create Trackers**](#create-trackers-in-bulk) section.
:::info Create Trackers with Management API
@@ -48,6 +52,14 @@ Header Name | Required | Description
For better tracking use prominent keywords and phrases along with few longer utterances which represent the Tracker.
:::
+:::note Using Punctuations in Trackers Vocabulary
+You can only pass the following punctuations in trackers vocabulary:
+- Periods `.`
+- Apostrophes `'`
+
+Using any other punctuation mark such as `?`, `,`, `!`, `:` is not allowed.
+:::
+
### Request Body
```javascript
diff --git a/docs/management-api/trackers/delete-tracker.md b/docs/management-api/trackers/delete-tracker.md
index 1f32e729..64647818 100644
--- a/docs/management-api/trackers/delete-tracker.md
+++ b/docs/management-api/trackers/delete-tracker.md
@@ -17,6 +17,9 @@ The Delete Tracker API will delete the Tracker entity against the `trackerId` pr
Currently, the Tracker entities can be consumed in the [Async APIs](/docs/async-api/introduction) only. Support for the other APIs will be added soon.
:::
+:::note Trackers Management UI
+You can create, view, edit and delete Trackers via the Trackers Management UI as well. To access this feature, log in to the [Symbl Platform](https://platform.symbl.ai/#/login).
+:::
### API Endpoint
**DELETE `https://api.symbl.ai/v1/manage/tracker/{trackerId}`**
diff --git a/docs/management-api/trackers/get-tracker.md b/docs/management-api/trackers/get-tracker.md
index e555c902..dcfd4576 100644
--- a/docs/management-api/trackers/get-tracker.md
+++ b/docs/management-api/trackers/get-tracker.md
@@ -11,6 +11,10 @@ slug: /management-api/trackers/get-tracker
This feature is in the Beta phase. If you have any questions, ideas or suggestions please reach out to us at devrelations@symbl.ai.
:::
+:::note Trackers Management UI
+You can create, view, edit and delete Trackers via the Trackers Management UI as well. To access this feature, log in to the [Symbl Platform](https://platform.symbl.ai/#/login).
+:::
+
You can GET Trackers in the following ways with the Management API:
- [GET All Trackers](#get-all-trackers)
- [GET Tracker by ID](#get-tracker-by-id)
@@ -116,7 +120,7 @@ In case of unsuccessful responses, the following error codes will be returned fr
Error Code | Description | Resolution
---------- | ------- | -------
-`404 - Not Found` | The 409 response code specifies that the Tracker with that specific `trackerId` does not exist. | Check the `trackerId` and ensure that it is valid and exists.
+`404 - Not Found` | The 404 response code specifies that the Tracker with that specific `trackerId` does not exist. | Check the `trackerId` and ensure that it is valid and exists.
`400 - Bad Request` | The 400 response code specifies that the request body or the parameters have incorrect key names or their values have types that are different than the ones expected. | Please read the message returned in the response to fix this error.
`500 - Internal Server Error` | The 500 response code specifies that the server failed to handle the request. | Please reach out to support@symbl.ai if it persists even after multiple attempts.
`502 - Bad Gateway` | The 502 response code specifies that the server failed to acknowledge the request. This may happen due to multiple reasons. | Please reach out to support@symbl.ai if it persists even after multiple attempts.
diff --git a/docs/management-api/trackers/overview.md b/docs/management-api/trackers/overview.md
index cf545b72..387388a8 100644
--- a/docs/management-api/trackers/overview.md
+++ b/docs/management-api/trackers/overview.md
@@ -33,6 +33,13 @@ Trackers can be consumed via the Management API, which takes the onus of maintai
All Trackers created using Management API are saved and can be reused for other operations such as PUT, UPDATE and DELETE.
To read about the capabilities of the Management API, see the [Management API](/docs/management-api/introduction) page.
+:::note Trackers Management UI
+You can also create, view, edit and delete Trackers via the Trackers Management UI. To access this feature, log in to the [Symbl Platform](https://platform.symbl.ai/#/login).
+
+- **Using punctuations**: You can only pass periods `.`, apostrophes `'` and dashes `-` in the trackers vocabulary. Other punctuations like `?`, `,`, `!`, `:` are not allowed.
+- **Vocabulary terms**: You must add at least 5 and a maximum of 50 vocabulary terms per Tracker.
+- **Trackers limitation**: You can create up to 200 Trackers per account.
+:::
### Step 1: Create Trackers
---
@@ -43,6 +50,13 @@ To read about the capabilities of the Management API, see the [Management API](/
POST "https://api.symbl.ai/v1/manage/tracker"
```
+:::note Using Punctuations in Trackers Vocabulary
+You can only pass the following punctuations in trackers vocabulary:
+- Periods `.`
+- Apostrophes `'`
+
+Using any other punctuation mark such as `?`, `,`, `!`, `:` is not allowed.
+:::
You can define the phrases and keywords in the **vocabulary** of the request body as shown below:
```json
diff --git a/docs/management-api/trackers/update-tracker.md b/docs/management-api/trackers/update-tracker.md
index af0b57db..7da8a718 100644
--- a/docs/management-api/trackers/update-tracker.md
+++ b/docs/management-api/trackers/update-tracker.md
@@ -13,6 +13,10 @@ This feature is in the Beta phase. If you have any questions, ideas or suggestio
To update an existing Tracker, send a PUT Tracker API request with Management API. This updates a Tracker entity against the `trackerId` which can be consumed in Symbl APIs.
+:::note Trackers Management UI
+You can create, view, edit and delete Trackers via the Trackers Management UI as well. To access this feature, log in to the [Symbl Platform](https://platform.symbl.ai/#/login).
+:::
+
### API Endpoint
**PUT `https://api.symbl.ai/v1/manage/tracker/{trackerId}`**
@@ -93,6 +97,6 @@ Error Code | Description | Resolution
`404 - Not Found` | The 404 response code specifies that the Tracker with that specific `trackerId` does not exist. | Check the `trackerId` and ensure that it is valid and exists.
`429 - Too many requests` | The 429 response code specifies that the number of concurrent requests surpassed the limit for the API (which is 1 API call at a time). | Ensure that your system doesn’t make concurrent API calls that exceed this limit.
`400 - Bad Request` | The 400 response code specifies that the request body or the parameters have incorrect key names or their values have types that are different than the ones expected. | Please read the message returned in the response to fix this error.
-`413 - Request Entity Too Large` | The 413 response code specifies that the size of the request body exceeds that of the maximum limit the API supports (which is 1 MB). | Please ensure that the size of the request body is under this limit to resolve this error.`500 - Internal Server Error` | The 500 response code specifies that the server failed to handle the request. | Please reach out to support@symbl.ai if it persists even after multiple attempts.
+`413 - Request Entity Too Large` | The 413 response code specifies that the size of the request body exceeds that of the maximum limit the API supports (which is 1 MB). | Please ensure that the size of the request body is under this limit to resolve this error.
`502 - Bad Gateway` | The 502 response code specifies that the server failed to acknowledge the request. This may happen due to multiple reasons. | Please reach out to support@symbl.ai if it persists even after multiple attempts.
-`504 - Gateway Timeout` | The 504 response code specifies that the server failed to respond within the timeout duration. | Please reach out to support@symbl.ai if it persists even after multiple attempts.
+`504 - Gateway Timeout` | The 504 response code specifies that the server failed to respond within the timeout duration. | Please reach out to support@symbl.ai if it persists even after multiple attempts.
\ No newline at end of file
diff --git a/docs/python-sdk/python-sdk-reference.md b/docs/python-sdk/python-sdk-reference.md
index 989f49b4..2b5232e2 100644
--- a/docs/python-sdk/python-sdk-reference.md
+++ b/docs/python-sdk/python-sdk-reference.md
@@ -217,7 +217,7 @@ To see an example of the usage of `put_members` functionality, go to out [GitHub
### conversation_object.put_speakers_events(parameters={})
-`parameters`:- (mandatory) takes a dictionary which contains `speakerEvents`. For list of parameters accepted, see [Speaker Events Object](https://docs.symbl.ai/docs/conversation-api/speaker-events/#speaker-event-object) page.
+`parameters`:- (mandatory) takes a dictionary which contains `speakerEvents`. For list of parameters accepted, see [Speaker Events Object](/docs/conversation-api/speaker-events/#speaker-event-object) page.
This API provides the functionality to update Speakers in a conversation after it has been processed.
diff --git a/docs/sdk-intro.md b/docs/sdk-intro.md
index 01205770..ef3459ba 100644
--- a/docs/sdk-intro.md
+++ b/docs/sdk-intro.md
@@ -13,15 +13,25 @@ import TabItem from '@theme/TabItem';
Programmatically use Symbl APIs and integrate it with your web applications and meeting platforms.
-Use Symbl's SDKs to directly add Symbl's capabilities onto your web conferencing platforms. It is available in popular programming languages.
+Use Symbl's SDKs to directly add Symbl's capabilities onto your web conferencing platforms. It is available in popular programming languages given below:
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+*The C# SDK is not maintained as a part of Symbl's official repository yet. While you can utilize this SDK, support for the same is not available at this point.*
+
---
diff --git a/docs/streamingapi/code-snippets/detect-key-phrases.md b/docs/streamingapi/code-snippets/detect-key-phrases.md
index 21727793..eaf44e10 100644
--- a/docs/streamingapi/code-snippets/detect-key-phrases.md
+++ b/docs/streamingapi/code-snippets/detect-key-phrases.md
@@ -100,7 +100,7 @@ const uuid = require('uuid').v4;
// When a tracker is detected in real-time
console.log('onTrackerResponse', JSON.stringify(data, null, 2));
if (!!data) {
- data.forEach((tracker) => {
+ data.trackers.forEach((tracker) => {
console.log(`Detected Tracker Name: ${tracker.name}`);
console.log(`Detected Matches`);
tracker.matches.forEach((match) => {
@@ -174,6 +174,135 @@ setTimeout(async () => {
}, 60 * 1000) // Stop connection after 1 minute i.e. 60 secs
```
+### Full code snippet
+
+```js
+const {sdk} = require('symbl-node');
+const uuid = require('uuid').v4;
+
+const mic = require('mic')
+
+const sampleRateHertz = 48000
+
+const micInstance = mic({
+ rate: sampleRateHertz,
+ channels: '1',
+ debug: false,
+ exitOnSilence: 6,
+});
+
+(async () => {
+ try {
+ // Initialize the SDK. You can find the appId and appSecret at https://platform.symbl.ai.
+ await sdk.init({
+ appId: appId,
+ appSecret: appSecret,
+ basePath: 'https://api.symbl.ai',
+ })
+
+ // Need unique Id
+ const id = uuid()
+
+ const connection = await sdk.startRealtimeRequest({
+ id,
+ insightTypes: ['action_item', 'question'],
+ trackers: [
+ {
+ name: "COVID-19",
+ vocabulary: [
+ "social distancing",
+ "cover your face with mask",
+ "vaccination"
+ ]
+ }
+ ],
+ config: {
+ meetingTitle: "My Awesome Meeting",
+ confidenceThreshold: 0.7,
+ languageCode: "en-US",
+ sampleRateHertz: 48000,
+ trackers: {
+ "interimResults": true
+ }
+ },
+ speaker: {
+ // Optional, if not specified, will simply not send an email in the end.
+ userId: "john@example.com", // Update with valid email
+ name: "John",
+ },
+ handlers: {
+ onTrackerResponse: (data) => {
+ // When a tracker is detected in real-time
+ console.log('onTrackerResponse', JSON.stringify(data, null, 2));
+ if (!!data) {
+ data.trackers.forEach((tracker) => {
+ console.log(`Detected Tracker Name: ${tracker.name}`);
+ console.log(`Detected Matches`);
+ tracker.matches.forEach((match) => {
+ console.log(`Tracker Value: ${match.value}`);
+ console.log(`Messages detected against this Tracker`);
+ match.messageRefs.forEach((messageRef) => {
+ console.log(`Message ID: ${messageRef.id}`);
+ console.log(`Message text for which the match was detected: ${messageRef.text}`);
+ console.log(`\n`);
+ });
+ console.log(`\n\n`);
+
+ console.log(`Insights detected against this Tracker`);
+ match.messageRefs.forEach((insightRef) => {
+ console.log(`Insight ID: ${insightRef.id}`);
+ console.log(`Insight text for which the match was detected: ${insightRef.text}`);
+ console.log(`Insight Type: ${insightRef.type}`);
+ console.log(`\n`);
+ });
+ console.log(`\n\n`);
+ });
+ });
+ }
+ },
+ },
+ });
+
+ const micInputStream = micInstance.getAudioStream()
+/** Raw audio stream */
+micInputStream.on('data', (data) => {
+ // Push audio from Microphone to websocket connection
+ connection.sendAudio(data)
+})
+
+micInputStream.on('error', function (err) {
+ console.log('Error in Input Stream: ' + err)
+})
+
+micInputStream.on('startComplete', function () {
+ console.log('Started listening to Microphone.')
+})
+
+micInputStream.on('silence', function () {
+ console.log('Got SIGNAL silence')
+})
+
+micInstance.start()
+
+setTimeout(async () => {
+ // Stop listening to microphone
+ micInstance.stop()
+ console.log('Stopped listening to Microphone.')
+ try {
+ // Stop connection
+ await connection.stop()
+ console.log('Connection Stopped.')
+ } catch (e) {
+ console.error('Error while stopping the connection.', e)
+ }
+}, 60 * 1000) // Stop connection after 1 minute i.e. 60 secs
+ } catch (e) {
+ console.error(e);
+ }
+})();
+
+```
+
#### Testing
Create a javascript file named `app.js` and copy this code into the file. Fill in the placeholder values with the proper values. Use npm to install the required libraries: `npm install symbl-node uuid`. Now in the terminal run
diff --git a/docs/streamingapi/code-snippets/receive-speech-to-text-for-different-languages.md b/docs/streamingapi/code-snippets/receive-speech-to-text-for-different-languages.md
index 45d44094..f963d33b 100644
--- a/docs/streamingapi/code-snippets/receive-speech-to-text-for-different-languages.md
+++ b/docs/streamingapi/code-snippets/receive-speech-to-text-for-different-languages.md
@@ -12,7 +12,7 @@ This example goes over how you can use the Symbl Streaming API to receive a spee
:::note
Currently, we only support English language in Streaming & Telephony API.
-We support languages other than English only for our enterprise plan.
+The support for Spanish language is also available in Streaming API in the Labs environment.
Please feel free to reach out to us at support@symbl.ai for any queries.
:::
diff --git a/docs/streamingapi/reference/reference.md b/docs/streamingapi/reference/reference.md
index 5d5d531a..7a1731e4 100644
--- a/docs/streamingapi/reference/reference.md
+++ b/docs/streamingapi/reference/reference.md
@@ -10,7 +10,7 @@ import TabItem from '@theme/TabItem';
---
-Symbl's Streaming API is based on WebSocket protocol and can be used for real-time use-cases where both the audio and its results from Symbl's back-end need to be available in real-time.
+Symbl's Streaming API is based on the WebSocket protocol and can be used for real-time use-cases where both the audio and its results need to be available in real-time.
:::info
Currently, Streaming API is supported in English language. The support for Spanish language is also available in the Labs environment.
@@ -29,10 +29,10 @@ Field | Required | Supported Value | Description
---------- | ------- | ------- | -------
```type``` | Mandatory | start_request, stop_request | Type of message
```insightTypes``` | Optional | action_item, question | Types of insights to return. If not provided, no insights will be returned.
-```customVocabulary``` | Optional | | An array of strings containing a vocabulary specific to your company, products, or phrases.
-```config``` | Optional | | Configuration for this request. [See the config section below for more details](#config).
-```speaker``` | Optional | | Speaker identity to use for audio in this WebSocket connection. If omitted, no speaker identification will be used for processing. [See the speaker section below for more details](#speaker).
-```noConnectionTimeout``` LABS | Optional | | The buffer time (in seconds) during which the WebSocket API connection stays open even if there’s no Streaming API connection active for that duration. This allows the Speaker to reconnect to the same meeting with the same Subscribers if they lost the connection previously. For example,
When this parameter is set to `noConnectionTimeout = 600 secs` and if there is no graceful termination using `stop_request` message sent explicitly when there just one WebSocket connection, the `connectionId` and `conversationId` are kept valid for 600 seconds before finalizing the connection, after which connectionId will be not available to subscribe and `conversationId` will have all the last know information associated with it.
+```customVocabulary``` | Optional | List of String | An array of strings containing a vocabulary specific to your company, products, or phrases.
+```config``` | Optional | Find the supported value [here](#config) | Configuration for this request. [See the config section below for more details](#config).
+```speaker``` | Optional | Find the supported value [here](#speaker) | Speaker identity to use for audio in this WebSocket connection. If omitted, no speaker identification will be used for processing. [See the speaker section below for more details](#speaker).
+```noConnectionTimeout``` LABS | Optional | Between `0` to `3600` seconds | The buffer time (in seconds) during which the WebSocket API connection stays open even if there’s no Streaming API connection active for that duration. This allows the Speaker to reconnect to the same meeting with the same Subscribers if they lost the connection previously. For example,
When this parameter is set to `noConnectionTimeout = 600 secs` and if there is no graceful termination using `stop_request` message sent explicitly when there just one WebSocket connection, the `connectionId` and `conversationId` are kept valid for 600 seconds before finalizing the connection, after which connectionId will be not available to subscribe and `conversationId` will have all the last know information associated with it.
```disconnectOnStopRequest``` LABS | Optional | `true` or `false` | This parameter allows you to set your Streaming API connection in such a way that even when the `stop_request` is sent. The connection does not drop-off, only the processing is stopped and the `conversationId` and connection is kept live for `1800` seconds by default. You can always override this value by passing the `disconnectOnStopRequest` parameter.
This allows you to stop and start the Streaming API processing without dropping the WebSocket connection, so that you can stop and resume the processing in the middle of a call and optimize the Streaming API usage costs.
The default value is `true`. |
```disconnectOnStopRequestTimeout``` LABS | Optional | Between `0` to `3600` seconds | This parameter allows you to override the idle time out (if a WebSocket connection is idle for 30 minutes). Set this parameter with a value between `0` to `3600` seconds. If the idle connection needs to be kept alive beyond `3600` seconds, you have to restart the connection at `3600` seconds elapsed.
If the value is passed as `0`, the WebSocket connection is dropped when `stop_request` is received. The default value is `1800`.
@@ -419,3 +419,25 @@ Let’s go over all the parameters passed in the configuration object in the abo
6. `handlers`: The object encapsulating the call-back functions to be invoked on detection of those specific entities. For more information on various other handlers, check out the [Javascript SDK Reference](/docs/javascript-sdk/reference#event-handlers-1).
a. `onTrackerResponse`: This function is invoked when Symbl detects a Tracker in real-time. The structure of the **Tracker** object is shown in the above code snippet.
+
+### Streaming API Logs
+
+You can view the logs of your Streaming API request on your Symbl Platform account. To view the logs, sign in to [Symbl Platform](https://platform.symbl.ai/#/login). The logs provide the following:
+
+- Connection ID
+
+- Conversation ID
+
+- Log details
+
+ - Date of Creation
+
+ - Log Type (Start request, Conversation created, Started listening, Recognition started, Stop request, etc.)
+
+- Ability to search for old logs
+
+- Ability to filter logs with dates
+
+- Ability to filter logs with only errors.
+
+![image-api-logs](/img/streaming-api-logs.png)
\ No newline at end of file
diff --git a/docs/streamingapi/tutorials/get-real-time-sentiment-analysis.md b/docs/streamingapi/tutorials/get-real-time-sentiment-analysis.md
index a3e5027d..bd985805 100644
--- a/docs/streamingapi/tutorials/get-real-time-sentiment-analysis.md
+++ b/docs/streamingapi/tutorials/get-real-time-sentiment-analysis.md
@@ -11,8 +11,6 @@ import TabItem from '@theme/TabItem';
In this guide you will learn how to get started with Symbl’s native Streaming API, which is our most accurate API for conversation analysis. Symbl's Streaming API is an API for enabling real-time conversational analysis on voice, video, or chat, or any live streaming directly through your web browser. If you have voice, video, or chat enabled, Symbl's API for streaming enables you to tap the raw conversational data of those streams. In addition to the setting up Symbl.ai's Streaming API, you create a function that logs sentiment analysis in real-time. Sentiment analysis operates through a call to the Message API with a query parameter.
-You can view the complete code sample for this tutorial on [GitHub](https://github.com/symblai/logging-real-time-sentiments):
-
:::note
The code sample you use today runs entirely in the browser without Node.js but requires you to understand HTTP requests.
:::
diff --git a/docs/telephony/tutorials/connect-to-zoom-with-telephony-api.md b/docs/telephony/tutorials/connect-to-zoom-with-telephony-api.md
index 926a2e23..d3a50639 100644
--- a/docs/telephony/tutorials/connect-to-zoom-with-telephony-api.md
+++ b/docs/telephony/tutorials/connect-to-zoom-with-telephony-api.md
@@ -13,7 +13,7 @@ import TabItem from '@theme/TabItem';
This guide uses a **PSTN** connection to connect to Zoom. **PSTN** audio quality maxes out to 8KHz. You can also use a **[SIP-based connection](/docs/concepts/pstn-and-sip#sip-session-initiation-protocol)**, which captures audio at 16KHz and above.
:::
-[Symbl’s Telephony API](https://docs.symbl.ai/?shell#telephony-api) allows you to connect to any conference call system using PSTN or SIP networks. In this guide, we will walk you through how to get a live transcription and real-time AI insights, such as [follow-ups](/docs/concepts/follow-ups), [action items](/docs/concepts/action-items), [topics](/docs/concepts/topics) and [questions](/docs/conversation-api/questions), of a Zoom call using a PSTN connection. This application uses the Symbl Javascript SDK which requires the `@symblai/symbl-js` node package. You must have an active Zoom call (no one has to be in it but yourself) and whatever you speak in the Zoom call will be taken by our API and processed for conversational insights.
+[Symbl’s Telephony API](/docs/telephony/introduction) allows you to connect to any conference call system using PSTN or SIP networks. In this guide, we will walk you through how to get a live transcription and real-time AI insights, such as [follow-ups](/docs/concepts/follow-ups), [action items](/docs/concepts/action-items), [topics](/docs/concepts/topics) and [questions](/docs/conversation-api/questions), of a Zoom call using a PSTN connection. This application uses the Symbl Javascript SDK which requires the `@symblai/symbl-js` node package. You must have an active Zoom call (no one has to be in it but yourself) and whatever you speak in the Zoom call will be taken by our API and processed for conversational insights.
:::info
You must make sure your Zoom call allows phone dial-in for this example to work correctly.
diff --git a/docs/telephony/tutorials/get-live-transcription-telephony-api.md b/docs/telephony/tutorials/get-live-transcription-telephony-api.md
index 5f9e42d2..3f23a631 100644
--- a/docs/telephony/tutorials/get-live-transcription-telephony-api.md
+++ b/docs/telephony/tutorials/get-live-transcription-telephony-api.md
@@ -13,7 +13,7 @@ Get a live transcription in your Node.js application by making a call to a valid
This application uses the Symbl Javascript SDK which requires the `symbl-node` node package.
-Making a phone call is also the quickest way to test [Symbl’s Telephony API](https://docs.symbl.ai/?shell#telephony-api). It can make an outbound call to a phone number using a traditional public switched telephony network [(PSTN)](https://en.wikipedia.org/wiki/Public_switched_telephone_network), any [SIP trunks](https://en.wikipedia.org/wiki/SIP_trunking), or SIP endpoints that can be accessed over the internet using a SIP URI.
+Making a phone call is also the quickest way to test [Symbl’s Telephony API](/docs/telephony/introduction). It can make an outbound call to a phone number using a traditional public switched telephony network [(PSTN)](https://en.wikipedia.org/wiki/Public_switched_telephone_network), any [SIP trunks](https://en.wikipedia.org/wiki/SIP_trunking), or SIP endpoints that can be accessed over the internet using a SIP URI.
### Contents
@@ -182,7 +182,7 @@ setTimeout(async () => {
}, 60000); // Change the 60000 with higher value if you want this to continue for more time.
```
-The `stopEndpoint` will return an updated `connection` object which will have the `conversationId` in the response. You can use `conversationId` to fetch the results even after the call using the [Conversation API](https://docs.symbl.ai/#conversation-api).
+The `stopEndpoint` will return an updated `connection` object which will have the `conversationId` in the response. You can use `conversationId` to fetch the results even after the call using the [Conversation API](/docs/conversation-api/introduction).
## Code Example
diff --git a/docs/tutorials/summarization/adding-speaker-info.md b/docs/tutorials/summarization/adding-speaker-info.md
index 06ba6d54..6ceb3ef1 100644
--- a/docs/tutorials/summarization/adding-speaker-info.md
+++ b/docs/tutorials/summarization/adding-speaker-info.md
@@ -1,6 +1,6 @@
---
id: adding-speaker-info
-title: Providing Speaker Information to generate Summary (Beta)
+title: Providing Speaker Information to generate Summary
sidebar_label: Providing Speaker Information to generate Summary
slug: /tutorials/summarization/adding-speaker-info
---
@@ -10,8 +10,15 @@ import TabItem from '@theme/TabItem';
---
-:::note In Beta Phase
-This feature is in the Beta phase. If you have any questions, ideas or suggestions please reach out to us at devrelations@symbl.ai.
+:::info Symbl Labs
+This feature is a part of Symbl Labs. Symbl Labs is our experimental wing designed to share our bleeding edge AI research on human conversations with anyone who wants to explore its limits.
+
+
+You can access the Labs features using your Symbl App Id and Secret. If you don't already have it, sign up on [platform](https://platform.symbl.ai/#/login) to get your credentials.
+
+**Note**: The usage of data for Labs projects is stored for enhancing our research. We may continue to build, iterate, mutate or discontinue any of the below given features on the sole discretion of our team as deemed necessary.
+
+For any queries or feedback, please contact us at labs@symbl.ai.
:::
Summaries are generated best when used with Speaker information captured in the conversation. It is highly recommended that you send us the speaker information to use this feature effectively.
diff --git a/docs/tutorials/summarization/getting-summary.md b/docs/tutorials/summarization/getting-summary.md
index 49bca403..85c0cd93 100644
--- a/docs/tutorials/summarization/getting-summary.md
+++ b/docs/tutorials/summarization/getting-summary.md
@@ -1,6 +1,6 @@
---
id: getting-summary
-title: How to get a Summary using Async API (Beta)
+title: How to get a Summary using Async API
sidebar_label: Get Summary using Async API
slug: /tutorials/summarization/getting-summary
---
@@ -9,8 +9,15 @@ import TabItem from '@theme/TabItem';
---
-:::note In Beta Phase
-This feature is in the Beta phase. If you have any questions, ideas or suggestions please reach out to us at devrelations@symbl.ai.
+:::info Symbl Labs
+This feature is a part of Symbl Labs. Symbl Labs is our experimental wing designed to share our bleeding edge AI research on human conversations with anyone who wants to explore its limits.
+
+
+You can access the Labs features using your Symbl App Id and Secret. If you don't already have it, sign up on [platform](https://platform.symbl.ai/#/login) to get your credentials.
+
+**Note**: The usage of data for Labs projects is stored for enhancing our research. We may continue to build, iterate, mutate or discontinue any of the below given features on the sole discretion of our team as deemed necessary.
+
+For any queries or feedback, please contact us at labs@symbl.ai.
:::
This tutorial provides a step-by-step instructions on how to get a Summary using Async APIs.
diff --git a/docs/tutorials/summarization/refreshing-summary.md b/docs/tutorials/summarization/refreshing-summary.md
index c6779510..02c67d96 100644
--- a/docs/tutorials/summarization/refreshing-summary.md
+++ b/docs/tutorials/summarization/refreshing-summary.md
@@ -1,6 +1,6 @@
---
id: refreshing-summary
-title: How to Refresh a Summary (Beta)
+title: How to Refresh a Summary
sidebar_label: Refreshing a Summary
slug: /tutorials/summarization/refreshing-summary
---
@@ -8,8 +8,16 @@ import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
---
-:::note In Beta Phase
-This feature is in the Beta phase. If you have any questions, ideas or suggestions please reach out to us at devrelations@symbl.ai.
+
+:::info Symbl Labs
+This feature is a part of Symbl Labs. Symbl Labs is our experimental wing designed to share our bleeding edge AI research on human conversations with anyone who wants to explore its limits.
+
+
+You can access the Labs features using your Symbl App Id and Secret. If you don't already have it, sign up on [platform](https://platform.symbl.ai/#/login) to get your credentials.
+
+**Note**: The usage of data for Labs projects is stored for enhancing our research. We may continue to build, iterate, mutate or discontinue any of the below given features on the sole discretion of our team as deemed necessary.
+
+For any queries or feedback, please contact us at labs@symbl.ai.
:::
When you wish to renegerate a Summary that you generated earlier, you can do so in two ways:
diff --git a/docs/web-sdk/muting-and-unmuting-connected-device.md b/docs/web-sdk/muting-and-unmuting-connected-device.md
deleted file mode 100644
index a7754cd8..00000000
--- a/docs/web-sdk/muting-and-unmuting-connected-device.md
+++ /dev/null
@@ -1,52 +0,0 @@
----
-id: muting-and-unmuting-connected-device
-title: Muting and Unmuting Connected Device (Beta)
-sidebar_label: Muting and Unmuting Connected Device (Beta)
-slug: /web-sdk/muting-and-unmuting-connected-device
----
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
----
-
-:::note IN BETA PHASE
-This feature is in the Beta phase. If you have any questions, ideas or suggestions please reach out to us at devrelations@symbl.ai.
-:::
-
-You can mute and unmute the connected device by simply calling `symbl.mute()` or `symbl.unmute()`.
-
-### Muting Device
-A quick snippet on how to use the mute method is given below:
-
-```js
-(async () => {
- // Creates the WebSocket in a non-processing state
- const stream = await symbl.createStream(connectionConfig);
- await symbl.mute(stream);
-})();
-```
-:::note Using createStream to start a realtime request
-Creating a stream using `symbl.startRealtimeRequest(config)` has been deprecated in favor of `symbl.createStream(config)`. For `createStream`, the WebSocket is started in a non processing state. You must send the start request before processing any audio.
-
-After the stream is created, you need to call `symbl.mute(stream)` to mute the device.
-:::
-
-### Unmuting Device
-A quick snippet on how to use the unmute method is given below:
-
-```js
-(async () => {
- // Creates the WebSocket in a non-processing state
- const stream = await symbl.createStream(connectionConfig);
- await symbl.unmute(stream);
-})();
-```
-
-:::note Using createStream to start a realtime request
-Creating a stream using `symbl.startRealtimeRequest(config)` has been deprecated in favor of `symbl.createStream(config)`. For `createStream`, the WebSocket is started in a non processing state. You must send the start request before processing any audio.
-
-After the stream is created, you need to call `symbl.unmute(stream)` to unmute the device.
-:::
-
-
diff --git a/docs/web-sdk/overview.md b/docs/web-sdk/overview.md
deleted file mode 100644
index 68b204a0..00000000
--- a/docs/web-sdk/overview.md
+++ /dev/null
@@ -1,159 +0,0 @@
----
-id: web-sdk
-title: Symbl Web SDK (Beta)
-sidebar_label: Introduction
-slug: /web-sdk/overview
----
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
----
-
-:::note IN BETA PHASE
-This feature is in the Beta phase. If you have any questions, ideas or suggestions please reach out to us at devrelations@symbl.ai.
-:::
-
-The Symbl Web SDK provides access to the Symbl APIs for applications directly in the browser.
-
-> **Source Code**
-Find the source code here: [https://github.com/symblai/symbl-web-sdk](https://github.com/symblai/symbl-web-sdk).
-
-
-## Supported Browsers
----
-
-|-- | Chrome | Edge Firefox | Firefox | Safari |
-| -------| ---------- | ------- | ----- | ------- |
-| macOS | ![icon](/img/tick-mark.png)| ![icon](/img/tick-mark.png)| ![icon](/img/tick-mark.png) | ![icon](/img/tick-mark.png) |
-| Windows | ![icon](/img/tick-mark.png) | ![icon](/img/tick-mark.png)| ![icon](/img/tick-mark.png) | |
-| Linux | ![icon](/img/tick-mark.png)| | ![icon](/img/tick-mark.png) |
-| iOS | ![icon](/img/tick-mark.png)| | ![icon](/img/tick-mark.png) | ![icon](/img/tick-mark.png) |
-| Android | ![icon](/img/tick-mark.png)| | ![icon](/img/tick-mark.png) | ![icon](/img/tick-mark.png) |
-
-## Setup
----
-**To use the Symbl Web SDK,**
-
-Include the following script tag in your HTML file:
-
-```html
-
-```
-or
-
-```html
-
-```
-
-In case of a front-end web application using a framework such as React, import it in the ES2015 style:
-
-```bash
-import symbl from "@symblai/symbl-web-sdk";
-```
-
-## Initialization
----
-The `init` authenticates you to use the Symbl API using the provided authentication credentials. To get authentication credentials (App ID and Secret), follow the steps given in the [Authentication](/docs/developer-tools/authentication#step-1-get-your-api-credentials) page.
-
-You can authenticate:
-
-- [Using your API Credentials](#authenticate-using-api-credentials)
-
- or
-
-- [Using your Auth Token](#authenticate-using-token)
-
-### Authenticate using API Credentials
-
-Use the code given below to authenticate using your App ID and App Secret.
-
-```js
-sdk.init({
- // APP_ID and APP_SECRET come from the Symbl Platform: https://platform.symbl.ai
- appId: APP_ID,
- appSecret: APP_SECRET,
- basePath: 'https://api.symbl.ai'
-})
-.then(() => console.log('SDK Initialized.'))
-.catch(err => console.error('Error in initialization.', err));
-```
-
-### Authenticate using Token
-
-Use the code given below to authenticate using the Auth Token. To generate the Auth Token follow the Steps given in the [Authentication](/docs/developer-tools/authentication#step-2-generate-the-access-token) Page.
-
-```js
-sdk.init({
- accessToken: ACCESS_TOKEN_HERE,
- basePath: 'https://api.symbl.ai'
-})
-.then(() => console.log('SDK Initialized.'))
-.catch(err => console.error('Error in initialization.', err));
-```
-
-
-:::info Web SDK in Labs
-The Web SDK is also available as a part of the [Symbl Labs](/docs/labs) with select features. You can find the Web SDK Labs Readme here: [https://github.com/symblai/symbl-web-sdk/blob/labs/README.md](https://github.com/symblai/symbl-web-sdk/blob/labs/README.md) and the source code here: [https://github.com/symblai/symbl-web-sdk/tree/labs](https://github.com/symblai/symbl-web-sdk/tree/labs).
-:::
-
-## Streaming API config options
-
-You can utilize the config options provided for our Streaming API. To read about the Streaming API config options, go to [Streaming API Reference](https://docs.symbl.ai/docs/streaming-api/api-reference/#request-parameters).
-
-### Additional Web SDK configs
-You can also pass the following configurations that are available specifically with the Web SDK:
-
-| Name | Default | Description |
-| -------| ---------- | ------- |
-| `sourceNode` | `null` | For passing in an external [MediaStreamAudioSourceNode](https://developer.mozilla.org/en-US/docs/Web/API/MediaStreamAudioSourceNode/MediaStreamAudioSourceNode) object. Although the Web SDK will handle the audio context and source nodes on its own by default, you can pass this option if you wish to handle it externally. |
-| `reconnectOnError` | `true` | If this option is set to `true`, the Web SDK will attempt to reconnect to the WebSocket in case of an error. You can also make use of our [onReconnectFail](/docs/web-sdk/web-sdk-reference#onreconnectfailerr) callback which will fire in case the reconnection attempt fails. |
-
-### Usage Example
-
-```js
-const id = btoa("my-first-symbl-ai-code");
-
-const connectionConfig = {
- id,
- insightTypes: ['action_item', 'question'],
- sourceNode: sourceNode,
- reconnectOnError: true,
- handlers: { // Read the handlers section for more
- ondevicechange: () => {
- alert('device changed!');
- },
- ...
- }
- ...
-}
-
-
-...
-
-// Creates the WebSocket in a non-processing state
-const stream = await symbl.createStream(connectionConfig);
-
-// Send the start request
-await symbl.unmute(stream);
-```
-
-## Tutorials
----
-We have prepared a list of tutorials to help you understand how to use the Web SDK.
-
-* [How to Transcribe a Live Audio Input through Microphone](/docs/web-sdk/transcribing-live-audio-through-microphone)
-* [How to pass a custom Node Source](/docs/web-sdk/passing-custom-sourcenode)
-* [How to pass a custom Device Change Handler](/docs/web-sdk/passing-custom-ondevicechange-handler)
-
-### Web SDK Reference
----
-The supported Handlers and Callbacks for the Web SDK are listed below:
-
-* [Event Handlers](/docs/javascript-sdk/reference#event-handlers-1)
- * [onSpeechDetected](/docs/javascript-sdk/reference#onspeechdetected)
- * [onMessageResponse](/docs/javascript-sdk/reference#onmessageresponse)
- * [onInsightResponse](/docs/javascript-sdk/reference#oninsightresponse)
- * [onTopicResponse](/docs/javascript-sdk/reference#ontopicresponse)
-
-👉 See the complete Web SDK Reference [here](/docs/web-sdk/web-sdk-reference).
\ No newline at end of file
diff --git a/docs/web-sdk/passing-a-custom-sourceNode.md b/docs/web-sdk/passing-a-custom-sourceNode.md
deleted file mode 100644
index a9d05a50..00000000
--- a/docs/web-sdk/passing-a-custom-sourceNode.md
+++ /dev/null
@@ -1,106 +0,0 @@
----
-id: passing-custom-sourcenode
-title: Passing a custom sourceNode (Beta)
-sidebar_label: Passing a custom sourceNode (Beta)
-slug: /web-sdk/passing-custom-sourcenode
----
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
----
-
-:::note IN BETA PHASE
-This feature is in the Beta phase. If you have any questions, ideas or suggestions please reach out to us at devrelations@symbl.ai.
-:::
-
-You can pass a custom `MediaStreamAudioSourceNode` object to the Web SDK. By default the Web SDK will create the AudioContext and the `MediaStreamAudioSourceNode` object automatically but using this will give you more control over those.
-
-Once you create the `MediaStreamAudioSourceNode` object you can pass it via the `connectionConfig` as sourceNode.
-
-```js
-// create the MediaStreamAudioSourceNode
-const AudioContext = window.AudioContext || window.webkitAudioContext;
-stream = await navigator.mediaDevices.getUserMedia({
- audio: true,
- video: false
-});
-context = new AudioContext();
-const sourceNode = context.createMediaStreamSource(stream);
-
-symbl.init({
- appId: '',
- appSecret: '',
- // accessToken: '', // can be used instead of appId and appSecret
- basePath: 'https://api.symbl.ai',
-});
-
-const id = btoa("my-first-symbl-ai-code");
-// pass in the MediaStreamAudioSourceNode as sourceNode
-const connectionConfig = {
- id,
- sourceNode,
- insightTypes: ['action_item', 'question'],
- config: {
- meetingTitle: 'My Test Meeting ' + id,
- confidenceThreshold: 0.7,
- timezoneOffset: 480, // Offset in minutes from UTC
- languageCode: 'en-US',
- sampleRateHertz: 48000
- },
- speaker: {
- // Optional, if not specified, will simply not send an email in the end.
- userId: '', // Update with valid email
- name: ''
- },
- handlers: {
- /**
- * This will return live speech-to-text transcription of the call.
- */
- onSpeechDetected: (data) => {
- if (data) {
- const {punctuated} = data
- console.log('Live: ', punctuated && punctuated.transcript)
- console.log('');
- }
- // console.log('onSpeechDetected ', JSON.stringify(data, null, 2));
- },
- /**
- * When processed messages are available, this callback will be called.
- */
- onMessageResponse: (data) => {
- // console.log('onMessageResponse', JSON.stringify(data, null, 2))
- },
- /**
- * When Symbl detects an insight, this callback will be called.
- */
- onInsightResponse: (data) => {
- // console.log('onInsightResponse', JSON.stringify(data, null, 2))
- },
- /**
- * When Symbl detects a topic, this callback will be called.
- */
- onTopicResponse: (data) => {
- // console.log('onTopicResponse', JSON.stringify(data, null, 2))
- }
- }
-};
-
-(async () => {
- // Creates the WebSocket in a non-processing state
- const stream = await symbl.createStream(connectionConfig);
-
- // Send the start request
- await stream.start(stream);
-})();
-```
-
-### Updating your external source node
-
-If you wish to update your external source node you can do so by using the `symbl.updateSourceNode` function:
-
-```js
-symbl.updateSourceNode(stream, sourceNode);
-```
-
-
diff --git a/docs/web-sdk/passing-custom-ondevicechange-handler.md b/docs/web-sdk/passing-custom-ondevicechange-handler.md
deleted file mode 100644
index 46c5d769..00000000
--- a/docs/web-sdk/passing-custom-ondevicechange-handler.md
+++ /dev/null
@@ -1,59 +0,0 @@
----
-id: passing-custom-ondevicechange-handler
-title: Passing a Custom Device Change Handler (Beta)
-sidebar_label: Passing a Custom Device Change Handler (Beta)
-slug: /web-sdk/passing-custom-ondevicechange-handler
----
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
----
-
-:::note IN BETA PHASE
-This feature is in the Beta phase. If you have any questions, ideas or suggestions please reach out to us at devrelations@symbl.ai.
-:::
-
-
-By default, the Symbl Web SDK will handle the `ondevicechange` event and send a `modify_request` event to modify the sample rate with the new device's sample rate. If you wish to override this logic, you can pass in your own `ondevicechange` handler in the handlers config.
-
-```js
-symbl.init({
- appId: '',
- appSecret: '',
- // accessToken: '', // can be used instead of appId and appSecret
- basePath: 'https://api.symbl.ai',
-});
-const id = btoa("my-first-symbl-ai-code");
-// pass in the MediaStreamAudioSourceNode as sourceNode
-const connectionConfig = {
- id,
- insightTypes: ['action_item', 'question'],
- config: {
- languageCode: 'en-US',
- sampleRateHertz: 48000
- },
- handlers: {
- ondevicechange: () => {
- // add your logic here.
- }
- }
-};
-
-(async () => {
- // Creates the WebSocket in a non-processing state
- const stream = await symbl.createStream(connectionConfig);
-
- // Send the start request
- await stream.start(stream);
-})();
-```
-### Using the deviceChanged callback
-
-You can also make use of our callback using our `deviceChanged` callback:
-
-```js
-symbl.deviceChanged = () => {
- // Add your logic here
-}
-```
\ No newline at end of file
diff --git a/docs/web-sdk/reconnecting-real-time.md b/docs/web-sdk/reconnecting-real-time.md
deleted file mode 100644
index c4a88419..00000000
--- a/docs/web-sdk/reconnecting-real-time.md
+++ /dev/null
@@ -1,89 +0,0 @@
----
-id: reconnecting-real-time
-title: Reconnecting to a Real-time Connection (Beta)
-sidebar_label: Reconnecting to a Real-time Connection
-slug: /web-sdk/reconnecting-real-time
----
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
----
-
-:::note IN BETA PHASE
-This feature is in the Beta phase. If you have any questions, ideas or suggestions please reach out to us at devrelations@symbl.ai.
-:::
-
-In case a user closes their browser or has an interruption in their WebSocket connection, you can use the `store` object to grab the Connection ID you last used.
-
-```js
-const id = symbl.store.get('connectionID');
-
-const connectionConfig = {
- id,
- insightTypes: ['action_item', 'question'],
- config: {
- meetingTitle: 'My Test Meeting ' + id,
- confidenceThreshold: 0.7,
- timezoneOffset: 480, // Offset in minutes from UTC
- languageCode: 'en-US',
- sampleRateHertz: 44100
- },
- speaker: {
- // Optional, if not specified, will simply not send an email in the end.
- userId: '', // Update with valid email
- name: ''
- },
- handlers: {
- /**
- * This will return live speech-to-text transcription of the call.
- */
- onSpeechDetected: (data) => {
- if (data) {
- const {punctuated} = data
- console.log('Live: ', punctuated && punctuated.transcript)
- console.log('');
- }
- // console.log('onSpeechDetected ', JSON.stringify(data, null, 2));
- },
- /**
- * When processed messages are available, this callback will be called.
- */
- onMessageResponse: (data) => {
- // console.log('onMessageResponse', JSON.stringify(data, null, 2))
- },
- /**
- * When Symbl detects an insight, this callback will be called.
- */
- onInsightResponse: (data) => {
- // console.log('onInsightResponse', JSON.stringify(data, null, 2))
- },
- /**
- * When Symbl detects a topic, this callback will be called.
- */
- onTopicResponse: (data) => {
- // console.log('onTopicResponse', JSON.stringify(data, null, 2))
- }
- }
-};
-
-(async () => {
- // Creates the WebSocket in a non-processing state
- const stream = await symbl.createStream(connectionConfig);
-
- // Send the start request
- await stream.start(stream);
-})();
-```
-:::note Using createStream to start a realtime request
-Creating a stream using `symbl.startRealtimeRequest(config)` has been deprecated in favor of `symbl.createStream(config)`. For createStream, the WebSocket is started in a non processing state. You must send the start request before processing any audio.
-
-After the stream is created, you need to call `symbl.start(stream)` to start the stream.
-:::
-
-Read more about the supported Event Handlers:
-
- 👉 [onSpeechDetected](/docs/web-sdk/web-sdk-reference#onspeechdetected)
- 👉 [onMessageResponse](/docs/web-sdk/web-sdk-reference#onmessageresponse)
- 👉 [onInsightResponse](/docs/web-sdk/web-sdk-reference#oninsightresponse)
- 👉 [onTopicResponse](/docs/web-sdk/web-sdk-reference#ontopicresponse)
diff --git a/docs/web-sdk/stopping-realtime-connection.md b/docs/web-sdk/stopping-realtime-connection.md
deleted file mode 100644
index be0f3b0d..00000000
--- a/docs/web-sdk/stopping-realtime-connection.md
+++ /dev/null
@@ -1,23 +0,0 @@
----
-id: stop-realtime-connection
-title: Stopping Real-time Connection (Beta)
-sidebar_label: Stopping Real-time Connection (Beta)
-slug: /web-sdk/stop-realtime-connection
----
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
----
-
-:::note IN BETA PHASE
-This feature is in the Beta phase. If you have any questions, ideas or suggestions please reach out to us at devrelations@symbl.ai.
-:::
-
-In order to end the real-time WebSocket connection, you'll need to use the following command with your connection object:
-
-```js
-symbl.stopRequest(connection);
-```
-
-If you do not sever the connection, you could use more minutes of time than intended, so it is recommended to always end the connection programmatically.
\ No newline at end of file
diff --git a/docs/web-sdk/subscribe-to-realtime.md b/docs/web-sdk/subscribe-to-realtime.md
deleted file mode 100644
index 871fcf2f..00000000
--- a/docs/web-sdk/subscribe-to-realtime.md
+++ /dev/null
@@ -1,56 +0,0 @@
----
-id: subscribe-to-realtime
-title: Subscribing to a Real-time Connection (Beta)
-sidebar_label: Subscribing to a Real-time Connection (Beta)
-slug: /web-sdk/subscribe-to-realtime
----
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
----
-
-:::note IN BETA PHASE
-This feature is in the Beta phase. If you have any questions, ideas or suggestions please reach out to us at devrelations@symbl.ai.
-:::
-
-With the Subscribe API you can connect to an existing connection via the connection ID. You'll want to open this example in a different browser while the real-time transcription is running.
-
-## Current call signature
-
-```js
-symbl.subscribeToStream(id, {
- reconnectOnError: true,
- handlers: {
- onMessage: (message) => { ... },
- onSubscribe: () => { ... },
- onClose: () => { ... },
- onReconnectFail: (err) => { ... },
- }
-});
-```
-
-## Deprecated call signature
-
-This way of using the `subscribeToSream` function has been deprecated. It will still work but might not in future versions. Please convert to the current call signature above. The function passed is equivalent to the `onMessage` handler in the new call signature.
-
-```js
-symbl.subscribeToStream(id, (data) => {
- console.log('data:', data);
-})
-```
-
-| Name | Default | Description |
-| -------| ---------- | --------- |
-| `reconnectOnError` | `true` | If `true`, the Web SDK will attempt to reconnect to the WebSocket in case of an error. You can also make sure of our `onReconnectFail` callback which will fire in case the reconnection attempt fails.) |
-
-## Subscribe API Handlers
-
-| Name | Description |
-| -------| ---------- |
-| `onMessage(message)` | Fired any time a message is received. | If true the Web SDK will attempt to reconnect to the WebSocket in case of error. You can also make sure of our `onReconnectFail` callback which will fire in case the reconnection attempt fails.) |
-| `onSubscribe()` | Fired when the connection intially subscribes.
-| `onClose()` | Fired when the connection is closed.
-| `onReconnectFail(err)` | Fires when the reconnection attempt fails. Related to the `reconnectOnError` config. |
-
-
diff --git a/docs/web-sdk/transcribing-live-audio-through-microphone.md b/docs/web-sdk/transcribing-live-audio-through-microphone.md
deleted file mode 100644
index 9e5839f0..00000000
--- a/docs/web-sdk/transcribing-live-audio-through-microphone.md
+++ /dev/null
@@ -1,117 +0,0 @@
----
-id: transcribing-live-audio-through-microphone
-title: Transcribing Live Audio through Microphone (Beta)
-sidebar_label: Transcribing Live Audio through Microphone
-slug: /web-sdk/transcribing-live-audio-through-microphone
----
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
----
-
-:::note IN BETA PHASE
-This feature is in the Beta phase. If you have any questions, ideas or suggestions please reach out to us at devrelations@symbl.ai.
-:::
-
-To transcribe live audio via Streaming API, you can setup a live microphone and push the audio stream using the browser APIs to access the microphone.
-
-Initialize the SDK and connect via the built-in WebSocket connector. This will output the live transcription to the console.
-
-### Initialize the SDK
-
-```js
-symbl.init({
- appId: '',
- appSecret: '',
- // accessToken: '', // can be used instead of appId and appSecret
- // basePath: '',
-});
-```
-
-You can get the `appId` and `appSecret` from the [Symbl Platform](https://platform.symbl.ai).
-See the steps to get your API Credentials in the [Authentication](/docs/developer-tools/authentication) section.
-
-### Start the Connection and pass Configuration Options
-
-:::note Using createStream to start a realtime request
-Creating a stream using `symbl.startRealtimeRequest(config)` has been deprecated in favor of `symbl.createStream(config)`. For createStream, the WebSocket is started in a non processing state. You must send the start request before processing any audio.
-
-After the stream is created, you need to call `symbl.start(stream)` to start the stream.
-:::
-
-```js
-symbl.init({
- appId: '',
- appSecret: '',
- // accessToken: '', // can be used instead of appId and appSecret
- basePath: 'https://api-labs.symbl.ai',
-});
-
-const id = btoa("my-first-symbl-ai-code");
-
-const connectionConfig = {
- id,
- insightTypes: ['action_item', 'question'],
- config: {
- meetingTitle: 'My Test Meeting ' + id,
- confidenceThreshold: 0.7,
- timezoneOffset: 480, // Offset in minutes from UTC
- languageCode: 'en-US',
- sampleRateHertz: 48000
- },
- speaker: {
- // Optional, if not specified, will simply not send an email in the end.
- userId: '', // Update with valid email
- name: ''
- },
- handlers: {
- /**
- * This will return live speech-to-text transcription of the call.
- */
- onSpeechDetected: (data) => {
- if (data) {
- const {punctuated} = data
- console.log('Live: ', punctuated && punctuated.transcript)
- console.log('');
- }
- // console.log('onSpeechDetected ', JSON.stringify(data, null, 2));
- },
- /**
- * When processed messages are available, this callback will be called.
- */
- onMessageResponse: (data) => {
- // console.log('onMessageResponse', JSON.stringify(data, null, 2))
- },
- /**
- * When Symbl detects an insight, this callback will be called.
- */
- onInsightResponse: (data) => {
- // console.log('onInsightResponse', JSON.stringify(data, null, 2))
- },
- /**
- * When Symbl detects a topic, this callback will be called.
- */
- onTopicResponse: (data) => {
- // console.log('onTopicResponse', JSON.stringify(data, null, 2))
- }
- }
-};
-
-(async () => {
- // Creates the WebSocket in a non-processing state
- const stream = await symbl.createStream(connectionConfig);
-
- // Send the start request
- await stream.start(stream);
-})();
-```
-
-Read more about the supported Event Handlers:
-
- 👉 [onSpeechDetected](/docs/web-sdk/web-sdk-reference#onspeechdetected)
- 👉 [onMessageResponse](/docs/web-sdk/web-sdk-reference#onmessageresponse)
- 👉 [onInsightResponse](/docs/web-sdk/web-sdk-reference#oninsightresponse)
- 👉 [onTopicResponse](/docs/web-sdk/web-sdk-reference#ontopicresponse)
-
-👉 See the complete Web SDK Reference [here](/docs/web-sdk/web-sdk-reference).
\ No newline at end of file
diff --git a/docs/web-sdk/web-sdk-reference.md b/docs/web-sdk/web-sdk-reference.md
deleted file mode 100644
index f17d4c8c..00000000
--- a/docs/web-sdk/web-sdk-reference.md
+++ /dev/null
@@ -1,369 +0,0 @@
----
-id: web-sdk-reference
-title: Web SDK Reference
-slug: /web-sdk/web-sdk-reference
-sidebar_label: Web SDK Reference
----
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
----
-
-This page contains a complete reference of the methods and event handlers supported with the Symbl Web SDK.
-
-## Public Methods
-
-### init
-
-```init (String appId, String appSecret)```
-
-Authenticates with the Symbl API using the provided authentication credentials.
-
-#### Parameters
-
-Name | Description
------|------------
-`appId` | The Symbl Application ID you get from the [Symbl Platform](https://platform.symbl.ai)
-`appSecret` | The Symbl Application Secret Token you get from the [Symbl Platform](https://platform.symbl.ai)
-`basePath` | The base path of the endpoint. By default it is `https://api.symbl.ai`.
-`accessToken` | The Symbl authentication Token you get from your `appId` and `appSecret`. This is an optional parameter you can use to authenticate using auth Token rather than the App ID and App Secret. See sample code [here](/docs/javascript-sdk/introduction#authenticate-using-token).
-
-#### Returns
-
-A Promise which is resolved once the API is connected and authenticated with Symbl.
-
-#### Code Example
-
-```js
-sdk.init({
- // APP_ID and APP_SECRET come from the Symbl Platform: https://platform.symbl.ai
- appId: APP_ID,
- appSecret: APP_SECRET,
- basePath: 'https://api.symbl.ai'
-})
-.then(() => console.log('SDK Initialized.'))
-.catch(err => console.error('Error in initialization.', err));
-```
-### startRealtimeRequest
-
-```startRealtimeRequest ( options)```
-
-Connects to a [Streaming API](/docs/streamingapi/overview/introduction) Web Socket endpoint using the provided configuration options.
-
-#### Parameters
-
-Name | Description
------|------------
-`options` | Options specified for the [Streaming API Configuration Object](https://docs.symbl.ai/docs/streaming-api/api-reference#request-parameters).
-
-#### Returns
-
-A Promise which is resolved once real-time request has been established.
-
-## Event Handlers
-
-When connecting using [`startRealtimeRequest`](#startRealtimeRequest), you can pass various handlers in the configuration options which be called if the specific event attached to the handler is fired.
-
-#### Code Example
-
-```js
-handlers: {
- /**
- * This will return live speech-to-text transcription of the call.
- */
- onSpeechDetected: (data) => {
- console.log(JSON.stringify(data))
- if (data) {
- const {punctuated} = data
- console.log('Live: ', punctuated && punctuated.transcript)
- }
- },
- /**
- * When processed messages are available, this callback will be called.
- */
- onMessageResponse: (data) => {
- console.log('onMessageResponse', JSON.stringify(data, null, 2))
- },
- /**
- * When Symbl detects an insight, this callback will be called.
- */
- onInsightResponse: (data) => {
- console.log('onInsightResponse', JSON.stringify(data, null, 2))
- },
- /**
- * When Symbl detects a topic, this callback will be called.
- */
- onTopicResponse: (data) => {
- console.log('onTopicResponse', JSON.stringify(data, null, 2))
- }
-}
-```
-
-### onSpeechDetected
-
-To retrieve the real-time transcription results as soon as they are detected. You can use this callback to render live transcription which is specific to the speaker of this audio stream.
-
-#### onSpeechDetected JSON Response Example
-
-```js
-{
- "type": "recognition_result",
- "isFinal": true,
- "payload": {
- "raw": {
- "alternatives": [{
- "words": [{
- "word": "Hello",
- "startTime": {
- "seconds": "3",
- "nanos": "800000000"
- },
- "endTime": {
- "seconds": "4",
- "nanos": "200000000"
- }
- }, {
- "word": "world.",
- "startTime": {
- "seconds": "4",
- "nanos": "200000000"
- },
- "endTime": {
- "seconds": "4",
- "nanos": "800000000"
- }
- }],
- "transcript": "Hello world.",
- "confidence": 0.9128385782241821
- }]
- }
- },
- "punctuated": {
- "transcript": "Hello world."
- },
- "user": {
- "userId": "emailAddress",
- "name": "John Doe",
- "id": "23681108-355b-4fc3-9d94-ed47dd39fa56"
- }
-}
-```
-
-### onMessageResponse
-
-This callback function contains the "finalized" transcription data for this speaker and if used with multiple streams with other speakers this callback would also provide their messages.
-
-The "finalized" messages mean that the automatic speech recognition has finalized the state of this part of transcription and has declared it "final". Therefore, this transcription will be more accurate than [`onSpeechDetected`](#onspeechdetected).
-
-#### onMessageResponse JSON Response Example
-
-```js
-[{
- "from": {
- "id": "0a7a36b1-047d-4d8c-8958-910317ed9edc",
- "name": "John Doe",
- "userId": "emailAddress"
- },
- "payload": {
- "content": "Hello world.",
- "contentType": "text/plain"
- },
- "id": "59c224c2-54c5-4762-9582-961bf250b478",
- "channel": {
- "id": "realtime-api"
- },
- "metadata": {
- "disablePunctuation": true,
- "timezoneOffset": 480,
- "originalContent": "Hello world.",
- "words": "[{\"word\":\"Hello\",\"startTime\":\"2021-02-04T20:34:59.029Z\",\"endTime\":\"2021-02-04T20:34:59.429Z\"},{\"word\":\"world.\",\"startTime\":\"2021-02-04T20:34:59.429Z\",\"endTime\":\"2021-02-04T20:35:00.029Z\"}]",
- "originalMessageId": "59c224c2-54c5-4762-9582-961bf250b478"
- },
- "dismissed": false,
- "duration": {
- "startTime": "2021-02-04T20:34:59.029Z",
- "endTime": "2021-02-04T20:35:00.029Z"
- }
-}]
-```
-
-### onInsightResponse
-
-This callback provides you with any of the detected insights in real-time as they are detected. As with the [`onMessageCallback`](#onmessagecallback) this would also return every speaker's insights in case of multiple streams.
-
-#### onInsightResponse JSON Response Example
-
-```json
-[{
- "id": "94020eb9-b688-4d56-945c-a7e5282258cc",
- "confidence": 0.9909798145016999,
- "messageReference": {
- "id": "94020eb9-b688-4d56-945c-a7e5282258cc"
- },
- "hints": [{
- "key": "informationScore",
- "value": "0.9782608695652174"
- }, {
- "key": "confidenceScore",
- "value": "0.9999962500210938"
- }, {
- "key": "comprehensionScore",
- "value": "0.9983848333358765"
- }],
- "type": "action_item",
- "assignee": {
- "id": "e2c5acf8-b9ed-421a-b3b3-02a5ae9796a0",
- "name": "John Doe",
- "userId": "emailAddress"
- },
- "dueBy": {
- "value": "2021-02-05T00:00:00-07:00"
- },
- "tags": [{
- "type": "date",
- "text": "today",
- "beginOffset": 39,
- "value": {
- "value": {
- "datetime": "2021-02-05"
- }
- }
- }, {
- "type": "person",
- "text": "John Doe",
- "beginOffset": 8,
- "value": {
- "value": {
- "name": "John Doe",
- "id": "e2c5acf8-b9ed-421a-b3b3-02a5ae9796a0",
- "assignee": true,
- "userId": "emailAddress"
- }
- }
- }],
- "dismissed": false,
- "payload": {
- "content": "Perhaps John Doe can submit the report today.",
- "contentType": "text/plain"
- },
- "from": {
- "id": "e2c5acf8-b9ed-421a-b3b3-02a5ae9796a0",
- "name": "John Doe",
- "userId": "emailAddress"
- }
-}]
-```
-
-### onTopicResponse
-
-This callback provides you with any of the detected topics in real-time as they are detected. As with the [`onMessageCallback`](#onmessagecallback) this would also return every topic in case of multiple streams.
-
-#### onTopicResponse JSON Response Example
-
-```json
-[{
- "id": "e69a5556-6729-11eb-ab14-2aee2deabb1b",
- "messageReferences": [{
- "id": "0df44422-0248-47e9-8814-e87f63404f2c",
- "relation": "text instance"
- }],
- "phrases": "auto insurance",
- "rootWords": [{
- "text": "auto"
- }],
- "score": 0.9,
- "type": "topic"
-}]
-```
-
-### onTrackerResponse (trackers)
-
-This callback provides you with any of the detected trackers in real-time as they are detected. As with the onMessageCallback this would also return every tracker in case of multiple streams.
-
-#### onTopicResponse JSON Response Example
-
-```json
-{
- "type": "tracker_response",
- "isFinal": true,
- "trackers": [
- {
- "name": "Goodness",
- "matches": [
- {
- "type": "vocabulary",
- "value": "This is awesome",
- "messageRefs": [
- {
- "id": "fa93aa64-0e8d-4697-bb52-e2916ca63192",
- "text": "This is awesome.",
- "offset": 0
- }
- ],
- "insightRefs": []
- },
- {
- "type": "vocabulary",
- "value": "Hello world",
- "messageRefs": [
- {
- "id": "8e720656-fed7-4b11-b359-3931c53bbcec",
- "text": "Hello world.",
- "offset": 0
- }
- ],
- "insightRefs": []
- }
- ]
- },
- {
- "name": "Goodness",
- "matches": [
- {
- "type": "vocabulary",
- "value": "I like it",
- "messageRefs": [
- {
- "id": "193dc144-2b55-4214-b211-ab83bd3e4a2e",
- "text": "I love it.",
- "offset": -1
- }
- ],
- "insightRefs": []
- }
- ]
- }
- ],
- "sequenceNumber": 1
-}
-```
-### onRequestError(err)
-
-Fires when the WebSocket has an error.
-
-### onConversationCompleted(message)
-
-Fires when the `conversation_completed` event is recieved from the WebSocket.
-
-### onReconnectFail(err)
-
-Fires when the reconnection attempt fails. Related to the `reconnectOnError` config.
-
-### onStartedListening(message)
-
-Fires when the `started_listening` event is received from the WebSocket.
-
-### onRequestStart(message)
-
-Fires when the `recognition_started` event is received from the WebSocket.
-
-### onRequestStop(message)
-
-Fires when the `recognition_stopped` event is received from the WebSocket.
-
-### onClose(event)
-
-Fires when the WebSocket connection closes for any reason.
-
-
diff --git a/docusaurus.config.js b/docusaurus.config.js
index 2c115031..5fd1dcc6 100644
--- a/docusaurus.config.js
+++ b/docusaurus.config.js
@@ -1,5 +1,10 @@
+require('dotenv').config()
+
module.exports = {
plugins: [
+ 'docusaurus2-dotenv',
+ 'docusaurus-plugin-hotjar',
+ 'docusaurus-plugin-munchkin',
'docusaurus-plugin-moesif',
[
'@docusaurus/plugin-client-redirects',
@@ -205,18 +210,6 @@ module.exports = {
organizationName: 'symbl.ai', // Usually your GitHub org/user-name.
projectName: 'docs-v1', // Usually your repo name.
onBrokenLinks: 'warn',
- scripts: [
- {
- src: '/docs/js/hotjar.js',
- async: true
- },
- {
- src: '/docs/js/munchkin.js',
- async: true
- },
- '//unpkg.com/moesif-browser-js@^1/moesif.min.js',
- '/docs/js/moesif.js',
- ],
themeConfig:
{
@@ -230,16 +223,16 @@ module.exports = {
// isCloseable: false, // Defaults to `true`.
// },
//
- googleAnalytics: {
- trackingID: 'UA-110963786-1',
- anonymizeIP: true,
+ moesif: {
+ applicationId: process.env.MOESIF_APPLICATION_ID,
+ // Add other Moesif options here.
},
- gtag: {
- trackingID: 'GTM-KF9THZZ',
- // Optional fields.
- anonymizeIP: true, // Should IPs be anonymized?
+ hotjar: {
+ applicationId: process.env.HOTJAR_ID,
+ },
+ munchkin: {
+ applicationId: process.env.MUNCHKIN_ID,
},
-
announcementBar: {
id: 'new_docs_announcement', // Any value that will identify this message.
content:'New Release Announcement: We have released the Symbl-Agora Marketplace extension with Agora SDK for Android applications. Head to our Integrations section to learn more.',
@@ -254,12 +247,6 @@ module.exports = {
disableSwitch: false,
respectPrefersColorScheme: true
},
-
- moesif:
- {
- applicationId: 'eyJhcHAiOiIxOTg6NDYwIiwidmVyIjoiMi4wIiwib3JnIjoiODg6MTAyMyIsImlhdCI6MTYwNjc4MDgwMH0.HJiVyW2au4JS1Po1RkXIsuuS6uvWd2ED71xgySIyZJY',
- // Add other Moesif options here.
- },
"prism":
{
theme: require('prism-react-renderer/themes/palenight'),
@@ -325,9 +312,9 @@ module.exports = {
algolia:
{
- apiKey: '2c62f60d685fcd9d4aa97367cfc7dcf3',
- indexName: 'symbl',
- appId: 'BH4D9OD16A'
+ apiKey: process.env.ALGOLIA_KEY,
+ indexName: process.env.ALGOLIA_INDEX_NAME,
+ appId: process.env.ALGOLIA_APP_ID
// contextualSearch: true,
},
@@ -373,6 +360,15 @@ module.exports = {
[
'@docusaurus/preset-classic',
{
+ googleAnalytics: {
+ trackingID: process.env.GOOGLE_ANALYTICS_TRAKING_ID,
+ anonymizeIP: true,
+ },
+ gtag: {
+ trackingID: process.env.GTAG_ID,
+ // Optional fields.
+ anonymizeIP: true, // Should IPs be anonymized?
+ },
docs:
{
// It is recommended to set document id as docs home page (`docs/` path).
diff --git a/package-lock.json b/package-lock.json
index e22bd2ac..c662b664 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -3915,11 +3915,29 @@
"buffer-indexof": "^1.0.0"
}
},
+ "docusaurus-plugin-hotjar": {
+ "version": "0.0.2",
+ "resolved": "https://registry.npmjs.org/docusaurus-plugin-hotjar/-/docusaurus-plugin-hotjar-0.0.2.tgz",
+ "integrity": "sha512-Jsdxa6k4YQm4SBiY5mv9h/6sKUrQs6lC6mRoPUfjiPVtnhURE3d0dj4Vnrpy/tRVSAbywAqA0F/PGn5RKHtVaw=="
+ },
"docusaurus-plugin-moesif": {
"version": "0.0.1",
"resolved": "https://registry.npmjs.org/docusaurus-plugin-moesif/-/docusaurus-plugin-moesif-0.0.1.tgz",
"integrity": "sha512-AnKnF2PFcjG+wdVCH+dp549jQvoXwePIc1JlLmvD6R/lF0Par3NKqTAlWQhwia0A2BJdr2xgpddaGXHy2MWdHw=="
},
+ "docusaurus-plugin-munchkin": {
+ "version": "0.0.1",
+ "resolved": "https://registry.npmjs.org/docusaurus-plugin-munchkin/-/docusaurus-plugin-munchkin-0.0.1.tgz",
+ "integrity": "sha512-Akx1fQZ4Q+TmyIk1i0HSxpVAs3l+T22GSzsdaFt7grvWaeqWA1AGWHDZKRcOSX/XKSqTrmxEwINGm1ZeLuOsKg=="
+ },
+ "docusaurus2-dotenv": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/docusaurus2-dotenv/-/docusaurus2-dotenv-1.4.0.tgz",
+ "integrity": "sha512-iWqem5fnBAyeBBtX75Fxp71uUAnwFaXzOmade8zAhN4vL3RG9m27sLSRwjJGVVgIkEo3esjGyCcTGTiCjfi+sg==",
+ "requires": {
+ "dotenv-webpack": "1.7.0"
+ }
+ },
"dom-converter": {
"version": "0.2.0",
"resolved": "https://registry.npmjs.org/dom-converter/-/dom-converter-0.2.0.tgz",
@@ -3985,6 +4003,34 @@
}
}
},
+ "dotenv": {
+ "version": "14.3.2",
+ "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-14.3.2.tgz",
+ "integrity": "sha512-vwEppIphpFdvaMCaHfCEv9IgwcxMljMw2TnAQBB4VWPvzXQLTb82jwmdOKzlEVUL3gNFT4l4TPKO+Bn+sqcrVQ=="
+ },
+ "dotenv-defaults": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/dotenv-defaults/-/dotenv-defaults-1.1.1.tgz",
+ "integrity": "sha512-6fPRo9o/3MxKvmRZBD3oNFdxODdhJtIy1zcJeUSCs6HCy4tarUpd+G67UTU9tF6OWXeSPqsm4fPAB+2eY9Rt9Q==",
+ "requires": {
+ "dotenv": "^6.2.0"
+ },
+ "dependencies": {
+ "dotenv": {
+ "version": "6.2.0",
+ "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-6.2.0.tgz",
+ "integrity": "sha512-HygQCKUBSFl8wKQZBSemMywRWcEDNidvNbjGVyZu3nbZ8qq9ubiPoGLMdRDpfSrpkkm9BXYFkpKxxFX38o/76w=="
+ }
+ }
+ },
+ "dotenv-webpack": {
+ "version": "1.7.0",
+ "resolved": "https://registry.npmjs.org/dotenv-webpack/-/dotenv-webpack-1.7.0.tgz",
+ "integrity": "sha512-wwNtOBW/6gLQSkb8p43y0Wts970A3xtNiG/mpwj9MLUhtPCQG6i+/DSXXoNN7fbPCU/vQ7JjwGmgOeGZSSZnsw==",
+ "requires": {
+ "dotenv-defaults": "^1.0.2"
+ }
+ },
"duplexer": {
"version": "0.1.2",
"resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz",
diff --git a/package.json b/package.json
index e8bb69e4..7cc7af97 100644
--- a/package.json
+++ b/package.json
@@ -25,7 +25,11 @@
"@docusaurus/theme-search-algolia": "^2.0.0-beta.14",
"caniuse-lite": "^1.0.30001294",
"clsx": "^1.1.1",
+ "docusaurus-plugin-hotjar": "0.0.2",
"docusaurus-plugin-moesif": "0.0.1",
+ "docusaurus-plugin-munchkin": "0.0.1",
+ "docusaurus2-dotenv": "^1.4.0",
+ "dotenv": "^14.3.2",
"postcss": "^8.4.5",
"prism-react-renderer": "^1.2.1",
"react": "^17.0.2",
diff --git a/sidebars.js b/sidebars.js
index fff1a196..25bba5ec 100644
--- a/sidebars.js
+++ b/sidebars.js
@@ -95,7 +95,7 @@ module.exports = {
]
},
{
- label: "Summarization (Beta)",
+ label: "Summarization (Labs)",
type: 'category',
items: [
'conversation-api/concepts/summarization',
@@ -327,8 +327,7 @@ items: [
'conversation-api/api-reference/update-members',
'conversation-api/api-reference/speakers',
'conversation-api/api-reference/transcript',
- 'conversation-api/api-reference/trackers',
- 'conversation-api/api-reference/summary'
+ 'conversation-api/api-reference/trackers'
],
},
{
@@ -444,7 +443,7 @@ id: 'developer-tools/postman',
],
},
{
- label: 'Summary API (Beta)',
+ label: 'Summary API (Labs)',
type: 'category',
collapsed: true,
items: [
@@ -649,44 +648,6 @@ SDKsidebar: [{
},
]
},
- {
- label: "Web SDK (Beta)",
- type: "category",
- collapsed: false,
- items: [
- 'web-sdk/web-sdk',
- {
- type: 'category',
- label: 'Streaming API',
- items: [
- {
- type: 'category',
- label: 'Tutorials',
- items: [
- 'web-sdk/transcribing-live-audio-through-microphone',
- 'web-sdk/passing-custom-sourcenode',
- 'web-sdk/passing-custom-ondevicechange-handler',
- ],
- },
- {
- type: 'category',
- label: 'Code Snippets',
- items: [
- 'web-sdk/reconnecting-real-time',
- 'web-sdk/muting-and-unmuting-connected-device',
- 'web-sdk/subscribe-to-realtime',
- 'web-sdk/stop-realtime-connection',
- ],
- },
-
- ]
- },
- {
- type: 'doc',
- id: 'web-sdk/web-sdk-reference',
- },
- ]
- },
{
type: 'doc',
id: 'developer-tools/postman',
@@ -700,6 +661,16 @@ LabsSidebar: [{
id: "labs-intro",
type: "doc",
},
+ {
+ label: 'Summarization',
+ type: 'category',
+ collapsed: true,
+ items: [
+
+ 'conversation-api/concepts/summarization',
+ 'conversation-api/api-reference/summary'
+ ],
+ },
{
label: 'Comprehensive Action Items',
type: 'category',
@@ -749,6 +720,38 @@ ChangelogSidebar: [{
id: "changelog",
type: "doc",
},
+{
+ type: 'doc',
+ id: 'changelog',
+ customProps: {
+ hash: '#31-jan-2022',
+ label: '31 Jan 2022',
+ }
+},
+{
+ type: 'doc',
+ id: 'changelog',
+ customProps: {
+ hash: '#11-jan-2022',
+ label: '11 Jan 2022',
+ }
+},
+{
+ type: 'doc',
+ id: 'changelog',
+ customProps: {
+ hash: '#10-jan-2022',
+ label: '10 Jan 2022',
+ }
+},
+{
+ type: 'doc',
+ id: 'changelog',
+ customProps: {
+ hash: '#28-dec-2021',
+ label: '28 Dec 2021',
+ }
+},
{
type: 'doc',
id: 'changelog',
diff --git a/static/img/copy-tracker-id.png b/static/img/copy-tracker-id.png
new file mode 100644
index 00000000..3fcd24bc
Binary files /dev/null and b/static/img/copy-tracker-id.png differ
diff --git a/static/img/streaming-api-logs.png b/static/img/streaming-api-logs.png
new file mode 100644
index 00000000..d921af81
Binary files /dev/null and b/static/img/streaming-api-logs.png differ
diff --git a/static/img/tracker-management-ui-1.png b/static/img/tracker-management-ui-1.png
new file mode 100644
index 00000000..d102c95a
Binary files /dev/null and b/static/img/tracker-management-ui-1.png differ
diff --git a/static/img/tracker-management-ui.png b/static/img/tracker-management-ui.png
new file mode 100644
index 00000000..0a4cb223
Binary files /dev/null and b/static/img/tracker-management-ui.png differ
diff --git a/static/img/tracker-ui-1.png b/static/img/tracker-ui-1.png
new file mode 100644
index 00000000..848a16b0
Binary files /dev/null and b/static/img/tracker-ui-1.png differ
diff --git a/static/js/hotjar.js b/static/js/hotjar.js
deleted file mode 100644
index f09768bd..00000000
--- a/static/js/hotjar.js
+++ /dev/null
@@ -1,8 +0,0 @@
-(function(h,o,t,j,a,r){
- h.hj=h.hj||function(){(h.hj.q=h.hj.q||[]).push(arguments)};
- h._hjSettings={hjid:1585965,hjsv:6};
- a=o.getElementsByTagName('head')[0];
- r=o.createElement('script');r.async=1;
- r.src=t+h._hjSettings.hjid+j+h._hjSettings.hjsv;
- a.appendChild(r);
-})(window,document,'https://static.hotjar.com/c/hotjar-','.js?sv=');
\ No newline at end of file
diff --git a/static/js/moesif.js b/static/js/moesif.js
deleted file mode 100644
index d3d04b86..00000000
--- a/static/js/moesif.js
+++ /dev/null
@@ -1,3 +0,0 @@
-moesif.init({
- applicationId: 'eyJhcHAiOiIxOTg6NDYwIiwidmVyIjoiMi4wIiwib3JnIjoiODg6MTAyMyIsImlhdCI6MTYxMjEzNzYwMH0.OpA76yIcCncJaxhAK4scrv79lHSCj486E8wQJSgszzc'
-});
\ No newline at end of file
diff --git a/static/js/munchkin.js b/static/js/munchkin.js
deleted file mode 100644
index cbb3725a..00000000
--- a/static/js/munchkin.js
+++ /dev/null
@@ -1,20 +0,0 @@
-(function() {
- var didInit = false;
- function initMunchkin() {
- if(didInit === false) {
- didInit = true;
- Munchkin.init('888-ITB-575');
- }
- }
- var s = document.createElement('script');
- s.type = 'text/javascript';
- s.async = true;
- s.src = '//munchkin.marketo.net/munchkin.js';
- s.onreadystatechange = function() {
- if (this.readyState == 'complete' || this.readyState == 'loaded') {
- initMunchkin();
- }
- };
- s.onload = initMunchkin;
- document.getElementsByTagName('head')[0].appendChild(s);
-})();
\ No newline at end of file
diff --git a/styleguide.md b/styleguide.md
new file mode 100644
index 00000000..f4857563
--- /dev/null
+++ b/styleguide.md
@@ -0,0 +1,389 @@
+# Symbl Documentation Styleguide
+
+
+This style guide contains a set of standards for the writing and designing of technical documentation for Symbl.ai. The aim is to bring a consistent tone and style to our documentation so that it is easier for our users to understand information.
+
+These guidelines are applicable to our product guides, references, changelog, and tutorials.
+
+
+## Capitalization
+
+***
+
+Symbl.ai uses sentence-style capitalization, which implies everything is lowercase excluding the first word and proper nouns, which includes brands, products, and services. (For customers to find it clear to identify, locate, and purchase them, and reserve capitalization for only product and service names.)
+
+Follow these 4 Guidelines in Symbl.ai content:
+
+
+### 1. Capitalize the First Word of a Sentence
+
+Always capitalize the first word of a sentence. That means you capitalize the first word of every sentence, heading, title, UI name, or standalone phrase.
+
+For example,
+
+* _An action item is a specific outcome recognized in the conversation._
+* _Topics are key drivers of the conversation._
+
+
+### 2. Capitalize Names and Other Proper Nouns
+
+Capitalize proper nouns: name for a particular person, place, or thing. Names are proper nouns. The names of programs, gadgets, companies, and software are also proper nouns, so you should capitalize them, too.
+
+For example,
+
+* _Symbl offers a comprehensive suite of APIs._
+* _Symbl's Streaming API is based on WebSocket protocol._
+
+
+### 3. Capitalize Most Words in Titles
+
+In most titles and headings, you should use sentence-style capitalization: capitalize the first word and lowercase the rest.
+
+Exceptions include Proper nouns, including brand, product, and service names, which are always capitalized. If a title or heading consists of a colon, capitalize the first word after it.
+
+* Don't capitalize _a, an_, or _the_ unless it's the first word.
+* Don't capitalize prepositions of four or fewer letters (such as on, to, in, up, down, of, and for) unless the preposition is the first or last word.
+* Don't capitalize and, but, or, nor, yet, or so unless it's the first word or the last word.
+* Capitalize the first word of labels and terms that appear in UI and APIs unless they're always lowercase (for example, fdisk).
+* In programming languages, follow the traditional capitalization of keywords and other special terms.
+
+For example,
+
+* _Transcribe Speech-to-Text in Real-Time_
+* _Topics API- Extracting Relevant Topics_
+
+
+### 4. Don't use all uppercase for emphasis
+
+It's okay to use italic sparingly for emphasis. The Chicago Manual of Style has a note (7.48) on capitalization for emphasis, affirming:
+
+“Capitalizing an entire word or phrase for emphasis is rarely appropriate. If capitals are wanted—in dialogue or in representing newspaper headlines, for example—small caps rather than full capitals look more graceful.”
+
+To show emphasis on a particular word or phrase, we recommend using italics. This lets you understand that the author is trying to distinguish this word or phrase from the rest of the text because it appears different. Additionally, it has a more professional and "graceful" emphasis that doesn't seem like "yelling."
+
+For example,
+
+* _"DO NOT copy-paste this code."_
+* _"This function STOPS the servers."_
+
+
+## Grammar and Parts of Speech
+
+***
+
+## Person
+
+A person is a category used to differentiate between:
+
+1. those speaking
+
+2. those being addressed
+
+3. those who are neither speaking nor being addressed (i.e., everybody else).
+
+These three categories are called the first, second, and third person. They are ways of describing points of view.
+
+* **First-person** is the **I/we** perspective.
+* **Second-person** is the **you** perspective.
+* **Third-person** is the **he/she/it/they** perspective.
+
+Follow these 3 Guidelines in Symbl.ai content:
+
+
+### 1. Use second-person
+
+The second-person point of view refers to the person (or people) being spoken to. This is the “you” perspective. The biggest sign of the second person is the practice of second-person pronouns: you, your, yours, yourself, yourselves. The second-person point of view is usually used to give directions, offer advice, or explain. This perspective allows you to connect with your audience by focusing on the reader and treating them with a compassionate, friendly human tone.
+
+For example,
+
+* _To begin, get your API Credentials from Symbl Platform. Using these credentials, you can then generate the Access Token to invoke Symbl API calls._
+* _You can choose to tune your Summary Page with query parameters in order to play with different configurations and see how the results look._
+
+
+### 2. Use first-person accurately
+
+Use first person (usually I or me) only when you need to write from the customer's point of view. The first-person point of view is more personal than the three points of view. Using I or We gives you the chance to show off your personality as a business owner or establish a clear voice for your brand. This point of view helps build trust with consumers, as any text will read as if it is coming straight from you.
+
+For example,
+
+* _Alert me when a new Symbl update comes in. (checkbox text)_
+* _I want to be part of the Symbl slack feedback team._
+
+
+### 3. Avoid the first-person plural
+
+A grammatical classification of pronouns and verbs used by the speaker to refer to or speak about themselves with others. First-person plural, which often uses the pronoun we, can feel like an intimidating corporate presence which is the exact opposite of Symbl's own modern voice. It's OK if you use phrasing like "we recommend" if it helps you avoid awkward phrasing like "it's recommended", but write around it if you can. Try and keep the focus on the customer, not Symbl itself.
+
+For example,
+
+* _The Management API allows you to access and manage various resources against your Symbl account. (Instead of Symbl’s Management API …)_
+* _The Summary UI provides users with a translated meeting summary page with transcript, attendees, topics, action items, follow-ups, and more._
+
+
+## Dangling and misplaced modifiers
+
+A practical definition for the word "modify" is to change or to alter something. This meaning is the same when considering the purpose of modifiers within a sentence.
+
+A modifier modifies a word/phrase/clause in a sentence to add emphasis, explanation, or detail. Modifiers are descriptive words, such as adverbs and adjectives. Modifier phrases, such as adjective clauses and adverbial phrases, also exist and descriptive adjectives and adverbs.
+
+For example,
+
+* _Pre-Built UI is an interface for users to interact with Symbl's APIs output and understand the conversation better._
+
+Follow these 2 Guidelines in Symbl.ai content:
+
+
+### 1. Strictly avoid misplaced modifier
+
+A misplaced modifier is a kind of modifier that is placed too far away from the word, phrase, or clause it is meant to modify and, as a result, appears to be changing something else entirely.
+
+A misplaced modifier can be fixed by removing and moving it to connect to the right subject.
+
+For example,
+
+* _Currently, this utility only supports one feature._
+
+
+### 2. Strictly avoid dangling modifier
+
+A dangling modifier happens when the subject of a modifier is missing from the sentence.
+
+Dangling modifiers usually take the form of an introductory phrase accompanied by a clause that doesn't state the intended subject.
+
+For example,
+
+* _There are repositories that can’t be removed on the drive._
+
+
+## Verbs
+
+Verbs represent external actions (run, jump, work) and internal (love, think, consider). Verbs determine what the subject is doing or feeling, even if they're just _being_. Verbs are the only type of word that's absolutely essential to make a sentence.
+
+Follow these 4 Guidelines in Symbl.ai content:
+
+
+### 1. Use present tense verb
+
+In the present tense, the action is occurring now. The present tense is usually simpler to read and understand than the past or future tense. It's the most suitable choice for most of the content.
+
+For example,
+
+* _Symbl's APIs let you generate real-time Sentiment Analysis, Action Items, Topics, Trackers, Summary, and much more in your applications._
+
+
+### 2. Use the indicative mood of a verb
+
+The mood of a verb states the writer's intent. Most of the time, it's better to use the indicative mood. An indicative mood is a verb form that gives a statement or asks a question. It's crisp and straightforward without being bossy. Don't switch moods within a sentence.
+
+For example,
+
+* _The action items provide you with insights into 'who has to do what, by when.'_
+
+
+### 3. Use Active voice
+
+When the subject of a sentence does the verb's action, it's called Active voice. Simply put, it tells what a person or thing does. The sentence is direct, strong, and easy to read. The active voice describes a sentence where the subject performs an action stated by the verb.
+
+For example:
+
+* _Symbl recognizes if an action item has a connotation or nature of language._
+
+
+### 4. Apply Subject-verb agreement
+
+Verbs and subjects must **agree** with one another in **number**. Hence, if a subject is singular, its verb must also be singular; if a subject is plural, its verb also needs to be plural. No matter what tense you use, your verb has to match the number of the subject. Simply put, singular subjects conjugate verbs differently than plural subjects.
+
+Often, you either add -s to the end of the verb or you don't. However, more advanced tenses with auxiliary verbs can get tricky—both be and have are irregular verbs, so you have to pay close consideration to use their suitable forms even when they're not the main verb.
+
+For example,
+
+* _Symbl uses different APIs to analyze conversation data._
+
+
+## Lists
+
+Lists are an excellent way to present complex content in a way that's simple, easy, and straightforward to study.
+
+Lists operate best when they have two to seven items. Each item should be reasonably short so that the reader can see at least two or three list items at a glimpse. It's OK to have a couple of brief paragraphs in a list item, but you shouldn't exceed that length too much.
+
+Follow these 5 Guidelines in Symbl.ai content:
+
+
+### 1. Introductory paragraph
+
+You should make sure the concept and purpose of the list are clear and straightforward. Introduce the list with a title, a complete sentence, or a small bit that concludes with a colon.
+
+If you introduce a list with a title, don't use descriptive text after the title. Additionally, don't use a colon or period after the title.
+
+For example,
+
+* _Response Body Parameters_
+* _Query Params_
+
+
+### 2. Implement Capitalization
+
+You should begin each item in a list with a capital letter unless there's a reason not to (Note: a command is always lowercase). If needed, rewrite the list items so that all items begin with capital letters or all items begin with lowercase words.
+
+For example,
+
+* _cURL (command)_
+* _Sentiment_
+
+To learn more, go to the Capitalization section.
+
+
+### 3. Use a Bullet-point list
+
+A set of items in a bullet point list is neither a sequence nor an option. Use a bulleted point list for items that have something in common but don't necessarily need to appear in a particular order. Bullet points can work as a point of entry for readers, a mode of emphasis, or a way to indicate importance and value.
+
+They can split up long text paragraphs and add variation to a body of work while giving your reader an easy source of quick information. Bullets are easy to spot, short, quick to read, and the information they contain is more easily remembered.
+
+For example,
+
+* _**Speaker Ratio**: The speaker ratio is the total ratio of a speaker versus another._
+* _**Talk time**: Talk time per speaker._
+* _**Silence**: Indicates the time during which none of the speakers spoke anything._
+
+
+### 4. Use Numbered lists
+
+Numbered lists are an elegant way of using the natural sparsity of data within a business. The numbered list represents a hierarchy. You should use a numbered list for sequential items (like a process or a step-by-step method) or prioritized items (like a list of top 10).
+
+For example,
+
+1. _Get Symbl Authentication 🔐_
+2. _Submit your Audio file 🎤_
+3. _Receive Speech to Text & Conversational Action Items 🎁_
+
+
+### 5. Mind the Punctuation
+
+In either a bullet point or a numbered list, end each item with a period only if any item produces a complete sentence when combined with the introduction of the list that leads to the colon.
+
+Additionally, follow these Don'ts:
+
+* Don't use a period if all items have three or fewer words or if the items are headings, subheadings, UI labels, or strings.
+* Don't use commas, semicolons, or conjunctions (and, or) at the end of list items.
+* Don't use a period if the item consists of a single word.
+* Don't use a period if the item doesn't include a verb.
+* Don't use a period if the item is entirely in code font.
+* Don't use a period if the item is entirely linked text or a document title.
+
+For example,
+
+* _If you want to learn more, check out [Introduction to Conversation API.](https://docs.symbl.ai/docs/conversation-api/introduction)_
+* _If you want to try more APIs, click on **Get Topics**, **Get Questions**, **Get Entities**, etc. in the **Conversation API** tab in Postman._
+
+
+## Procedures and Instructions
+
+***
+
+## Writing Step-by-Step Instructions
+
+To write clear and accurate instructions, first, make sure you understand precisely how to complete the task. Follow and perform your own instructions exactly to make sure they will complete the task successfully.
+
+Follow these 4 guidelines in Symbl.ai content to help you create precise, easy-to-follow instructions, whether you're writing simple, single-step, or complicated processes that consist of multiple steps:
+
+
+### 1. Format Multiple-step instructions consistently
+
+Complicated instructions often consist of multiple steps formatted into a numbered list. For multiple-step step instruction in numbered lists, you should consistently format the steps so customers can find them quickly and can easily grasp them.
+
+You should consider using a title to help clients find the instructions instantly. Use the title to tell clients what the instructions will assist them to do.
+
+For example,
+
+* _To create a Symbl account_
+
+
+### 2. Start each step with a verb
+
+Every step you write should be action-active. It shows your users precisely the action they need to take to complete a step of the given task.
+
+* Apply the [imperative verb forms](https://docs.microsoft.com/en-us/style-guide/grammar/verbs). In the step-by-step instructions, users want you to tell them what to do.
+
+* Use consistent sentence structures, such as using a phrase when you want to tell the customer _where to begin_. The rest of the time, begin each sentence with a verb.
+
+ For example,
+
+ 1. _On the ribbon, go to the **Design** tab._
+ 2. _Open **Photos**._
+ 3. _For **Alignment**, choose **Left**._
+
+* Word your instructions in phrases of what someone needs to do, not what someone must think or know. Usually, include actions that achieve the end of a step, such as OK or SEND buttons.
+
+ For example,
+
+ * _In the response body, check if you have received the conversationID._
+
+
+### 3. Write from a second-person point of view
+
+The pronoun "you" enables you to address your reader directly and can avoid confusion. When using the pronoun "you", the user knows precisely what they must do to perform the task and doesn't have to speculate.
+
+For example,
+
+* _In this guide, you will learn how to get started with Symbl's native Streaming API._
+
+
+### 4. Shorten a simple series of instructions
+
+Shorten a simple series of instructions by using right-angle brackets.
+
+Add a space before and after each bracket, and you shouldn't make the brackets bold.
+
+For example,
+
+* _Go to **Settings** tab > **API Keys**._
+
+
+## Word Choice
+
+***
+
+
+## Use US spellings and avoid non-English words
+
+Symbl’s Styleguide practices US English which has many dialects and variations depending on geography, but for most purposes, use "General American" English.
+
+Follow these 3 main guidelines in Symbl content, when writing or editing in American English:
+
+
+### 1. The spelling of English words varies by locale; use the US spelling.
+
+For example,
+
+* _Use "analyze" not "analyse."_
+* _Use “strategize” not “strategise.”_
+
+
+### 2. Avoid non-English words or phrases.
+
+For example,
+
+* _ad hoc_
+* _de facto_
+
+
+### 3. Avoid Latin acronyms for general English phrases.
+
+For example,
+
+* _e.g. instead use - for example_
+* _i.e. instead use - that is_
+* _viz. instead use - namely_
+* _ergo instead use - therefore_
+
+Exception: It's OK to use etc., in certain situations where the space is limited and restrictive.
+
+
+## References
+
+***
+
+The following writing style guide documents were referred to while creating this document:
+
+* [Microsoft Styleguide](https://docs.microsoft.com/en-us/style-guide/welcome/)
+* [Google Developer Documentation Styleguide](https://developers.google.com/style)
\ No newline at end of file