From 69e7da49787ecf1a20eee4e1faf7541f1875197b Mon Sep 17 00:00:00 2001
From: Angelo Paparazzi
Date: Mon, 21 Dec 2020 14:47:58 -0500
Subject: [PATCH] fix(assistant): adjust agent availability schema

---
 Sources/AssistantV1/Assistant.swift           | 170 ++++++++---------
 .../Models/AgentAvailabilityMessage.swift     |  48 +++++
 ...NodeOutputResponseTypeConnectToAgent.swift |   8 +-
 .../AssistantV1/Models/RuntimeEntity.swift    |  13 +-
 ...ricRuntimeResponseTypeConnectToAgent.swift |   8 +-
 Sources/AssistantV2/Assistant.swift           | 174 +++++++++---------
 .../Models/AgentAvailabilityMessage.swift     |  34 ++++
 ...ricRuntimeResponseTypeConnectToAgent.swift |   4 +-
 Sources/SpeechToTextV1/SpeechToText.swift     |  17 +-
 .../Models/SupportedFeatures.swift            |   3 +-
 .../TextToSpeechV1/Models/Translation.swift   |   8 +-
 Sources/TextToSpeechV1/Models/Word.swift      |  11 +-
 Sources/TextToSpeechV1/TextToSpeech.swift     |  89 +++++++--
 13 files changed, 369 insertions(+), 218 deletions(-)
 create mode 100644 Sources/AssistantV1/Models/AgentAvailabilityMessage.swift
 create mode 100644 Sources/AssistantV2/Models/AgentAvailabilityMessage.swift

diff --git a/Sources/AssistantV1/Assistant.swift b/Sources/AssistantV1/Assistant.swift
index 92e09d879..160afbc95 100644
--- a/Sources/AssistantV1/Assistant.swift
+++ b/Sources/AssistantV1/Assistant.swift
@@ -15,7 +15,7 @@
 **/
 
 /**
- * IBM OpenAPI SDK Code Generator Version: 99-SNAPSHOT-36b26b63-20201028-122900
+ * IBM OpenAPI SDK Code Generator Version: 99-SNAPSHOT-be3b4618-20201221-123327
 **/
 
 // swiftlint:disable file_length
@@ -263,6 +263,90 @@ public class Assistant {
         // swiftlint:enable identifier_name
     }
 
+    /**
+     Identify intents and entities in multiple user utterances.
+
+     Send multiple user inputs to a workspace in a single request and receive information about the intents and entities
+     recognized in each input. This method is useful for testing and comparing the performance of different workspaces.
+     This method is available only with Premium plans.
+
+     - parameter workspaceID: Unique identifier of the workspace.
+     - parameter input: An array of input utterances to classify.
+     - parameter headers: A dictionary of request headers to be sent with this request.
+     - parameter completionHandler: A function executed when the request completes with a successful result or error
+     */
+    public func bulkClassify(
+        workspaceID: String,
+        input: [BulkClassifyUtterance]? = nil,
+        headers: [String: String]? = nil,
+        completionHandler: @escaping (WatsonResponse<BulkClassifyResponse>?, WatsonError?) -> Void)
+    {
+        // construct body
+        let bulkClassifyRequest = BulkClassifyRequest(
+            input: input)
+        let body: Data?
+        do {
+            body = try JSON.encoder.encodeIfPresent(bulkClassifyRequest)
+        } catch {
+            completionHandler(nil, RestError.serialization(values: "request body"))
+            return
+        }
+
+        // construct header parameters
+        var headerParameters = defaultHeaders
+        let sdkHeaders = Shared.getSDKHeaders(serviceName: serviceName, serviceVersion: serviceVersion, methodName: "bulkClassify")
+        headerParameters.merge(sdkHeaders) { (_, new) in new }
+        headerParameters["Accept"] = "application/json"
+        headerParameters["Content-Type"] = "application/json"
+        if let headers = headers {
+            headerParameters.merge(headers) { (_, new) in new }
+        }
+
+        // construct query parameters
+        var queryParameters = [URLQueryItem]()
+        queryParameters.append(URLQueryItem(name: "version", value: version))
+
+        // construct REST request
+        let path = "/v1/workspaces/\(workspaceID)/bulk_classify"
+        guard let encodedPath = path.addingPercentEncoding(withAllowedCharacters: .urlPathAllowed) else {
+            completionHandler(nil, RestError.urlEncoding(path: path))
+            return
+        }
+
+        // ensure that serviceURL is set
+        guard let serviceEndpoint = serviceURL else {
+            completionHandler(nil, RestError.noEndpoint)
+            return
+        }
+
+        let request = RestRequest(
+            session: session,
+            authenticator: authenticator,
+            errorResponseDecoder: errorResponseDecoder,
+            method: "POST",
+            url: serviceEndpoint + encodedPath,
+            headerParameters: headerParameters,
+            queryItems: queryParameters,
+            messageBody: body
+        )
+
+        // execute REST request
+        request.responseObject(completionHandler: completionHandler)
+    }
+
+    // Private struct for the bulkClassify request body
+    private struct BulkClassifyRequest: Encodable {
+        // swiftlint:disable identifier_name
+        let input: [BulkClassifyUtterance]?
+        init? (input: [BulkClassifyUtterance]? = nil) {
+            if input == nil {
+                return nil
+            }
+            self.input = input
+        }
+        // swiftlint:enable identifier_name
+    }
+
     /**
      List workspaces.
 
@@ -4076,88 +4160,4 @@ public class Assistant {
         request.response(completionHandler: completionHandler)
     }
 
-    /**
-     Identify intents and entities in multiple user utterances.
-
-     Send multiple user inputs to a workspace in a single request and receive information about the intents and entities
-     recognized in each input. This method is useful for testing and comparing the performance of different workspaces.
-     This method is available only with Premium plans.
-
-     - parameter workspaceID: Unique identifier of the workspace.
-     - parameter input: An array of input utterances to classify.
-     - parameter headers: A dictionary of request headers to be sent with this request.
-     - parameter completionHandler: A function executed when the request completes with a successful result or error
-     */
-    public func bulkClassify(
-        workspaceID: String,
-        input: [BulkClassifyUtterance]? = nil,
-        headers: [String: String]? = nil,
-        completionHandler: @escaping (WatsonResponse<BulkClassifyResponse>?, WatsonError?) -> Void)
-    {
-        // construct body
-        let bulkClassifyRequest = BulkClassifyRequest(
-            input: input)
-        let body: Data?
- do { - body = try JSON.encoder.encodeIfPresent(bulkClassifyRequest) - } catch { - completionHandler(nil, RestError.serialization(values: "request body")) - return - } - - // construct header parameters - var headerParameters = defaultHeaders - let sdkHeaders = Shared.getSDKHeaders(serviceName: serviceName, serviceVersion: serviceVersion, methodName: "bulkClassify") - headerParameters.merge(sdkHeaders) { (_, new) in new } - headerParameters["Accept"] = "application/json" - headerParameters["Content-Type"] = "application/json" - if let headers = headers { - headerParameters.merge(headers) { (_, new) in new } - } - - // construct query parameters - var queryParameters = [URLQueryItem]() - queryParameters.append(URLQueryItem(name: "version", value: version)) - - // construct REST request - let path = "/v1/workspaces/\(workspaceID)/bulk_classify" - guard let encodedPath = path.addingPercentEncoding(withAllowedCharacters: .urlPathAllowed) else { - completionHandler(nil, RestError.urlEncoding(path: path)) - return - } - - // ensure that serviceURL is set - guard let serviceEndpoint = serviceURL else { - completionHandler(nil, RestError.noEndpoint) - return - } - - let request = RestRequest( - session: session, - authenticator: authenticator, - errorResponseDecoder: errorResponseDecoder, - method: "POST", - url: serviceEndpoint + encodedPath, - headerParameters: headerParameters, - queryItems: queryParameters, - messageBody: body - ) - - // execute REST request - request.responseObject(completionHandler: completionHandler) - } - - // Private struct for the bulkClassify request body - private struct BulkClassifyRequest: Encodable { - // swiftlint:disable identifier_name - let input: [BulkClassifyUtterance]? - init? (input: [BulkClassifyUtterance]? = nil) { - if input == nil { - return nil - } - self.input = input - } - // swiftlint:enable identifier_name - } - } diff --git a/Sources/AssistantV1/Models/AgentAvailabilityMessage.swift b/Sources/AssistantV1/Models/AgentAvailabilityMessage.swift new file mode 100644 index 000000000..1d534eded --- /dev/null +++ b/Sources/AssistantV1/Models/AgentAvailabilityMessage.swift @@ -0,0 +1,48 @@ +/** + * (C) Copyright IBM Corp. 2020. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + **/ + +import Foundation + +/** + AgentAvailabilityMessage. + */ +public struct AgentAvailabilityMessage: Codable, Equatable { + + /** + The text of the message. + */ + public var message: String? + + // Map each property name to the key that shall be used for encoding/decoding. + private enum CodingKeys: String, CodingKey { + case message = "message" + } + + /** + Initialize a `AgentAvailabilityMessage` with member variables. + + - parameter message: The text of the message. + + - returns: An initialized `AgentAvailabilityMessage`. + */ + public init( + message: String? 
= nil + ) + { + self.message = message + } + +} diff --git a/Sources/AssistantV1/Models/DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent.swift b/Sources/AssistantV1/Models/DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent.swift index 1ec6bb573..f1bd8d44f 100644 --- a/Sources/AssistantV1/Models/DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent.swift +++ b/Sources/AssistantV1/Models/DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent.swift @@ -47,13 +47,13 @@ public struct DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent: An optional message to be displayed to the user to indicate that the conversation will be transferred to the next available agent. */ - public var agentAvailable: String? + public var agentAvailable: AgentAvailabilityMessage? /** An optional message to be displayed to the user to indicate that no online agent is available to take over the conversation. */ - public var agentUnavailable: String? + public var agentUnavailable: AgentAvailabilityMessage? /** Routing or other contextual information to be used by target service desk systems. @@ -87,8 +87,8 @@ public struct DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent: public init( responseType: String, messageToHumanAgent: String? = nil, - agentAvailable: String? = nil, - agentUnavailable: String? = nil, + agentAvailable: AgentAvailabilityMessage? = nil, + agentUnavailable: AgentAvailabilityMessage? = nil, transferInfo: DialogNodeOutputConnectToAgentTransferInfo? = nil ) { diff --git a/Sources/AssistantV1/Models/RuntimeEntity.swift b/Sources/AssistantV1/Models/RuntimeEntity.swift index 999d60b79..797a0475a 100644 --- a/Sources/AssistantV1/Models/RuntimeEntity.swift +++ b/Sources/AssistantV1/Models/RuntimeEntity.swift @@ -54,10 +54,9 @@ public struct RuntimeEntity: Codable, Equatable { public var groups: [CaptureGroup]? /** - An object containing detailed information about the entity recognized in the user input. This property is included - only if the new system entities are enabled for the workspace. - For more information about how the new system entities are interpreted, see the - [documentation](https://cloud.ibm.com/docs/assistant?topic=assistant-beta-system-entities). + An object containing detailed information about the entity recognized in the user input. + For more information about how system entities are interpreted, see the + [documentation](https://cloud.ibm.com/docs/assistant?topic=assistant-system-entities). */ public var interpretation: RuntimeEntityInterpretation? @@ -100,9 +99,9 @@ public struct RuntimeEntity: Codable, Equatable { - parameter metadata: Any metadata for the entity. - parameter groups: The recognized capture groups for the entity, as defined by the entity pattern. - parameter interpretation: An object containing detailed information about the entity recognized in the user - input. This property is included only if the new system entities are enabled for the workspace. - For more information about how the new system entities are interpreted, see the - [documentation](https://cloud.ibm.com/docs/assistant?topic=assistant-beta-system-entities). + input. + For more information about how system entities are interpreted, see the + [documentation](https://cloud.ibm.com/docs/assistant?topic=assistant-system-entities). - parameter alternatives: An array of possible alternative values that the user might have intended instead of the value returned in the **value** property. 
     This property is returned only for `@sys-time` and `@sys-date` entities when the user's input is ambiguous.
diff --git a/Sources/AssistantV1/Models/RuntimeResponseGenericRuntimeResponseTypeConnectToAgent.swift b/Sources/AssistantV1/Models/RuntimeResponseGenericRuntimeResponseTypeConnectToAgent.swift
index de27dbb93..ff8a438e7 100644
--- a/Sources/AssistantV1/Models/RuntimeResponseGenericRuntimeResponseTypeConnectToAgent.swift
+++ b/Sources/AssistantV1/Models/RuntimeResponseGenericRuntimeResponseTypeConnectToAgent.swift
@@ -47,13 +47,13 @@ public struct RuntimeResponseGenericRuntimeResponseTypeConnectToAgent: Codable,
      An optional message to be displayed to the user to indicate that the conversation will be transferred to the next
      available agent.
      */
-    public var agentAvailable: String?
+    public var agentAvailable: AgentAvailabilityMessage?
 
     /**
      An optional message to be displayed to the user to indicate that no online agent is available to take over the
      conversation.
      */
-    public var agentUnavailable: String?
+    public var agentUnavailable: AgentAvailabilityMessage?
 
     /**
      Routing or other contextual information to be used by target service desk systems.
@@ -105,8 +105,8 @@ public struct RuntimeResponseGenericRuntimeResponseTypeConnectToAgent: Codable,
     public init(
         responseType: String,
         messageToHumanAgent: String? = nil,
-        agentAvailable: String? = nil,
-        agentUnavailable: String? = nil,
+        agentAvailable: AgentAvailabilityMessage? = nil,
+        agentUnavailable: AgentAvailabilityMessage? = nil,
         transferInfo: DialogNodeOutputConnectToAgentTransferInfo? = nil,
         topic: String? = nil,
         dialogNode: String? = nil
diff --git a/Sources/AssistantV2/Assistant.swift b/Sources/AssistantV2/Assistant.swift
index cf21ddcb0..0b4af2630 100644
--- a/Sources/AssistantV2/Assistant.swift
+++ b/Sources/AssistantV2/Assistant.swift
@@ -15,7 +15,7 @@
 **/
 
 /**
- * IBM OpenAPI SDK Code Generator Version: 99-SNAPSHOT-36b26b63-20201028-122900
+ * IBM OpenAPI SDK Code Generator Version: 99-SNAPSHOT-be3b4618-20201221-123327
 **/
 
 // swiftlint:disable file_length
@@ -452,6 +452,92 @@ public class Assistant {
         // swiftlint:enable identifier_name
     }
 
+    /**
+     Identify intents and entities in multiple user utterances.
+
+     Send multiple user inputs to a dialog skill in a single request and receive information about the intents and
+     entities recognized in each input. This method is useful for testing and comparing the performance of different
+     skills or skill versions.
+     This method is available only with Premium plans.
+
+     - parameter skillID: Unique identifier of the skill. To find the skill ID in the Watson Assistant user interface,
+     open the skill settings and click **API Details**.
+     - parameter input: An array of input utterances to classify.
+     - parameter headers: A dictionary of request headers to be sent with this request.
+     - parameter completionHandler: A function executed when the request completes with a successful result or error
+     */
+    public func bulkClassify(
+        skillID: String,
+        input: [BulkClassifyUtterance]? = nil,
+        headers: [String: String]? = nil,
+        completionHandler: @escaping (WatsonResponse<BulkClassifyResponse>?, WatsonError?) -> Void)
+    {
+        // construct body
+        let bulkClassifyRequest = BulkClassifyRequest(
+            input: input)
+        let body: Data?
+        do {
+            body = try JSON.encoder.encodeIfPresent(bulkClassifyRequest)
+        } catch {
+            completionHandler(nil, RestError.serialization(values: "request body"))
+            return
+        }
+
+        // construct header parameters
+        var headerParameters = defaultHeaders
+        let sdkHeaders = Shared.getSDKHeaders(serviceName: serviceName, serviceVersion: serviceVersion, methodName: "bulkClassify")
+        headerParameters.merge(sdkHeaders) { (_, new) in new }
+        headerParameters["Accept"] = "application/json"
+        headerParameters["Content-Type"] = "application/json"
+        if let headers = headers {
+            headerParameters.merge(headers) { (_, new) in new }
+        }
+
+        // construct query parameters
+        var queryParameters = [URLQueryItem]()
+        queryParameters.append(URLQueryItem(name: "version", value: version))
+
+        // construct REST request
+        let path = "/v2/skills/\(skillID)/workspace/bulk_classify"
+        guard let encodedPath = path.addingPercentEncoding(withAllowedCharacters: .urlPathAllowed) else {
+            completionHandler(nil, RestError.urlEncoding(path: path))
+            return
+        }
+
+        // ensure that serviceURL is set
+        guard let serviceEndpoint = serviceURL else {
+            completionHandler(nil, RestError.noEndpoint)
+            return
+        }
+
+        let request = RestRequest(
+            session: session,
+            authenticator: authenticator,
+            errorResponseDecoder: errorResponseDecoder,
+            method: "POST",
+            url: serviceEndpoint + encodedPath,
+            headerParameters: headerParameters,
+            queryItems: queryParameters,
+            messageBody: body
+        )
+
+        // execute REST request
+        request.responseObject(completionHandler: completionHandler)
+    }
+
+    // Private struct for the bulkClassify request body
+    private struct BulkClassifyRequest: Encodable {
+        // swiftlint:disable identifier_name
+        let input: [BulkClassifyUtterance]?
+        init? (input: [BulkClassifyUtterance]? = nil) {
+            if input == nil {
+                return nil
+            }
+            self.input = input
+        }
+        // swiftlint:enable identifier_name
+    }
+
     /**
      List log events for an assistant.
 
@@ -592,90 +678,4 @@ public class Assistant {
         request.response(completionHandler: completionHandler)
     }
 
-    /**
-     Identify intents and entities in multiple user utterances.
-
-     Send multiple user inputs to a dialog skill in a single request and receive information about the intents and
-     entities recognized in each input. This method is useful for testing and comparing the performance of different
-     skills or skill versions.
-     This method is available only with Premium plans.
-
-     - parameter skillID: Unique identifier of the skill. To find the skill ID in the Watson Assistant user interface,
-     open the skill settings and click **API Details**.
-     - parameter input: An array of input utterances to classify.
-     - parameter headers: A dictionary of request headers to be sent with this request.
-     - parameter completionHandler: A function executed when the request completes with a successful result or error
-     */
-    public func bulkClassify(
-        skillID: String,
-        input: [BulkClassifyUtterance]? = nil,
-        headers: [String: String]? = nil,
-        completionHandler: @escaping (WatsonResponse<BulkClassifyResponse>?, WatsonError?) -> Void)
-    {
-        // construct body
-        let bulkClassifyRequest = BulkClassifyRequest(
-            input: input)
-        let body: Data?
- do { - body = try JSON.encoder.encodeIfPresent(bulkClassifyRequest) - } catch { - completionHandler(nil, RestError.serialization(values: "request body")) - return - } - - // construct header parameters - var headerParameters = defaultHeaders - let sdkHeaders = Shared.getSDKHeaders(serviceName: serviceName, serviceVersion: serviceVersion, methodName: "bulkClassify") - headerParameters.merge(sdkHeaders) { (_, new) in new } - headerParameters["Accept"] = "application/json" - headerParameters["Content-Type"] = "application/json" - if let headers = headers { - headerParameters.merge(headers) { (_, new) in new } - } - - // construct query parameters - var queryParameters = [URLQueryItem]() - queryParameters.append(URLQueryItem(name: "version", value: version)) - - // construct REST request - let path = "/v2/skills/\(skillID)/workspace/bulk_classify" - guard let encodedPath = path.addingPercentEncoding(withAllowedCharacters: .urlPathAllowed) else { - completionHandler(nil, RestError.urlEncoding(path: path)) - return - } - - // ensure that serviceURL is set - guard let serviceEndpoint = serviceURL else { - completionHandler(nil, RestError.noEndpoint) - return - } - - let request = RestRequest( - session: session, - authenticator: authenticator, - errorResponseDecoder: errorResponseDecoder, - method: "POST", - url: serviceEndpoint + encodedPath, - headerParameters: headerParameters, - queryItems: queryParameters, - messageBody: body - ) - - // execute REST request - request.responseObject(completionHandler: completionHandler) - } - - // Private struct for the bulkClassify request body - private struct BulkClassifyRequest: Encodable { - // swiftlint:disable identifier_name - let input: [BulkClassifyUtterance]? - init? (input: [BulkClassifyUtterance]? = nil) { - if input == nil { - return nil - } - self.input = input - } - // swiftlint:enable identifier_name - } - } diff --git a/Sources/AssistantV2/Models/AgentAvailabilityMessage.swift b/Sources/AssistantV2/Models/AgentAvailabilityMessage.swift new file mode 100644 index 000000000..f650f7cf2 --- /dev/null +++ b/Sources/AssistantV2/Models/AgentAvailabilityMessage.swift @@ -0,0 +1,34 @@ +/** + * (C) Copyright IBM Corp. 2020. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + **/ + +import Foundation + +/** + AgentAvailabilityMessage. + */ +public struct AgentAvailabilityMessage: Codable, Equatable { + + /** + The text of the message. + */ + public var message: String? + + // Map each property name to the key that shall be used for encoding/decoding. 
+ private enum CodingKeys: String, CodingKey { + case message = "message" + } + +} diff --git a/Sources/AssistantV2/Models/RuntimeResponseGenericRuntimeResponseTypeConnectToAgent.swift b/Sources/AssistantV2/Models/RuntimeResponseGenericRuntimeResponseTypeConnectToAgent.swift index 077a52f30..5c0ab4c5a 100644 --- a/Sources/AssistantV2/Models/RuntimeResponseGenericRuntimeResponseTypeConnectToAgent.swift +++ b/Sources/AssistantV2/Models/RuntimeResponseGenericRuntimeResponseTypeConnectToAgent.swift @@ -47,13 +47,13 @@ public struct RuntimeResponseGenericRuntimeResponseTypeConnectToAgent: Codable, An optional message to be displayed to the user to indicate that the conversation will be transferred to the next available agent. */ - public var agentAvailable: String? + public var agentAvailable: AgentAvailabilityMessage? /** An optional message to be displayed to the user to indicate that no online agent is available to take over the conversation. */ - public var agentUnavailable: String? + public var agentUnavailable: AgentAvailabilityMessage? /** Routing or other contextual information to be used by target service desk systems. diff --git a/Sources/SpeechToTextV1/SpeechToText.swift b/Sources/SpeechToTextV1/SpeechToText.swift index 4915e4796..1130269fd 100644 --- a/Sources/SpeechToTextV1/SpeechToText.swift +++ b/Sources/SpeechToTextV1/SpeechToText.swift @@ -15,7 +15,7 @@ **/ /** - * IBM OpenAPI SDK Code Generator Version: 99-SNAPSHOT-36b26b63-20201028-122900 + * IBM OpenAPI SDK Code Generator Version: 99-SNAPSHOT-be3b4618-20201221-123327 **/ // swiftlint:disable file_length @@ -196,7 +196,7 @@ public class SpeechToText { **See also:** [Languages and models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models#models). - parameter modelID: The identifier of the model in the form of its name from the output of the **Get a model** - method. + method. (**Note:** The model `ar-AR_BroadbandModel` is deprecated; use `ar-MS_BroadbandModel` instead.). - parameter headers: A dictionary of request headers to be sent with this request. - parameter completionHandler: A function executed when the request completes with a successful result or error */ @@ -305,7 +305,8 @@ public class SpeechToText { - parameter audio: The audio to transcribe. - parameter contentType: The format (MIME type) of the audio. For more information about specifying an audio format, see **Audio formats (content types)** in the method description. - - parameter model: The identifier of the model that is to be used for the recognition request. See [Languages and + - parameter model: The identifier of the model that is to be used for the recognition request. (**Note:** The + model `ar-AR_BroadbandModel` is deprecated; use `ar-MS_BroadbandModel` instead.) See [Languages and models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models#models). - parameter languageCustomizationID: The customization ID (GUID) of a custom language model that is to be used with the recognition request. The base model of the specified custom language model must match the model @@ -811,7 +812,8 @@ public class SpeechToText { - parameter audio: The audio to transcribe. - parameter contentType: The format (MIME type) of the audio. For more information about specifying an audio format, see **Audio formats (content types)** in the method description. - - parameter model: The identifier of the model that is to be used for the recognition request. 
See [Languages and + - parameter model: The identifier of the model that is to be used for the recognition request. (**Note:** The + model `ar-AR_BroadbandModel` is deprecated; use `ar-MS_BroadbandModel` instead.) See [Languages and models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models#models). - parameter callbackURL: A URL to which callback notifications are to be sent. The URL must already be successfully allowlisted by using the **Register a callback** method. You can include the same callback URL with @@ -1452,7 +1454,7 @@ public class SpeechToText { - parameter language: The identifier of the language for which custom language or custom acoustic models are to be returned. Omit the parameter to see all custom language or custom acoustic models that are owned by the - requesting credentials. + requesting credentials. (**Note:** The identifier `ar-AR` is deprecated; use `ar-MS` instead.) To determine the languages for which customization is available, see [Language support for customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-customization#languageSupport). - parameter headers: A dictionary of request headers to be sent with this request. @@ -2852,7 +2854,8 @@ public class SpeechToText { name that describes the acoustic environment of the custom model, such as `Mobile custom model` or `Noisy car custom model`. - parameter baseModelName: The name of the base language model that is to be customized by the new custom - acoustic model. The new custom model can be used only with the base model that it customizes. + acoustic model. The new custom model can be used only with the base model that it customizes. (**Note:** The + model `ar-AR_BroadbandModel` is deprecated; use `ar-MS_BroadbandModel` instead.) To determine whether a base model supports acoustic model customization, refer to [Language support for customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-customization#languageSupport). - parameter description: A description of the new custom acoustic model. Use a localized description that matches @@ -2930,7 +2933,7 @@ public class SpeechToText { - parameter language: The identifier of the language for which custom language or custom acoustic models are to be returned. Omit the parameter to see all custom language or custom acoustic models that are owned by the - requesting credentials. + requesting credentials. (**Note:** The identifier `ar-AR` is deprecated; use `ar-MS` instead.) To determine the languages for which customization is available, see [Language support for customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-customization#languageSupport). - parameter headers: A dictionary of request headers to be sent with this request. diff --git a/Sources/TextToSpeechV1/Models/SupportedFeatures.swift b/Sources/TextToSpeechV1/Models/SupportedFeatures.swift index ff6682656..48273aa53 100644 --- a/Sources/TextToSpeechV1/Models/SupportedFeatures.swift +++ b/Sources/TextToSpeechV1/Models/SupportedFeatures.swift @@ -28,7 +28,8 @@ public struct SupportedFeatures: Codable, Equatable { /** If `true`, the voice can be transformed by using the SSML <voice-transformation> element; if `false`, the - voice cannot be transformed. + voice cannot be transformed. The feature was available only for the now-deprecated standard voices. You cannot use + the feature with neural voices. 
*/ public var voiceTransformation: Bool diff --git a/Sources/TextToSpeechV1/Models/Translation.swift b/Sources/TextToSpeechV1/Models/Translation.swift index e893e6c32..9b0a6cb35 100644 --- a/Sources/TextToSpeechV1/Models/Translation.swift +++ b/Sources/TextToSpeechV1/Models/Translation.swift @@ -50,8 +50,8 @@ public struct Translation: Codable, Equatable { /** The phonetic or sounds-like translation for the word. A phonetic translation is based on the SSML format for representing the phonetic string of a word either as an IPA translation or as an IBM SPR translation. The Arabic, - Chinese, Dutch, and Korean languages support only IPA. A sounds-like is one or more words that, when combined, - sound like the word. + Chinese, Dutch, Australian English, and Korean languages support only IPA. A sounds-like is one or more words that, + when combined, sound like the word. */ public var translation: String @@ -74,8 +74,8 @@ public struct Translation: Codable, Equatable { - parameter translation: The phonetic or sounds-like translation for the word. A phonetic translation is based on the SSML format for representing the phonetic string of a word either as an IPA translation or as an IBM SPR - translation. The Arabic, Chinese, Dutch, and Korean languages support only IPA. A sounds-like is one or more - words that, when combined, sound like the word. + translation. The Arabic, Chinese, Dutch, Australian English, and Korean languages support only IPA. A sounds-like + is one or more words that, when combined, sound like the word. - parameter partOfSpeech: **Japanese only.** The part of speech for the word. The service uses the value to produce the correct intonation for the word. You can create only a single entry, with or without a single part of speech, for any word; you cannot create multiple entries with different parts of speech for the same word. For diff --git a/Sources/TextToSpeechV1/Models/Word.swift b/Sources/TextToSpeechV1/Models/Word.swift index 22d893c6e..a6e980291 100644 --- a/Sources/TextToSpeechV1/Models/Word.swift +++ b/Sources/TextToSpeechV1/Models/Word.swift @@ -54,9 +54,9 @@ public struct Word: Codable, Equatable { /** The phonetic or sounds-like translation for the word. A phonetic translation is based on the SSML format for - representing the phonetic string of a word either as an IPA or IBM SPR translation. The Arabic, Chinese, Dutch, and - Korean languages support only IPA. A sounds-like translation consists of one or more words that, when combined, - sound like the word. The maximum length of a translation is 499 characters. + representing the phonetic string of a word either as an IPA or IBM SPR translation. The Arabic, Chinese, Dutch, + Australian English, and Korean languages support only IPA. A sounds-like translation consists of one or more words + that, when combined, sound like the word. The maximum length of a translation is 499 characters. */ public var translation: String @@ -81,8 +81,9 @@ public struct Word: Codable, Equatable { - parameter word: The word for the custom model. The maximum length of a word is 49 characters. - parameter translation: The phonetic or sounds-like translation for the word. A phonetic translation is based on the SSML format for representing the phonetic string of a word either as an IPA or IBM SPR translation. The - Arabic, Chinese, Dutch, and Korean languages support only IPA. A sounds-like translation consists of one or more - words that, when combined, sound like the word. The maximum length of a translation is 499 characters. 
+     Arabic, Chinese, Dutch, Australian English, and Korean languages support only IPA. A sounds-like translation
+     consists of one or more words that, when combined, sound like the word. The maximum length of a translation is
+     499 characters.
     - parameter partOfSpeech: **Japanese only.** The part of speech for the word. The service uses the value to
     produce the correct intonation for the word. You can create only a single entry, with or without a single part
     of speech, for any word; you cannot create multiple entries with different parts of speech for the same word. For
diff --git a/Sources/TextToSpeechV1/TextToSpeech.swift b/Sources/TextToSpeechV1/TextToSpeech.swift
index f82b15c58..0927e134c 100644
--- a/Sources/TextToSpeechV1/TextToSpeech.swift
+++ b/Sources/TextToSpeechV1/TextToSpeech.swift
@@ -15,7 +15,7 @@
 **/
 
 /**
- * IBM OpenAPI SDK Code Generator Version: 99-SNAPSHOT-36b26b63-20201028-122900
+ * IBM OpenAPI SDK Code Generator Version: 99-SNAPSHOT-be3b4618-20201221-123327
 **/
 
 // swiftlint:disable file_length
@@ -40,7 +40,7 @@ public typealias WatsonResponse<T> = RestResponse<T>
 A sounds-like translation consists of one or more words that, when combined, sound like the word. A phonetic
 translation is based on the SSML phoneme format for representing a word. You can specify a phonetic translation in
 standard International Phonetic Alphabet (IPA) representation or in the proprietary IBM Symbolic Phonetic
- Representation (SPR). The Arabic, Chinese, Dutch, and Korean languages support only IPA.
+ Representation (SPR). The Arabic, Chinese, Dutch, Australian English, and Korean languages support only IPA.
 */
 public class TextToSpeech {
@@ -194,8 +194,24 @@ public class TextToSpeech {
     language of the specified voice. To list information about all available voices, use the **List voices** method.
     **See also:** [Listing a specific
 voice](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-voices#listVoice).
-
-     - parameter voice: The voice for which information is to be returned.
+     ### Important voice updates
+     The service's voices underwent significant change on 2 December 2020.
+     * The Arabic, Chinese, Dutch, Australian English, and Korean voices are now neural instead of concatenative.
+     * The `ar-AR_OmarVoice` voice is deprecated. Use `ar-MS_OmarVoice` voice instead.
+     * The `ar-AR` language identifier cannot be used to create a custom model. Use the `ar-MS` identifier instead.
+     * The standard concatenative voices for the following languages are now deprecated: Brazilian Portuguese, United
+     Kingdom and United States English, French, German, Italian, Japanese, and Spanish (all dialects).
+     * The features expressive SSML, voice transformation SSML, and use of the `volume` attribute of the `<prosody>`
+     element are deprecated and are not supported with any of the service's neural voices.
+     * All of the service's voices are now customizable and generally available (GA) for production use.
+     The deprecated voices and features will continue to function for at least one year but might be removed at a future
+     date. You are encouraged to migrate to the equivalent neural voices at your earliest convenience. For more
+     information about all voice updates, see the [2 December 2020 service
+     update](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-release-notes#December2020) in the release
+     notes.
+
+     - parameter voice: The voice for which information is to be returned. For more information about specifying a
+     voice, see **Important voice updates** in the method description.
     - parameter customizationID: The customization ID (GUID) of a custom model for which information is to be
     returned. You must make the request with credentials for the instance of the service that owns the custom model.
     Omit the parameter to see information about the specified voice with no customization.
@@ -293,6 +309,21 @@ public class TextToSpeech {
     22,050 Hz. For more information about specifying an audio format, including additional details about some of the
     formats, see [Audio
 formats](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-audioFormats#audioFormats).
+     ### Important voice updates
+     The service's voices underwent significant change on 2 December 2020.
+     * The Arabic, Chinese, Dutch, Australian English, and Korean voices are now neural instead of concatenative.
+     * The `ar-AR_OmarVoice` voice is deprecated. Use `ar-MS_OmarVoice` voice instead.
+     * The `ar-AR` language identifier cannot be used to create a custom model. Use the `ar-MS` identifier instead.
+     * The standard concatenative voices for the following languages are now deprecated: Brazilian Portuguese, United
+     Kingdom and United States English, French, German, Italian, Japanese, and Spanish (all dialects).
+     * The features expressive SSML, voice transformation SSML, and use of the `volume` attribute of the `<prosody>`
+     element are deprecated and are not supported with any of the service's neural voices.
+     * All of the service's voices are now customizable and generally available (GA) for production use.
+     The deprecated voices and features will continue to function for at least one year but might be removed at a future
+     date. You are encouraged to migrate to the equivalent neural voices at your earliest convenience. For more
+     information about all voice updates, see the [2 December 2020 service
+     update](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-release-notes#December2020) in the release
+     notes.
     ### Warning messages
     If a request includes invalid query parameters, the service returns a `Warnings` response header that provides
     messages about the invalid parameters. The warning includes a descriptive message and a list of invalid argument
@@ -303,7 +334,8 @@ public class TextToSpeech {
     - parameter text: The text to synthesize.
     - parameter accept: The requested format (MIME type) of the audio. You can use the `Accept` header or the
     `accept` parameter to specify the audio format. For more information about specifying an audio format, see
     **Audio formats (accept types)** in the method description.
-     - parameter voice: The voice to use for synthesis.
+     - parameter voice: The voice to use for synthesis. For more information about specifying a voice, see **Important
+     voice updates** in the method description.
     - parameter customizationID: The customization ID (GUID) of a custom model to use for the synthesis. If a custom
     model is specified, it works only if it matches the language of the indicated voice. You must make the request
     with credentials for the instance of the service that owns the custom model. Omit the parameter to use the
@@ -388,12 +420,29 @@ public class TextToSpeech {
     voice or for a specific custom model to see the translation for that model.
     **See also:** [Querying a word from a
 language](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-customWords#cuWordsQueryLanguage).
+     ### Important voice updates
+     The service's voices underwent significant change on 2 December 2020.
+     * The Arabic, Chinese, Dutch, Australian English, and Korean voices are now neural instead of concatenative.
+     * The `ar-AR_OmarVoice` voice is deprecated. Use `ar-MS_OmarVoice` voice instead.
+     * The `ar-AR` language identifier cannot be used to create a custom model. Use the `ar-MS` identifier instead.
+     * The standard concatenative voices for the following languages are now deprecated: Brazilian Portuguese, United
+     Kingdom and United States English, French, German, Italian, Japanese, and Spanish (all dialects).
+     * The features expressive SSML, voice transformation SSML, and use of the `volume` attribute of the `<prosody>`
+     element are deprecated and are not supported with any of the service's neural voices.
+     * All of the service's voices are now customizable and generally available (GA) for production use.
+     The deprecated voices and features will continue to function for at least one year but might be removed at a future
+     date. You are encouraged to migrate to the equivalent neural voices at your earliest convenience. For more
+     information about all voice updates, see the [2 December 2020 service
+     update](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-release-notes#December2020) in the release
+     notes.
 
     - parameter text: The word for which the pronunciation is requested.
     - parameter voice: A voice that specifies the language in which the pronunciation is to be returned. All voices
-     for the same language (for example, `en-US`) return the same translation.
-     - parameter format: The phoneme format in which to return the pronunciation. The Arabic, Chinese, Dutch, and
-     Korean languages support only IPA. Omit the parameter to obtain the pronunciation in the default format.
+     for the same language (for example, `en-US`) return the same translation. For more information about specifying a
+     voice, see **Important voice updates** in the method description.
+     - parameter format: The phoneme format in which to return the pronunciation. The Arabic, Chinese, Dutch,
+     Australian English, and Korean languages support only IPA. Omit the parameter to obtain the pronunciation in the
+     default format.
     - parameter customizationID: The customization ID (GUID) of a custom model for which the pronunciation is to be
     returned. The language of a specified custom model must match the language of the specified voice. If the word is
     not defined in the specified custom model, the service returns the default translation for the custom model's
@@ -465,11 +514,27 @@ public class TextToSpeech {
     are used to create it.
     **See also:** [Creating a custom
 model](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-customModels#cuModelsCreate).
+     ### Important voice updates
+     The service's voices underwent significant change on 2 December 2020.
+     * The Arabic, Chinese, Dutch, Australian English, and Korean voices are now neural instead of concatenative.
+     * The `ar-AR_OmarVoice` voice is deprecated. Use `ar-MS_OmarVoice` voice instead.
+     * The `ar-AR` language identifier cannot be used to create a custom model. Use the `ar-MS` identifier instead.
+     * The standard concatenative voices for the following languages are now deprecated: Brazilian Portuguese, United
+     Kingdom and United States English, French, German, Italian, Japanese, and Spanish (all dialects).
+     * The features expressive SSML, voice transformation SSML, and use of the `volume` attribute of the `<prosody>`
+     element are deprecated and are not supported with any of the service's neural voices.
+     * All of the service's voices are now customizable and generally available (GA) for production use.
+     The deprecated voices and features will continue to function for at least one year but might be removed at a future
+     date. You are encouraged to migrate to the equivalent neural voices at your earliest convenience. For more
+     information about all voice updates, see the [2 December 2020 service
+     update](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-release-notes#December2020) in the release
+     notes.
 
     - parameter name: The name of the new custom model.
     - parameter language: The language of the new custom model. You create a custom model for a specific language,
-     not for a specific voice. A custom model can be used with any voice, standard or neural, for its specified
-     language. Omit the parameter to use the the default language, `en-US`.
+     not for a specific voice. A custom model can be used with any voice for its specified language. Omit the
+     parameter to use the default language, `en-US`. **Note:** The `ar-AR` language identifier cannot be used to
+     create a custom model. Use the `ar-MS` identifier instead.
     - parameter description: A description of the new custom model. Specifying a description is recommended.
    - parameter headers: A dictionary of request headers to be sent with this request.
    - parameter completionHandler: A function executed when the request completes with a successful result or error
@@ -959,8 +1024,8 @@ public class TextToSpeech {
     - parameter word: The word that is to be added or updated for the custom model.
     - parameter translation: The phonetic or sounds-like translation for the word. A phonetic translation is based
     on the SSML format for representing the phonetic string of a word either as an IPA translation or as an IBM SPR
-     translation. The Arabic, Chinese, Dutch, and Korean languages support only IPA. A sounds-like is one or more
-     words that, when combined, sound like the word.
+     translation. The Arabic, Chinese, Dutch, Australian English, and Korean languages support only IPA. A sounds-like
+     is one or more words that, when combined, sound like the word.
    - parameter partOfSpeech: **Japanese only.** The part of speech for the word. The service uses the value to
    produce the correct intonation for the word. You can create only a single entry, with or without a single part of
    speech, for any word; you cannot create multiple entries with different parts of speech for the same word. For
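
Usage note (illustrative, not part of the patch): a minimal sketch of how caller code adapts to the
agent-availability schema change in AssistantV1, assuming a build of this branch; the response-type
and message strings are placeholders.

import AssistantV1

// agentAvailable/agentUnavailable were plain String? before this patch; they are
// now AgentAvailabilityMessage values, so the display text lives in `.message`.
let connectToAgent = DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent(
    responseType: "connect_to_agent",
    messageToHumanAgent: "User asked for a live agent.", // illustrative text
    agentAvailable: AgentAvailabilityMessage(message: "An agent will be with you shortly."),
    agentUnavailable: AgentAvailabilityMessage(message: "No agents are available right now.")
)

// Before this patch: let banner: String? = connectToAgent.agentAvailable
// After this patch, unwrap the nested message text:
let banner: String? = connectToAgent.agentAvailable?.message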
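
Likewise, a hedged sketch of calling the bulkClassify method that this patch moves into AssistantV1
(the AssistantV2 variant is identical except that it takes skillID:). It assumes `assistant` is an
authenticated AssistantV1.Assistant, that BulkClassifyUtterance keeps its single text: initializer,
and that "{workspace_id}" is a placeholder; the endpoint requires a Premium plan.

import AssistantV1

// Classify several utterances against one workspace in a single request.
let utterances = [
    BulkClassifyUtterance(text: "I want to pay my bill"),
    BulkClassifyUtterance(text: "Please cancel my subscription"),
]
assistant.bulkClassify(workspaceID: "{workspace_id}", input: utterances) { response, error in
    if let error = error {
        print("bulkClassify failed: \(error)")
        return
    }
    // The result is a BulkClassifyResponse pairing each input utterance
    // with the intents and entities recognized in it.
    print(response?.result as Any)
}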
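
Finally, for the TextToSpeechV1 deprecations documented above, a sketch of synthesizing with the
replacement neural voice identifier. It assumes `textToSpeech` is an authenticated
TextToSpeechV1.TextToSpeech instance and the existing synthesize(text:accept:voice:) API with a
Data result; the output path is a placeholder.

import Foundation
import TextToSpeechV1

// `ar-AR_OmarVoice` is deprecated per this patch; `ar-MS_OmarVoice` replaces it.
textToSpeech.synthesize(
    text: "مرحبا بالعالم",
    accept: "audio/wav",
    voice: "ar-MS_OmarVoice"
) { response, error in
    if let audio = response?.result {
        // The result is the synthesized speech as Data; persist or play it.
        try? audio.write(to: URL(fileURLWithPath: "hello.wav"))
    } else if let error = error {
        print("synthesize failed: \(error)")
    }
}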