diff --git a/.bumpversion.cfg b/.bumpversion.cfg
index 08f707c3..3b4781f2 100644
--- a/.bumpversion.cfg
+++ b/.bumpversion.cfg
@@ -1,5 +1,5 @@
 [bumpversion]
-current_version = 5.2.3
+current_version = 5.3.0
 commit = True
 message = Bump version: {current_version} → {new_version} [skip ci]
diff --git a/examples/assistant_v1.py b/examples/assistant_v1.py
index 77d49191..2316e2d3 100644
--- a/examples/assistant_v1.py
+++ b/examples/assistant_v1.py
@@ -13,7 +13,7 @@
 # Authentication via external config like VCAP_SERVICES
 assistant = AssistantV1(version='2018-07-10')
-assistant.set_service_url('https://gateway.watsonplatform.net/assistant/api')
+assistant.set_service_url('https://api.us-south.assistant.watson.cloud.ibm.com')
 #########################
 # Workspaces
diff --git a/examples/assistant_v2.py b/examples/assistant_v2.py
index 90cd6272..af28a1c6 100644
--- a/examples/assistant_v2.py
+++ b/examples/assistant_v2.py
@@ -6,7 +6,7 @@
 assistant = AssistantV2(
     version='2018-09-20',
     authenticator=authenticator)
-assistant.set_service_url('https://gateway.watsonplatform.net/assistant/api')
+assistant.set_service_url('https://api.us-south.assistant.watson.cloud.ibm.com')
 #########################
 # Sessions
diff --git a/examples/compare_comply_v1.py b/examples/compare_comply_v1.py
index 81867d0c..63e33986 100644
--- a/examples/compare_comply_v1.py
+++ b/examples/compare_comply_v1.py
@@ -8,7 +8,7 @@
 compare_comply = CompareComplyV1(
     version='2018-03-23',
     authenticator=authenticator)
-compare_comply.set_service_url('https://gateway.watsonplatform.net/compare-comply/api')
+compare_comply.set_service_url('https://api.us-south.compare-comply.watson.cloud.ibm.com')
 print('Convert to HTML')
 contract = os.path.abspath('resources/contract_A.pdf')
diff --git a/examples/discovery_v1.py b/examples/discovery_v1.py
index 1bf1dad4..4f32acf2 100644
--- a/examples/discovery_v1.py
+++ b/examples/discovery_v1.py
@@ -6,7 +6,7 @@
 discovery = DiscoveryV1(
     version='2018-08-01',
     authenticator=authenticator)
-discovery.set_service_url('https://gateway.watsonplatform.net/discovery/api')
+discovery.set_service_url('https://api.us-south.discovery.watson.cloud.ibm.com')
 environments = discovery.list_environments().get_result()
 print(json.dumps(environments, indent=2))
diff --git a/examples/language_translator_v3.py b/examples/language_translator_v3.py
index 12feb0b1..da7aecf8 100644
--- a/examples/language_translator_v3.py
+++ b/examples/language_translator_v3.py
@@ -7,7 +7,7 @@
 language_translator = LanguageTranslatorV3(
     version='2018-05-01',
     authenticator=authenticator)
-language_translator.set_service_url('https://gateway.watsonplatform.net/language-translator/api')
+language_translator.set_service_url('https://api.us-south.language-translator.watson.cloud.ibm.com')
 ## Translate
 translation = language_translator.translate(
diff --git a/examples/natural_language_classifier_v1.py b/examples/natural_language_classifier_v1.py
index 3cf93c1f..62457e50 100644
--- a/examples/natural_language_classifier_v1.py
+++ b/examples/natural_language_classifier_v1.py
@@ -6,7 +6,7 @@
 authenticator = IAMAuthenticator('your_api_key')
 service = NaturalLanguageClassifierV1(authenticator=authenticator)
-service.set_service_url('https://gateway.watsonplatform.net/natural-language-classifier/api')
+service.set_service_url('https://api.us-south.natural-language-classifier.watson.cloud.ibm.com')
 classifiers = service.list_classifiers().get_result()
 print(json.dumps(classifiers, indent=2))
diff --git a/examples/natural_language_understanding_v1.py b/examples/natural_language_understanding_v1.py
index 87449a6d..570e7482 100644
--- a/examples/natural_language_understanding_v1.py
+++ b/examples/natural_language_understanding_v1.py
@@ -13,7 +13,7 @@
 # Authentication via external config like VCAP_SERVICES
 service = NaturalLanguageUnderstandingV1(
     version='2018-03-16')
-service.set_service_url('https://gateway.watsonplatform.net/natural-language-understanding/api')
+service.set_service_url('https://api.us-south.natural-language-understanding.watson.cloud.ibm.com')
 response = service.analyze(
     text='Bruce Banner is the Hulk and Bruce Wayne is BATMAN! '
diff --git a/examples/speaker_text_to_speech.py b/examples/speaker_text_to_speech.py
index 9faf6fb9..a75495dc 100644
--- a/examples/speaker_text_to_speech.py
+++ b/examples/speaker_text_to_speech.py
@@ -12,7 +12,7 @@
 authenticator = IAMAuthenticator('your_api_key')
 service = TextToSpeechV1(authenticator=authenticator)
-service.set_service_url('https://stream.watsonplatform.net/speech-to-text/api')
+service.set_service_url('https://api.us-south.speech-to-text.watson.cloud.ibm.com')
 class Play(object):
     """
diff --git a/examples/speech_to_text_v1.py b/examples/speech_to_text_v1.py
index 083e7d96..e0b60a33 100644
--- a/examples/speech_to_text_v1.py
+++ b/examples/speech_to_text_v1.py
@@ -7,7 +7,7 @@
 authenticator = IAMAuthenticator('your_api_key')
 service = SpeechToTextV1(authenticator=authenticator)
-service.set_service_url('https://stream.watsonplatform.net/speech-to-text/api')
+service.set_service_url('https://api.us-south.speech-to-text.watson.cloud.ibm.com')
 models = service.list_models().get_result()
 print(json.dumps(models, indent=2))
diff --git a/examples/text_to_speech_v1.py b/examples/text_to_speech_v1.py
index 22c28ec2..0571592d 100644
--- a/examples/text_to_speech_v1.py
+++ b/examples/text_to_speech_v1.py
@@ -7,7 +7,7 @@
 authenticator = IAMAuthenticator('your_api_key')
 service = TextToSpeechV1(authenticator=authenticator)
-service.set_service_url('https://stream.watsonplatform.net/text-to-speech/api')
+service.set_service_url('https://api.us-south.text-to-speech.watson.cloud.ibm.com')
 voices = service.list_voices().get_result()
 print(json.dumps(voices, indent=2))
diff --git a/examples/tone_analyzer_v3.py b/examples/tone_analyzer_v3.py
index bcd32adf..db483318 100755
--- a/examples/tone_analyzer_v3.py
+++ b/examples/tone_analyzer_v3.py
@@ -14,7 +14,7 @@
 # Authentication via external config like VCAP_SERVICES
 service = ToneAnalyzerV3(version='2017-09-21')
-service.set_service_url('https://gateway.watsonplatform.net/tone-analyzer/api')
+service.set_service_url('https://api.us-south.tone-analyzer.watson.cloud.ibm.com')
 print("\ntone_chat() example 1:\n")
 utterances = [{
diff --git a/examples/visual_recognition_v3.py b/examples/visual_recognition_v3.py
index 775edba7..01ddeb7b 100644
--- a/examples/visual_recognition_v3.py
+++ b/examples/visual_recognition_v3.py
@@ -11,7 +11,7 @@
 service = VisualRecognitionV3(
     '2018-03-19',
     authenticator=authenticator)
-service.set_service_url('https://gateway.watsonplatform.net/visual-recognition/api')
+service.set_service_url('https://api.us-south.visual-recognition.watson.cloud.ibm.com')
 # with open(abspath('resources/cars.zip'), 'rb') as cars, \
 #     open(abspath('resources/trucks.zip'), 'rb') as trucks:
diff --git a/examples/visual_recognition_v4.py b/examples/visual_recognition_v4.py
index 66c76aa0..5febf898 100644
--- a/examples/visual_recognition_v4.py
+++ b/examples/visual_recognition_v4.py
@@ -9,7 +9,7 @@
 service = VisualRecognitionV4(
     '2018-03-19',
authenticator=authenticator) -service.set_service_url('https://gateway.watsonplatform.net/visual-recognition/api') +service.set_service_url('https://api.us-south.visual-recognition.watson.cloud.ibm.com') # create a classifier my_collection = service.create_collection( diff --git a/ibm_watson/assistant_v1.py b/ibm_watson/assistant_v1.py index a5f1d967..13f764e6 100644 --- a/ibm_watson/assistant_v1.py +++ b/ibm_watson/assistant_v1.py @@ -14,13 +14,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -# IBM OpenAPI SDK Code Generator Version: 99-SNAPSHOT-902c9336-20210507-162723 +# IBM OpenAPI SDK Code Generator Version: 3.38.0-07189efd-20210827-205025 """ The IBM Watson™ Assistant service combines machine learning, natural language understanding, and an integrated dialog editor to create conversation flows between your apps and your users. The Assistant v1 API provides authoring methods your application can use to create or update a workspace. + +API Version: 1.0 +See: https://cloud.ibm.com/docs/assistant """ from datetime import datetime @@ -57,7 +60,7 @@ def __init__( Construct a new client for the Assistant service. :param str version: Release date of the API version you want to use. - Specify dates in YYYY-MM-DD format. The current version is `2020-04-01`. + Specify dates in YYYY-MM-DD format. The current version is `2021-06-14`. :param Authenticator authenticator: The authenticator specifies the authentication mechanism. Get up to date information from https://github.com/IBM/python-sdk-core/blob/master/README.md @@ -185,7 +188,7 @@ def message(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -245,7 +248,7 @@ def bulk_classify(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -308,7 +311,7 @@ def list_workspaces(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def create_workspace(self, @@ -408,7 +411,7 @@ def create_workspace(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_workspace(self, @@ -467,7 +470,7 @@ def get_workspace(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def update_workspace(self, @@ -588,7 +591,7 @@ def update_workspace(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_workspace(self, workspace_id: str, **kwargs) -> DetailedResponse: @@ -626,7 +629,7 @@ def delete_workspace(self, workspace_id: str, **kwargs) -> DetailedResponse: headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -702,7 +705,7 @@ def list_intents(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def create_intent(self, @@ -774,7 +777,7 @@ def create_intent(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_intent(self, @@ -832,7 +835,7 @@ def get_intent(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return 
response def update_intent(self, @@ -922,7 +925,7 @@ def update_intent(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_intent(self, workspace_id: str, intent: str, @@ -965,7 +968,7 @@ def delete_intent(self, workspace_id: str, intent: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -1041,7 +1044,7 @@ def list_examples(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def create_example(self, @@ -1110,7 +1113,7 @@ def create_example(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_example(self, @@ -1163,7 +1166,7 @@ def get_example(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def update_example(self, @@ -1234,7 +1237,7 @@ def update_example(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_example(self, workspace_id: str, intent: str, text: str, @@ -1280,7 +1283,7 @@ def delete_example(self, workspace_id: str, intent: str, text: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -1352,7 +1355,7 @@ def list_counterexamples(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def create_counterexample(self, @@ -1413,7 +1416,7 @@ def create_counterexample(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_counterexample(self, @@ -1464,7 +1467,7 @@ def get_counterexample(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def update_counterexample(self, @@ -1526,7 +1529,7 @@ def update_counterexample(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_counterexample(self, workspace_id: str, text: str, @@ -1571,7 +1574,7 @@ def delete_counterexample(self, workspace_id: str, text: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -1647,7 +1650,7 @@ def list_entities(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def create_entity(self, @@ -1728,7 +1731,7 @@ def create_entity(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_entity(self, @@ -1786,7 +1789,7 @@ def get_entity(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def update_entity(self, @@ -1883,7 +1886,7 @@ def update_entity(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_entity(self, workspace_id: str, entity: str, @@ -1926,7 +1929,7 @@ def delete_entity(self, workspace_id: str, entity: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) 
return response ######################### @@ -1989,7 +1992,7 @@ def list_mentions(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -2070,7 +2073,7 @@ def list_values(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def create_value(self, @@ -2157,7 +2160,7 @@ def create_value(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_value(self, @@ -2219,7 +2222,7 @@ def get_value(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def update_value(self, @@ -2325,7 +2328,7 @@ def update_value(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_value(self, workspace_id: str, entity: str, value: str, @@ -2371,7 +2374,7 @@ def delete_value(self, workspace_id: str, entity: str, value: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -2450,7 +2453,7 @@ def list_synonyms(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def create_synonym(self, @@ -2519,7 +2522,7 @@ def create_synonym(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_synonym(self, @@ -2577,7 +2580,7 @@ def get_synonym(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def update_synonym(self, @@ -2649,7 +2652,7 @@ def update_synonym(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_synonym(self, workspace_id: str, entity: str, value: str, @@ -2699,7 +2702,7 @@ def delete_synonym(self, workspace_id: str, entity: str, value: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -2770,7 +2773,7 @@ def list_dialog_nodes(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def create_dialog_node(self, @@ -2922,7 +2925,7 @@ def create_dialog_node(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_dialog_node(self, @@ -2972,7 +2975,7 @@ def get_dialog_node(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def update_dialog_node(self, @@ -3129,7 +3132,7 @@ def update_dialog_node(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_dialog_node(self, workspace_id: str, dialog_node: str, @@ -3173,7 +3176,7 @@ def delete_dialog_node(self, workspace_id: str, dialog_node: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -3239,7 +3242,7 @@ def list_logs(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def list_all_logs(self, @@ 
-3258,7 +3261,9 @@ def list_all_logs(self, matching the specified filter. You must specify a filter query that includes a value for `language`, as well as a value for `request.context.system.assistant_id`, `workspace_id`, or - `request.context.metadata.deployment`. For more information, see the + `request.context.metadata.deployment`. These required filters must be + specified using the exact match (`::`) operator. For more information, see + the [documentation](https://cloud.ibm.com/docs/assistant?topic=assistant-filter-reference#filter-reference). :param str sort: (optional) How to sort the returned log events. You can sort by **request_timestamp**. To reverse the sort order, prefix the @@ -3298,7 +3303,7 @@ def list_all_logs(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -3348,7 +3353,7 @@ def delete_user_data(self, customer_id: str, **kwargs) -> DetailedResponse: headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response @@ -4057,6 +4062,27 @@ def _to_dict(self): """Return a json dictionary representing this model.""" return self.to_dict() + def get_properties(self) -> Dict: + """Return a dictionary of arbitrary properties from this instance of Context""" + _dict = {} + + for _key in [ + k for k in vars(self).keys() if k not in Context._properties + ]: + _dict[_key] = getattr(self, _key) + return _dict + + def set_properties(self, _dict: dict): + """Set a dictionary of arbitrary properties to this instance of Context""" + for _key in [ + k for k in vars(self).keys() if k not in Context._properties + ]: + delattr(self, _key) + + for _key, _value in _dict.items(): + if _key not in Context._properties: + setattr(self, _key, _value) + def __str__(self) -> str: """Return a `str` version of this Context object.""" return json.dumps(self.to_dict(), indent=2) @@ -5176,6 +5202,29 @@ def _to_dict(self): """Return a json dictionary representing this model.""" return self.to_dict() + def get_properties(self) -> Dict: + """Return a dictionary of arbitrary properties from this instance of DialogNodeContext""" + _dict = {} + + for _key in [ + k for k in vars(self).keys() + if k not in DialogNodeContext._properties + ]: + _dict[_key] = getattr(self, _key) + return _dict + + def set_properties(self, _dict: dict): + """Set a dictionary of arbitrary properties to this instance of DialogNodeContext""" + for _key in [ + k for k in vars(self).keys() + if k not in DialogNodeContext._properties + ]: + delattr(self, _key) + + for _key, _value in _dict.items(): + if _key not in DialogNodeContext._properties: + setattr(self, _key, _value) + def __str__(self) -> str: """Return a `str` version of this DialogNodeContext object.""" return json.dumps(self.to_dict(), indent=2) @@ -5436,6 +5485,29 @@ def _to_dict(self): """Return a json dictionary representing this model.""" return self.to_dict() + def get_properties(self) -> Dict: + """Return a dictionary of arbitrary properties from this instance of DialogNodeOutput""" + _dict = {} + + for _key in [ + k for k in vars(self).keys() + if k not in DialogNodeOutput._properties + ]: + _dict[_key] = getattr(self, _key) + return _dict + + def set_properties(self, _dict: dict): + """Set a dictionary of arbitrary properties to this instance of DialogNodeOutput""" + for _key in [ + k for k in vars(self).keys() + if k not in DialogNodeOutput._properties + ]: + delattr(self, _key) + + for _key, _value in 
_dict.items(): + if _key not in DialogNodeOutput._properties: + setattr(self, _key, _value) + def __str__(self) -> str: """Return a `str` version of this DialogNodeOutput object.""" return json.dumps(self.to_dict(), indent=2) @@ -7550,6 +7622,29 @@ def _to_dict(self): """Return a json dictionary representing this model.""" return self.to_dict() + def get_properties(self) -> Dict: + """Return a dictionary of arbitrary properties from this instance of MessageInput""" + _dict = {} + + for _key in [ + k for k in vars(self).keys() + if k not in MessageInput._properties + ]: + _dict[_key] = getattr(self, _key) + return _dict + + def set_properties(self, _dict: dict): + """Set a dictionary of arbitrary properties to this instance of MessageInput""" + for _key in [ + k for k in vars(self).keys() + if k not in MessageInput._properties + ]: + delattr(self, _key) + + for _key, _value in _dict.items(): + if _key not in MessageInput._properties: + setattr(self, _key, _value) + def __str__(self) -> str: """Return a `str` version of this MessageInput object.""" return json.dumps(self.to_dict(), indent=2) @@ -8020,6 +8115,27 @@ def _to_dict(self): """Return a json dictionary representing this model.""" return self.to_dict() + def get_properties(self) -> Dict: + """Return a dictionary of arbitrary properties from this instance of OutputData""" + _dict = {} + + for _key in [ + k for k in vars(self).keys() if k not in OutputData._properties + ]: + _dict[_key] = getattr(self, _key) + return _dict + + def set_properties(self, _dict: dict): + """Set a dictionary of arbitrary properties to this instance of OutputData""" + for _key in [ + k for k in vars(self).keys() if k not in OutputData._properties + ]: + delattr(self, _key) + + for _key, _value in _dict.items(): + if _key not in OutputData._properties: + setattr(self, _key, _value) + def __str__(self) -> str: """Return a `str` version of this OutputData object.""" return json.dumps(self.to_dict(), indent=2) @@ -8216,12 +8332,15 @@ class RuntimeEntity(): A term from the request that was identified as an entity. :attr str entity: An entity detected in the input. - :attr List[int] location: An array of zero-based character offsets that indicate - where the detected entity values begin and end in the input text. + :attr List[int] location: (optional) An array of zero-based character offsets + that indicate where the detected entity values begin and end in the input text. :attr str value: The entity value that was recognized in the user input. :attr float confidence: (optional) A decimal percentage that represents Watson's confidence in the recognized entity. - :attr dict metadata: (optional) Any metadata for the entity. + :attr dict metadata: (optional) **Deprecated.** Any metadata for the entity. + Beginning with the `2021-06-14` API version, the `metadata` property is no + longer returned. For information about system entities recognized in the user + input, see the `interpretation` property. :attr List[CaptureGroup] groups: (optional) The recognized capture groups for the entity, as defined by the entity pattern. :attr RuntimeEntityInterpretation interpretation: (optional) An object @@ -8242,9 +8361,9 @@ class RuntimeEntity(): def __init__(self, entity: str, - location: List[int], value: str, *, + location: List[int] = None, confidence: float = None, metadata: dict = None, groups: List['CaptureGroup'] = None, @@ -8255,12 +8374,17 @@ def __init__(self, Initialize a RuntimeEntity object. :param str entity: An entity detected in the input. 
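The `get_properties`/`set_properties` helpers added above to the dynamic models (`Context`, `DialogNodeContext`, `DialogNodeOutput`, `MessageInput`, `OutputData`) manage properties that fall outside each model's declared schema. A minimal sketch using the v1 `Context` model; the property names are hypothetical application state, and serialization of the extras through `to_dict()` is assumed to work as it does for the SDK's other dynamic models:

```python
from ibm_watson.assistant_v1 import Context

ctx = Context(conversation_id='example-conversation')

# Attach arbitrary, non-schema properties; 'user_name' and 'cart_size'
# are hypothetical application state, not SDK-defined fields.
ctx.set_properties({'user_name': 'Lee', 'cart_size': 2})

# get_properties() returns only the arbitrary properties, never declared
# ones such as conversation_id.
print(ctx.get_properties())  # {'user_name': 'Lee', 'cart_size': 2}
```

Note from the generated code above that `set_properties` first deletes any previously attached arbitrary properties, so it replaces rather than merges.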
- :param List[int] location: An array of zero-based character offsets that - indicate where the detected entity values begin and end in the input text. :param str value: The entity value that was recognized in the user input. + :param List[int] location: (optional) An array of zero-based character + offsets that indicate where the detected entity values begin and end in the + input text. :param float confidence: (optional) A decimal percentage that represents Watson's confidence in the recognized entity. - :param dict metadata: (optional) Any metadata for the entity. + :param dict metadata: (optional) **Deprecated.** Any metadata for the + entity. + Beginning with the `2021-06-14` API version, the `metadata` property is no + longer returned. For information about system entities recognized in the + user input, see the `interpretation` property. :param List[CaptureGroup] groups: (optional) The recognized capture groups for the entity, as defined by the entity pattern. :param RuntimeEntityInterpretation interpretation: (optional) An object @@ -8302,10 +8426,6 @@ def from_dict(cls, _dict: Dict) -> 'RuntimeEntity': ) if 'location' in _dict: args['location'] = _dict.get('location') - else: - raise ValueError( - 'Required property \'location\' not present in RuntimeEntity JSON' - ) if 'value' in _dict: args['value'] = _dict.get('value') else: @@ -9993,9 +10113,8 @@ class WorkspaceSystemSettingsDisambiguation(): :attr bool enabled: (optional) Whether the disambiguation feature is enabled for the workspace. :attr str sensitivity: (optional) The sensitivity of the disambiguation feature - to intent detection conflicts. Set to **high** if you want the disambiguation - feature to be triggered more often. This can be useful for testing or - demonstration purposes. + to intent detection uncertainty. Higher sensitivity means that the + disambiguation feature is triggered more often and includes more choices. :attr bool randomize: (optional) Whether the order in which disambiguation suggestions are presented should be randomized (but still influenced by relative confidence). @@ -10024,9 +10143,8 @@ def __init__(self, :param bool enabled: (optional) Whether the disambiguation feature is enabled for the workspace. :param str sensitivity: (optional) The sensitivity of the disambiguation - feature to intent detection conflicts. Set to **high** if you want the - disambiguation feature to be triggered more often. This can be useful for - testing or demonstration purposes. + feature to intent detection uncertainty. Higher sensitivity means that the + disambiguation feature is triggered more often and includes more choices. :param bool randomize: (optional) Whether the order in which disambiguation suggestions are presented should be randomized (but still influenced by relative confidence). @@ -10110,12 +10228,16 @@ def __ne__(self, other: 'WorkspaceSystemSettingsDisambiguation') -> bool: class SensitivityEnum(str, Enum): """ - The sensitivity of the disambiguation feature to intent detection conflicts. Set - to **high** if you want the disambiguation feature to be triggered more often. - This can be useful for testing or demonstration purposes. + The sensitivity of the disambiguation feature to intent detection uncertainty. + Higher sensitivity means that the disambiguation feature is triggered more often + and includes more choices. 
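`SensitivityEnum` above gains four intermediate levels alongside `auto` and `high`. A sketch of opting a workspace into one of them; the workspace ID and API key are placeholders, and wiring `WorkspaceSystemSettingsDisambiguation` through `WorkspaceSystemSettings` into `update_workspace` follows the SDK's existing v1 surface:

```python
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson import AssistantV1
from ibm_watson.assistant_v1 import (WorkspaceSystemSettings,
                                     WorkspaceSystemSettingsDisambiguation)

service = AssistantV1(version='2021-06-14',
                      authenticator=IAMAuthenticator('your_api_key'))
service.set_service_url('https://api.us-south.assistant.watson.cloud.ibm.com')

# 'medium_high' is one of the new levels; 'auto' and 'high' remain valid.
disambiguation = WorkspaceSystemSettingsDisambiguation(
    enabled=True, sensitivity='medium_high', randomize=True)

service.update_workspace(
    workspace_id='your_workspace_id',  # placeholder
    system_settings=WorkspaceSystemSettings(disambiguation=disambiguation))
```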
""" AUTO = 'auto' HIGH = 'high' + MEDIUM_HIGH = 'medium_high' + MEDIUM = 'medium' + MEDIUM_LOW = 'medium_low' + LOW = 'low' class WorkspaceSystemSettingsOffTopic(): @@ -10551,12 +10673,14 @@ class DialogNodeOutputGenericDialogNodeOutputResponseTypeImage( :attr str response_type: The type of response returned by the dialog node. The specified response type must be supported by the client application or channel. - :attr str source: The URL of the image. + :attr str source: The `https:` URL of the image. :attr str title: (optional) An optional title to show before the response. :attr str description: (optional) An optional description to show with the response. :attr List[ResponseGenericChannel] channels: (optional) An array of objects specifying channels for which the response is intended. + :attr str alt_text: (optional) Descriptive text that can be used for screen + readers or other situations where the image cannot be seen. """ def __init__(self, @@ -10565,19 +10689,22 @@ def __init__(self, *, title: str = None, description: str = None, - channels: List['ResponseGenericChannel'] = None) -> None: + channels: List['ResponseGenericChannel'] = None, + alt_text: str = None) -> None: """ Initialize a DialogNodeOutputGenericDialogNodeOutputResponseTypeImage object. :param str response_type: The type of response returned by the dialog node. The specified response type must be supported by the client application or channel. - :param str source: The URL of the image. + :param str source: The `https:` URL of the image. :param str title: (optional) An optional title to show before the response. :param str description: (optional) An optional description to show with the response. :param List[ResponseGenericChannel] channels: (optional) An array of objects specifying channels for which the response is intended. + :param str alt_text: (optional) Descriptive text that can be used for + screen readers or other situations where the image cannot be seen. """ # pylint: disable=super-init-not-called self.response_type = response_type @@ -10585,6 +10712,7 @@ def __init__(self, self.title = title self.description = description self.channels = channels + self.alt_text = alt_text @classmethod def from_dict( @@ -10613,6 +10741,8 @@ def from_dict( ResponseGenericChannel.from_dict(x) for x in _dict.get('channels') ] + if 'alt_text' in _dict: + args['alt_text'] = _dict.get('alt_text') return cls(**args) @classmethod @@ -10633,6 +10763,8 @@ def to_dict(self) -> Dict: _dict['description'] = self.description if hasattr(self, 'channels') and self.channels is not None: _dict['channels'] = [x.to_dict() for x in self.channels] + if hasattr(self, 'alt_text') and self.alt_text is not None: + _dict['alt_text'] = self.alt_text return _dict def _to_dict(self): @@ -11583,7 +11715,7 @@ class RuntimeResponseGenericRuntimeResponseTypeImage(RuntimeResponseGeneric): :attr str response_type: The type of response returned by the dialog node. The specified response type must be supported by the client application or channel. - :attr str source: The URL of the image. + :attr str source: The `https:` URL of the image. :attr str title: (optional) The title or introductory text to show before the response. :attr str description: (optional) The description to show with the the response. @@ -11591,6 +11723,8 @@ class RuntimeResponseGenericRuntimeResponseTypeImage(RuntimeResponseGeneric): specifying channels for which the response is intended. 
If **channels** is present, the response is intended for a built-in integration and should not be handled by an API client. + :attr str alt_text: (optional) Descriptive text that can be used for screen + readers or other situations where the image cannot be seen. """ def __init__(self, @@ -11599,14 +11733,15 @@ def __init__(self, *, title: str = None, description: str = None, - channels: List['ResponseGenericChannel'] = None) -> None: + channels: List['ResponseGenericChannel'] = None, + alt_text: str = None) -> None: """ Initialize a RuntimeResponseGenericRuntimeResponseTypeImage object. :param str response_type: The type of response returned by the dialog node. The specified response type must be supported by the client application or channel. - :param str source: The URL of the image. + :param str source: The `https:` URL of the image. :param str title: (optional) The title or introductory text to show before the response. :param str description: (optional) The description to show with the the @@ -11615,6 +11750,8 @@ def __init__(self, objects specifying channels for which the response is intended. If **channels** is present, the response is intended for a built-in integration and should not be handled by an API client. + :param str alt_text: (optional) Descriptive text that can be used for + screen readers or other situations where the image cannot be seen. """ # pylint: disable=super-init-not-called self.response_type = response_type @@ -11622,6 +11759,7 @@ def __init__(self, self.title = title self.description = description self.channels = channels + self.alt_text = alt_text @classmethod def from_dict( @@ -11650,6 +11788,8 @@ def from_dict( ResponseGenericChannel.from_dict(x) for x in _dict.get('channels') ] + if 'alt_text' in _dict: + args['alt_text'] = _dict.get('alt_text') return cls(**args) @classmethod @@ -11670,6 +11810,8 @@ def to_dict(self) -> Dict: _dict['description'] = self.description if hasattr(self, 'channels') and self.channels is not None: _dict['channels'] = [x.to_dict() for x in self.channels] + if hasattr(self, 'alt_text') and self.alt_text is not None: + _dict['alt_text'] = self.alt_text return _dict def _to_dict(self): diff --git a/ibm_watson/assistant_v2.py b/ibm_watson/assistant_v2.py index 1f0a52c8..12b60fd4 100644 --- a/ibm_watson/assistant_v2.py +++ b/ibm_watson/assistant_v2.py @@ -14,13 +14,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -# IBM OpenAPI SDK Code Generator Version: 99-SNAPSHOT-902c9336-20210507-162723 +# IBM OpenAPI SDK Code Generator Version: 3.38.0-07189efd-20210827-205025 """ The IBM Watson™ Assistant service combines machine learning, natural language understanding, and an integrated dialog editor to create conversation flows between your apps and your users. The Assistant v2 API provides runtime methods your client application can use to send user input to an assistant and receive a response. + +API Version: 2.0 +See: https://cloud.ibm.com/docs/assistant """ from enum import Enum @@ -56,7 +59,7 @@ def __init__( Construct a new client for the Assistant service. :param str version: Release date of the API version you want to use. - Specify dates in YYYY-MM-DD format. The current version is `2020-09-24`. + Specify dates in YYYY-MM-DD format. The current version is `2021-06-14`. :param Authenticator authenticator: The authenticator specifies the authentication mechanism. 
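The v2 constructor docs now cite `2021-06-14` as the current version, and the examples in this diff switch to the regional `cloud.ibm.com` endpoints. A minimal client-setup sketch; the API key is a placeholder and `us-south` is just one region:

```python
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson import AssistantV2

assistant = AssistantV2(version='2021-06-14',
                        authenticator=IAMAuthenticator('your_api_key'))
assistant.set_service_url(
    'https://api.us-south.assistant.watson.cloud.ibm.com')
```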
Get up to date information from https://github.com/IBM/python-sdk-core/blob/master/README.md @@ -121,7 +124,7 @@ def create_session(self, assistant_id: str, **kwargs) -> DetailedResponse: headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_session(self, assistant_id: str, session_id: str, @@ -171,7 +174,7 @@ def delete_session(self, assistant_id: str, session_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -259,7 +262,7 @@ def message(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def message_stateless(self, @@ -338,7 +341,7 @@ def message_stateless(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -401,7 +404,7 @@ def bulk_classify(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -472,7 +475,7 @@ def list_logs(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -522,7 +525,7 @@ def delete_user_data(self, customer_id: str, **kwargs) -> DetailedResponse: headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response @@ -2286,9 +2289,23 @@ class MessageContextGlobalSystem(): or `tomorrow`. This can be useful for simulating past or future times for testing purposes, or when analyzing documents such as news articles. This value must be a UTC time value formatted according to ISO 8601 (for - example, `2019-06-26T12:00:00Z` for noon on 26 June 2019. + example, `2021-06-26T12:00:00Z` for noon UTC on 26 June 2021). This property is included only if the new system entities are enabled for the skill. + :attr str session_start_time: (optional) The time at which the session started. + With the stateful `message` method, the start time is always present, and is set + by the service based on the time the session was created. With the stateless + `message` method, the start time is set by the service in the response to the + first message, and should be returned as part of the context with each + subsequent message in the session. + This value is a UTC time value formatted according to ISO 8601 (for example, + `2021-06-26T12:00:00Z` for noon UTC on 26 June 2021). + :attr str state: (optional) An encoded string that represents the configuration + state of the assistant at the beginning of the conversation. If you are using + the stateless `message` method, save this value and then send it in the context + of the subsequent message request to avoid disruptions if there are + configuration changes during the conversation (such as a change to a skill the + assistant uses). """ def __init__(self, @@ -2297,7 +2314,9 @@ def __init__(self, user_id: str = None, turn_count: int = None, locale: str = None, - reference_time: str = None) -> None: + reference_time: str = None, + session_start_time: str = None, + state: str = None) -> None: """ Initialize a MessageContextGlobalSystem object. @@ -2332,15 +2351,31 @@ def __init__(self, or future times for testing purposes, or when analyzing documents such as news articles. 
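Per the new `session_start_time` and `state` properties documented above, stateless callers should echo the returned context back on every turn. A sketch assuming a placeholder assistant ID and the existing `message_stateless` signature:

```python
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson import AssistantV2

assistant = AssistantV2(version='2021-06-14',
                        authenticator=IAMAuthenticator('your_api_key'))
assistant.set_service_url(
    'https://api.us-south.assistant.watson.cloud.ibm.com')

# First stateless turn: the service fills in context.system.session_start_time
# (and may set context.system.state).
first = assistant.message_stateless(
    assistant_id='your_assistant_id',  # placeholder
    input={'message_type': 'text', 'text': 'Hello'}).get_result()

# Round-trip the whole context so later turns keep the session start time
# and survive mid-conversation configuration changes.
second = assistant.message_stateless(
    assistant_id='your_assistant_id',
    input={'message_type': 'text', 'text': 'What are your hours?'},
    context=first['context']).get_result()
```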
This value must be a UTC time value formatted according to ISO 8601 (for - example, `2019-06-26T12:00:00Z` for noon on 26 June 2019. + example, `2021-06-26T12:00:00Z` for noon UTC on 26 June 2021). This property is included only if the new system entities are enabled for the skill. + :param str session_start_time: (optional) The time at which the session + started. With the stateful `message` method, the start time is always + present, and is set by the service based on the time the session was + created. With the stateless `message` method, the start time is set by the + service in the response to the first message, and should be returned as + part of the context with each subsequent message in the session. + This value is a UTC time value formatted according to ISO 8601 (for + example, `2021-06-26T12:00:00Z` for noon UTC on 26 June 2021). + :param str state: (optional) An encoded string that represents the + configuration state of the assistant at the beginning of the conversation. + If you are using the stateless `message` method, save this value and then + send it in the context of the subsequent message request to avoid + disruptions if there are configuration changes during the conversation + (such as a change to a skill the assistant uses). """ self.timezone = timezone self.user_id = user_id self.turn_count = turn_count self.locale = locale self.reference_time = reference_time + self.session_start_time = session_start_time + self.state = state @classmethod def from_dict(cls, _dict: Dict) -> 'MessageContextGlobalSystem': @@ -2356,6 +2391,10 @@ def from_dict(cls, _dict: Dict) -> 'MessageContextGlobalSystem': args['locale'] = _dict.get('locale') if 'reference_time' in _dict: args['reference_time'] = _dict.get('reference_time') + if 'session_start_time' in _dict: + args['session_start_time'] = _dict.get('session_start_time') + if 'state' in _dict: + args['state'] = _dict.get('state') return cls(**args) @classmethod @@ -2376,6 +2415,12 @@ def to_dict(self) -> Dict: _dict['locale'] = self.locale if hasattr(self, 'reference_time') and self.reference_time is not None: _dict['reference_time'] = self.reference_time + if hasattr( + self, + 'session_start_time') and self.session_start_time is not None: + _dict['session_start_time'] = self.session_start_time + if hasattr(self, 'state') and self.state is not None: + _dict['state'] = self.state return _dict def _to_dict(self): @@ -2553,6 +2598,29 @@ def _to_dict(self): """Return a json dictionary representing this model.""" return self.to_dict() + def get_properties(self) -> Dict: + """Return a dictionary of arbitrary properties from this instance of MessageContextSkillSystem""" + _dict = {} + + for _key in [ + k for k in vars(self).keys() + if k not in MessageContextSkillSystem._properties + ]: + _dict[_key] = getattr(self, _key) + return _dict + + def set_properties(self, _dict: dict): + """Set a dictionary of arbitrary properties to this instance of MessageContextSkillSystem""" + for _key in [ + k for k in vars(self).keys() + if k not in MessageContextSkillSystem._properties + ]: + delattr(self, _key) + + for _key, _value in _dict.items(): + if _key not in MessageContextSkillSystem._properties: + setattr(self, _key, _value) + def __str__(self) -> str: """Return a `str` version of this MessageContextSkillSystem object.""" return json.dumps(self.to_dict(), indent=2) @@ -2648,8 +2716,12 @@ class MessageInput(): """ An input object that includes the input text. - :attr str message_type: (optional) The type of user input. 
Currently, only text - input is supported. + :attr str message_type: (optional) The type of the message: + - `text`: The user input is processed normally by the assistant. + - `search`: Only search results are returned. (Any dialog or actions skill is + bypassed.) + **Note:** A `search` message results in an error if no search skill is + configured for the assistant. :attr str text: (optional) The text of the user input. This string cannot contain carriage return, newline, or tab characters. :attr List[RuntimeIntent] intents: (optional) Intents to use when evaluating the @@ -2674,8 +2746,12 @@ def __init__(self, """ Initialize a MessageInput object. - :param str message_type: (optional) The type of user input. Currently, only - text input is supported. + :param str message_type: (optional) The type of the message: + - `text`: The user input is processed normally by the assistant. + - `search`: Only search results are returned. (Any dialog or actions skill + is bypassed.) + **Note:** A `search` message results in an error if no search skill is + configured for the assistant. :param str text: (optional) The text of the user input. This string cannot contain carriage return, newline, or tab characters. :param List[RuntimeIntent] intents: (optional) Intents to use when @@ -2762,9 +2838,15 @@ def __ne__(self, other: 'MessageInput') -> bool: class MessageTypeEnum(str, Enum): """ - The type of user input. Currently, only text input is supported. + The type of the message: + - `text`: The user input is processed normally by the assistant. + - `search`: Only search results are returned. (Any dialog or actions skill is + bypassed.) + **Note:** A `search` message results in an error if no search skill is configured + for the assistant. """ TEXT = 'text' + SEARCH = 'search' class MessageInputOptions(): @@ -3083,8 +3165,12 @@ class MessageInputStateless(): """ An input object that includes the input text. - :attr str message_type: (optional) The type of user input. Currently, only text - input is supported. + :attr str message_type: (optional) The type of the message: + - `text`: The user input is processed normally by the assistant. + - `search`: Only search results are returned. (Any dialog or actions skill is + bypassed.) + **Note:** A `search` message results in an error if no search skill is + configured for the assistant. :attr str text: (optional) The text of the user input. This string cannot contain carriage return, newline, or tab characters. :attr List[RuntimeIntent] intents: (optional) Intents to use when evaluating the @@ -3109,8 +3195,12 @@ def __init__(self, """ Initialize a MessageInputStateless object. - :param str message_type: (optional) The type of user input. Currently, only - text input is supported. + :param str message_type: (optional) The type of the message: + - `text`: The user input is processed normally by the assistant. + - `search`: Only search results are returned. (Any dialog or actions skill + is bypassed.) + **Note:** A `search` message results in an error if no search skill is + configured for the assistant. :param str text: (optional) The text of the user input. This string cannot contain carriage return, newline, or tab characters. :param List[RuntimeIntent] intents: (optional) Intents to use when @@ -3197,9 +3287,15 @@ def __ne__(self, other: 'MessageInputStateless') -> bool: class MessageTypeEnum(str, Enum): """ - The type of user input. Currently, only text input is supported. 
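`message_type` now accepts `search` in addition to `text`, as described above. A sketch of a stateful search turn; the assistant ID is a placeholder, and the call fails unless a search skill is configured for the assistant:

```python
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson import AssistantV2

assistant = AssistantV2(version='2021-06-14',
                        authenticator=IAMAuthenticator('your_api_key'))
assistant.set_service_url(
    'https://api.us-south.assistant.watson.cloud.ibm.com')

session = assistant.create_session(
    assistant_id='your_assistant_id').get_result()  # placeholder ID

# A 'search' message returns search results only; any dialog or actions
# skill is bypassed.
response = assistant.message(
    assistant_id='your_assistant_id',
    session_id=session['session_id'],
    input={'message_type': 'search', 'text': 'return policy'}).get_result()
```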
+ The type of the message: + - `text`: The user input is processed normally by the assistant. + - `search`: Only search results are returned. (Any dialog or actions skill is + bypassed.) + **Note:** A `search` message results in an error if no search skill is configured + for the assistant. """ TEXT = 'text' + SEARCH = 'search' class MessageOutput(): @@ -3899,13 +3995,16 @@ class RuntimeEntity(): The entity value that was recognized in the user input. :attr str entity: An entity detected in the input. - :attr List[int] location: An array of zero-based character offsets that indicate - where the detected entity values begin and end in the input text. + :attr List[int] location: (optional) An array of zero-based character offsets + that indicate where the detected entity values begin and end in the input text. :attr str value: The term in the input text that was recognized as an entity value. :attr float confidence: (optional) A decimal percentage that represents Watson's confidence in the recognized entity. - :attr dict metadata: (optional) Any metadata for the entity. + :attr dict metadata: (optional) **Deprecated.** Any metadata for the entity. + Beginning with the `2021-06-14` API version, the `metadata` property is no + longer returned. For information about system entities recognized in the user + input, see the `interpretation` property. :attr List[CaptureGroup] groups: (optional) The recognized capture groups for the entity, as defined by the entity pattern. :attr RuntimeEntityInterpretation interpretation: (optional) An object @@ -3928,9 +4027,9 @@ class RuntimeEntity(): def __init__(self, entity: str, - location: List[int], value: str, *, + location: List[int] = None, confidence: float = None, metadata: dict = None, groups: List['CaptureGroup'] = None, @@ -3941,13 +4040,18 @@ def __init__(self, Initialize a RuntimeEntity object. :param str entity: An entity detected in the input. - :param List[int] location: An array of zero-based character offsets that - indicate where the detected entity values begin and end in the input text. :param str value: The term in the input text that was recognized as an entity value. + :param List[int] location: (optional) An array of zero-based character + offsets that indicate where the detected entity values begin and end in the + input text. :param float confidence: (optional) A decimal percentage that represents Watson's confidence in the recognized entity. - :param dict metadata: (optional) Any metadata for the entity. + :param dict metadata: (optional) **Deprecated.** Any metadata for the + entity. + Beginning with the `2021-06-14` API version, the `metadata` property is no + longer returned. For information about system entities recognized in the + user input, see the `interpretation` property. :param List[CaptureGroup] groups: (optional) The recognized capture groups for the entity, as defined by the entity pattern. :param RuntimeEntityInterpretation interpretation: (optional) An object @@ -3991,10 +4095,6 @@ def from_dict(cls, _dict: Dict) -> 'RuntimeEntity': ) if 'location' in _dict: args['location'] = _dict.get('location') - else: - raise ValueError( - 'Required property \'location\' not present in RuntimeEntity JSON' - ) if 'value' in _dict: args['value'] = _dict.get('value') else: @@ -4743,6 +4843,12 @@ class SearchResult(): :attr SearchResultHighlight highlight: (optional) An object containing segments of text from search results with query-matching text highlighted using HTML `` tags. 
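Because `RuntimeEntity.location` becomes optional here and `metadata` is deprecated in favor of `interpretation`, consumers should stop assuming either field is present. A defensive helper over the plain-dict form returned by `get_result()`:

```python
def describe_entities(message_result: dict) -> None:
    """Print entities from a v2 message() result, tolerating fields that
    are optional or deprecated as of the 2021-06-14 version."""
    for entity in message_result['output'].get('entities', []):
        span = entity.get('location')  # optional: may be absent
        # 'metadata' is no longer returned; system-entity details live in
        # 'interpretation' instead.
        detail = entity.get('interpretation', {})
        print(entity['entity'], entity['value'], span, detail)
```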
+ :attr List[SearchResultAnswer] answers: (optional) An array specifying segments + of text within the result that were identified as direct answers to the search + query. Currently, only the single answer with the highest confidence (if any) is + returned. + **Note:** This property uses the answer finding beta feature, and is available + only if the search skill is connected to a Discovery v2 service instance. """ def __init__(self, @@ -4752,7 +4858,8 @@ def __init__(self, body: str = None, title: str = None, url: str = None, - highlight: 'SearchResultHighlight' = None) -> None: + highlight: 'SearchResultHighlight' = None, + answers: List['SearchResultAnswer'] = None) -> None: """ Initialize a SearchResult object. @@ -4773,6 +4880,13 @@ def __init__(self, :param SearchResultHighlight highlight: (optional) An object containing segments of text from search results with query-matching text highlighted using HTML `` tags. + :param List[SearchResultAnswer] answers: (optional) An array specifying + segments of text within the result that were identified as direct answers + to the search query. Currently, only the single answer with the highest + confidence (if any) is returned. + **Note:** This property uses the answer finding beta feature, and is + available only if the search skill is connected to a Discovery v2 service + instance. """ self.id = id self.result_metadata = result_metadata @@ -4780,6 +4894,7 @@ def __init__(self, self.title = title self.url = url self.highlight = highlight + self.answers = answers @classmethod def from_dict(cls, _dict: Dict) -> 'SearchResult': @@ -4806,6 +4921,10 @@ def from_dict(cls, _dict: Dict) -> 'SearchResult': if 'highlight' in _dict: args['highlight'] = SearchResultHighlight.from_dict( _dict.get('highlight')) + if 'answers' in _dict: + args['answers'] = [ + SearchResultAnswer.from_dict(x) for x in _dict.get('answers') + ] return cls(**args) @classmethod @@ -4829,6 +4948,8 @@ def to_dict(self) -> Dict: _dict['url'] = self.url if hasattr(self, 'highlight') and self.highlight is not None: _dict['highlight'] = self.highlight.to_dict() + if hasattr(self, 'answers') and self.answers is not None: + _dict['answers'] = [x.to_dict() for x in self.answers] return _dict def _to_dict(self): @@ -4850,6 +4971,78 @@ def __ne__(self, other: 'SearchResult') -> bool: return not self == other +class SearchResultAnswer(): + """ + An object specifing a segment of text that was identified as a direct answer to the + search query. + + :attr str text: The text of the answer. + :attr float confidence: The confidence score for the answer, as returned by the + Discovery service. + """ + + def __init__(self, text: str, confidence: float) -> None: + """ + Initialize a SearchResultAnswer object. + + :param str text: The text of the answer. + :param float confidence: The confidence score for the answer, as returned + by the Discovery service. 
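`SearchResult` gains an optional `answers` array of `SearchResultAnswer` objects (a beta feature, available only when the search skill is backed by Discovery v2). A small helper over the plain-dict form:

```python
def best_answers(search_results: list) -> list:
    """Collect (text, confidence) pairs from SearchResult dicts; 'answers'
    is optional and currently holds at most the single best answer."""
    pairs = []
    for result in search_results:
        for answer in result.get('answers', []):
            pairs.append((answer['text'], answer['confidence']))
    return pairs
```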
+ """ + self.text = text + self.confidence = confidence + + @classmethod + def from_dict(cls, _dict: Dict) -> 'SearchResultAnswer': + """Initialize a SearchResultAnswer object from a json dictionary.""" + args = {} + if 'text' in _dict: + args['text'] = _dict.get('text') + else: + raise ValueError( + 'Required property \'text\' not present in SearchResultAnswer JSON' + ) + if 'confidence' in _dict: + args['confidence'] = _dict.get('confidence') + else: + raise ValueError( + 'Required property \'confidence\' not present in SearchResultAnswer JSON' + ) + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a SearchResultAnswer object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'text') and self.text is not None: + _dict['text'] = self.text + if hasattr(self, 'confidence') and self.confidence is not None: + _dict['confidence'] = self.confidence + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this SearchResultAnswer object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'SearchResultAnswer') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'SearchResultAnswer') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class SearchResultHighlight(): """ An object containing segments of text from search results with query-matching text @@ -4934,6 +5127,29 @@ def _to_dict(self): """Return a json dictionary representing this model.""" return self.to_dict() + def get_properties(self) -> Dict: + """Return a dictionary of arbitrary properties from this instance of SearchResultHighlight""" + _dict = {} + + for _key in [ + k for k in vars(self).keys() + if k not in SearchResultHighlight._properties + ]: + _dict[_key] = getattr(self, _key) + return _dict + + def set_properties(self, _dict: dict): + """Set a dictionary of arbitrary properties to this instance of SearchResultHighlight""" + for _key in [ + k for k in vars(self).keys() + if k not in SearchResultHighlight._properties + ]: + delattr(self, _key) + + for _key, _value in _dict.items(): + if _key not in SearchResultHighlight._properties: + setattr(self, _key, _value) + def __str__(self) -> str: """Return a `str` version of this SearchResultHighlight object.""" return json.dumps(self.to_dict(), indent=2) @@ -5691,13 +5907,15 @@ class RuntimeResponseGenericRuntimeResponseTypeImage(RuntimeResponseGeneric): :attr str response_type: The type of response returned by the dialog node. The specified response type must be supported by the client application or channel. - :attr str source: The URL of the image. + :attr str source: The `https:` URL of the image. :attr str title: (optional) The title to show before the response. :attr str description: (optional) The description to show with the the response. :attr List[ResponseGenericChannel] channels: (optional) An array of objects specifying channels for which the response is intended. If **channels** is present, the response is intended for a built-in integration and should not be handled by an API client. 
+ :attr str alt_text: (optional) Descriptive text that can be used for screen + readers or other situations where the image cannot be seen. """ def __init__(self, @@ -5706,14 +5924,15 @@ def __init__(self, *, title: str = None, description: str = None, - channels: List['ResponseGenericChannel'] = None) -> None: + channels: List['ResponseGenericChannel'] = None, + alt_text: str = None) -> None: """ Initialize a RuntimeResponseGenericRuntimeResponseTypeImage object. :param str response_type: The type of response returned by the dialog node. The specified response type must be supported by the client application or channel. - :param str source: The URL of the image. + :param str source: The `https:` URL of the image. :param str title: (optional) The title to show before the response. :param str description: (optional) The description to show with the the response. @@ -5721,6 +5940,8 @@ def __init__(self, objects specifying channels for which the response is intended. If **channels** is present, the response is intended for a built-in integration and should not be handled by an API client. + :param str alt_text: (optional) Descriptive text that can be used for + screen readers or other situations where the image cannot be seen. """ # pylint: disable=super-init-not-called self.response_type = response_type @@ -5728,6 +5949,7 @@ def __init__(self, self.title = title self.description = description self.channels = channels + self.alt_text = alt_text @classmethod def from_dict( @@ -5756,6 +5978,8 @@ def from_dict( ResponseGenericChannel.from_dict(x) for x in _dict.get('channels') ] + if 'alt_text' in _dict: + args['alt_text'] = _dict.get('alt_text') return cls(**args) @classmethod @@ -5776,6 +6000,8 @@ def to_dict(self) -> Dict: _dict['description'] = self.description if hasattr(self, 'channels') and self.channels is not None: _dict['channels'] = [x.to_dict() for x in self.channels] + if hasattr(self, 'alt_text') and self.alt_text is not None: + _dict['alt_text'] = self.alt_text return _dict def _to_dict(self): diff --git a/ibm_watson/compare_comply_v1.py b/ibm_watson/compare_comply_v1.py index 6ecc8d37..1518ea73 100644 --- a/ibm_watson/compare_comply_v1.py +++ b/ibm_watson/compare_comply_v1.py @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -# IBM OpenAPI SDK Code Generator Version: 99-SNAPSHOT-902c9336-20210507-162723 +# IBM OpenAPI SDK Code Generator Version: 3.38.0-07189efd-20210827-205025 """ IBM Watson™ Compare and Comply is discontinued. Existing instances are supported until 30 November 2021, but as of 1 December 2020, you can't create instances. Any @@ -25,6 +25,9 @@ {: deprecated} Compare and Comply analyzes governing documents to provide details about critical aspects of the documents. 
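The other pervasive change in this diff is mechanical: every `self.send(request)` becomes `self.send(request, **kwargs)`, so keyword arguments given to a public method now reach the HTTP layer instead of being dropped. A sketch under the assumption that the installed `ibm-cloud-sdk-core` passes unrecognized keyword arguments (such as `timeout`) through to the underlying `requests` call:

```python
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson import AssistantV1

service = AssistantV1(version='2021-06-14',
                      authenticator=IAMAuthenticator('your_api_key'))
service.set_service_url('https://api.us-south.assistant.watson.cloud.ibm.com')

# With send(request, **kwargs), this per-request option is forwarded
# (assumed core-library behavior; verify against your sdk-core version).
workspaces = service.list_workspaces(timeout=30).get_result()
```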
+ +API Version: 1.0 +See: https://cloud.ibm.com/docs/compare-comply?topic=compare-comply-about """ from datetime import datetime @@ -132,7 +135,7 @@ def convert_to_html(self, params=params, files=form_data) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -187,7 +190,7 @@ def classify_elements(self, params=params, files=form_data) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -242,7 +245,7 @@ def extract_tables(self, params=params, files=form_data) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -314,7 +317,7 @@ def compare_documents(self, params=params, files=form_data) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -376,7 +379,7 @@ def add_feedback(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def list_feedback(self, @@ -488,7 +491,7 @@ def list_feedback(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_feedback(self, @@ -536,7 +539,7 @@ def get_feedback(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_feedback(self, @@ -584,7 +587,7 @@ def delete_feedback(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -690,7 +693,7 @@ def create_batch(self, params=params, files=form_data) - response = self.send(request) + response = self.send(request, **kwargs) return response def list_batches(self, **kwargs) -> DetailedResponse: @@ -722,7 +725,7 @@ def list_batches(self, **kwargs) -> DetailedResponse: headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_batch(self, batch_id: str, **kwargs) -> DetailedResponse: @@ -761,7 +764,7 @@ def get_batch(self, batch_id: str, **kwargs) -> DetailedResponse: headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def update_batch(self, @@ -814,7 +817,7 @@ def update_batch(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response diff --git a/ibm_watson/discovery_v1.py b/ibm_watson/discovery_v1.py index 3dc4d4d5..d826fd94 100644 --- a/ibm_watson/discovery_v1.py +++ b/ibm_watson/discovery_v1.py @@ -14,13 +14,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -# IBM OpenAPI SDK Code Generator Version: 99-SNAPSHOT-a45d89ef-20201209-192237 +# IBM OpenAPI SDK Code Generator Version: 3.38.0-07189efd-20210827-205025 """ IBM Watson™ Discovery is a cognitive search and content analytics engine that you can add to applications to identify patterns, trends and actionable insights to drive better decision-making. Securely unify structured and unstructured data with pre-enriched content, and use a simplified query language to eliminate the need for manual filtering of results. 
+ +API Version: 1.0 +See: https://cloud.ibm.com/docs/discovery """ from datetime import date @@ -130,7 +133,7 @@ def create_environment(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def list_environments(self, @@ -166,7 +169,7 @@ def list_environments(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_environment(self, environment_id: str, @@ -203,7 +206,7 @@ def get_environment(self, environment_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def update_environment(self, @@ -259,7 +262,7 @@ def update_environment(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_environment(self, environment_id: str, @@ -296,7 +299,7 @@ def delete_environment(self, environment_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def list_fields(self, environment_id: str, collection_ids: List[str], @@ -344,7 +347,7 @@ def list_fields(self, environment_id: str, collection_ids: List[str], headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -441,7 +444,7 @@ def create_configuration( params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def list_configurations(self, @@ -485,7 +488,7 @@ def list_configurations(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_configuration(self, environment_id: str, configuration_id: str, @@ -527,7 +530,7 @@ def get_configuration(self, environment_id: str, configuration_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def update_configuration( @@ -624,7 +627,7 @@ def update_configuration( params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_configuration(self, environment_id: str, configuration_id: str, @@ -673,7 +676,7 @@ def delete_configuration(self, environment_id: str, configuration_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -740,7 +743,7 @@ def create_collection(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def list_collections(self, @@ -784,7 +787,7 @@ def list_collections(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_collection(self, environment_id: str, collection_id: str, @@ -825,7 +828,7 @@ def get_collection(self, environment_id: str, collection_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def update_collection(self, @@ -888,7 +891,7 @@ def update_collection(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_collection(self, environment_id: str, collection_id: str, @@ -929,7 +932,7 @@ def delete_collection(self, environment_id: 
str, collection_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def list_collection_fields(self, environment_id: str, collection_id: str, @@ -972,7 +975,7 @@ def list_collection_fields(self, environment_id: str, collection_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -1020,7 +1023,7 @@ def list_expansions(self, environment_id: str, collection_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def create_expansions(self, environment_id: str, collection_id: str, @@ -1088,7 +1091,7 @@ def create_expansions(self, environment_id: str, collection_id: str, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_expansions(self, environment_id: str, collection_id: str, @@ -1131,7 +1134,7 @@ def delete_expansions(self, environment_id: str, collection_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_tokenization_dictionary_status(self, environment_id: str, @@ -1177,7 +1180,7 @@ def get_tokenization_dictionary_status(self, environment_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def create_tokenization_dictionary( @@ -1238,7 +1241,7 @@ def create_tokenization_dictionary( params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_tokenization_dictionary(self, environment_id: str, @@ -1282,7 +1285,7 @@ def delete_tokenization_dictionary(self, environment_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_stopword_list_status(self, environment_id: str, collection_id: str, @@ -1325,7 +1328,7 @@ def get_stopword_list_status(self, environment_id: str, collection_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def create_stopword_list(self, @@ -1386,7 +1389,7 @@ def create_stopword_list(self, params=params, files=form_data) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_stopword_list(self, environment_id: str, collection_id: str, @@ -1429,7 +1432,7 @@ def delete_stopword_list(self, environment_id: str, collection_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -1525,7 +1528,7 @@ def add_document(self, params=params, files=form_data) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_document_status(self, environment_id: str, collection_id: str, @@ -1575,7 +1578,7 @@ def get_document_status(self, environment_id: str, collection_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def update_document(self, @@ -1656,7 +1659,7 @@ def update_document(self, params=params, files=form_data) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_document(self, environment_id: str, collection_id: str, @@ -1705,7 +1708,7 @@ def 
delete_document(self, environment_id: str, collection_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -1879,7 +1882,7 @@ def query(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def query_notices(self, @@ -2019,7 +2022,7 @@ def query_notices(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def federated_query(self, @@ -2183,7 +2186,7 @@ def federated_query(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def federated_query_notices(self, @@ -2307,7 +2310,7 @@ def federated_query_notices(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_autocompletion(self, @@ -2328,8 +2331,7 @@ def get_autocompletion(self, :param str environment_id: The ID of the environment. :param str collection_id: The ID of the collection. :param str prefix: The prefix to use for autocompletion. For example, the - prefix `Ho` could autocomplete to `Hot`, `Housing`, or `How do I upgrade`. - Possible completions are. + prefix `Ho` could autocomplete to `hot`, `housing`, or `how`. :param str field: (optional) The field in the result documents that autocompletion suggestions are identified from. :param int count: (optional) The number of autocompletion suggestions to @@ -2372,7 +2374,7 @@ def get_autocompletion(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -2419,7 +2421,7 @@ def list_training_data(self, environment_id: str, collection_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def add_training_data(self, @@ -2487,7 +2489,7 @@ def add_training_data(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_all_training_data(self, environment_id: str, collection_id: str, @@ -2529,7 +2531,7 @@ def delete_all_training_data(self, environment_id: str, collection_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_training_data(self, environment_id: str, collection_id: str, @@ -2577,7 +2579,7 @@ def get_training_data(self, environment_id: str, collection_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_training_data(self, environment_id: str, collection_id: str, @@ -2624,7 +2626,7 @@ def delete_training_data(self, environment_id: str, collection_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def list_training_examples(self, environment_id: str, collection_id: str, @@ -2671,7 +2673,7 @@ def list_training_examples(self, environment_id: str, collection_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def create_training_example(self, @@ -2740,7 +2742,7 @@ def create_training_example(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def 
delete_training_example(self, environment_id: str, collection_id: str, @@ -2792,7 +2794,7 @@ def delete_training_example(self, environment_id: str, collection_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def update_training_example(self, @@ -2859,7 +2861,7 @@ def update_training_example(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_training_example(self, environment_id: str, collection_id: str, @@ -2912,7 +2914,7 @@ def get_training_example(self, environment_id: str, collection_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -2956,7 +2958,7 @@ def delete_user_data(self, customer_id: str, **kwargs) -> DetailedResponse: headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -3008,7 +3010,7 @@ def create_event(self, type: str, data: 'EventData', params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def query_log(self, @@ -3072,7 +3074,7 @@ def query_log(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_metrics_query(self, @@ -3121,7 +3123,7 @@ def get_metrics_query(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_metrics_query_event(self, @@ -3171,7 +3173,7 @@ def get_metrics_query_event(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_metrics_query_no_results(self, @@ -3221,7 +3223,7 @@ def get_metrics_query_no_results(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_metrics_event_rate(self, @@ -3271,7 +3273,7 @@ def get_metrics_event_rate(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_metrics_query_token_event(self, @@ -3312,7 +3314,7 @@ def get_metrics_query_token_event(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -3358,7 +3360,7 @@ def list_credentials(self, environment_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def create_credentials(self, @@ -3366,7 +3368,7 @@ def create_credentials(self, *, source_type: str = None, credential_details: 'CredentialDetails' = None, - status: str = None, + status: 'StatusDetails' = None, **kwargs) -> DetailedResponse: """ Create credentials. @@ -3391,11 +3393,8 @@ def create_credentials(self, :param CredentialDetails credential_details: (optional) Object containing details of the stored credentials. Obtain credentials for your source from the administrator of the source. - :param str status: (optional) The current status of this set of - credentials. `connected` indicates that the credentials are available to - use with the source configuration of a collection. 
`invalid` refers to the - credentials (for example, the password provided has expired) and must be - corrected before they can be used with a collection. + :param StatusDetails status: (optional) Object that contains details about + the status of the authentication process. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse with `dict` result representing a `Credentials` object @@ -3405,6 +3404,8 @@ def create_credentials(self, raise ValueError('environment_id must be provided') if credential_details is not None: credential_details = convert_model(credential_details) + if status is not None: + status = convert_model(status) headers = {} sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME, service_version='V1', @@ -3437,7 +3438,7 @@ def create_credentials(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_credentials(self, environment_id: str, credential_id: str, @@ -3483,7 +3484,7 @@ def get_credentials(self, environment_id: str, credential_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def update_credentials(self, @@ -3492,7 +3493,7 @@ def update_credentials(self, *, source_type: str = None, credential_details: 'CredentialDetails' = None, - status: str = None, + status: 'StatusDetails' = None, **kwargs) -> DetailedResponse: """ Update credentials. @@ -3518,11 +3519,8 @@ def update_credentials(self, :param CredentialDetails credential_details: (optional) Object containing details of the stored credentials. Obtain credentials for your source from the administrator of the source. - :param str status: (optional) The current status of this set of - credentials. `connected` indicates that the credentials are available to - use with the source configuration of a collection. `invalid` refers to the - credentials (for example, the password provided has expired) and must be - corrected before they can be used with a collection. + :param StatusDetails status: (optional) Object that contains details about + the status of the authentication process. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
:rtype: DetailedResponse with `dict` result representing a `Credentials` object @@ -3534,6 +3532,8 @@ def update_credentials(self, raise ValueError('credential_id must be provided') if credential_details is not None: credential_details = convert_model(credential_details) + if status is not None: + status = convert_model(status) headers = {} sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME, service_version='V1', @@ -3566,7 +3566,7 @@ def update_credentials(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_credentials(self, environment_id: str, credential_id: str, @@ -3610,7 +3610,7 @@ def delete_credentials(self, environment_id: str, credential_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -3653,7 +3653,7 @@ def list_gateways(self, environment_id: str, **kwargs) -> DetailedResponse: headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def create_gateway(self, @@ -3703,7 +3703,7 @@ def create_gateway(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_gateway(self, environment_id: str, gateway_id: str, @@ -3746,7 +3746,7 @@ def get_gateway(self, environment_id: str, gateway_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_gateway(self, environment_id: str, gateway_id: str, @@ -3789,7 +3789,7 @@ def delete_gateway(self, environment_id: str, gateway_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response @@ -5075,11 +5075,8 @@ class Credentials(): :attr CredentialDetails credential_details: (optional) Object containing details of the stored credentials. Obtain credentials for your source from the administrator of the source. - :attr str status: (optional) The current status of this set of credentials. - `connected` indicates that the credentials are available to use with the source - configuration of a collection. `invalid` refers to the credentials (for example, - the password provided has expired) and must be corrected before they can be used - with a collection. + :attr StatusDetails status: (optional) Object that contains details about the + status of the authentication process. """ def __init__(self, @@ -5087,7 +5084,7 @@ def __init__(self, credential_id: str = None, source_type: str = None, credential_details: 'CredentialDetails' = None, - status: str = None) -> None: + status: 'StatusDetails' = None) -> None: """ Initialize a Credentials object. @@ -5105,11 +5102,8 @@ def __init__(self, :param CredentialDetails credential_details: (optional) Object containing details of the stored credentials. Obtain credentials for your source from the administrator of the source. - :param str status: (optional) The current status of this set of - credentials. `connected` indicates that the credentials are available to - use with the source configuration of a collection. `invalid` refers to the - credentials (for example, the password provided has expired) and must be - corrected before they can be used with a collection. + :param StatusDetails status: (optional) Object that contains details about + the status of the authentication process. 
""" self.credential_id = credential_id self.source_type = source_type @@ -5128,7 +5122,7 @@ def from_dict(cls, _dict: Dict) -> 'Credentials': args['credential_details'] = CredentialDetails.from_dict( _dict.get('credential_details')) if 'status' in _dict: - args['status'] = _dict.get('status') + args['status'] = StatusDetails.from_dict(_dict.get('status')) return cls(**args) @classmethod @@ -5149,7 +5143,7 @@ def to_dict(self) -> Dict: 'credential_details') and self.credential_details is not None: _dict['credential_details'] = self.credential_details.to_dict() if hasattr(self, 'status') and self.status is not None: - _dict['status'] = self.status + _dict['status'] = self.status.to_dict() return _dict def _to_dict(self): @@ -5188,16 +5182,6 @@ class SourceTypeEnum(str, Enum): WEB_CRAWL = 'web_crawl' CLOUD_OBJECT_STORAGE = 'cloud_object_storage' - class StatusEnum(str, Enum): - """ - The current status of this set of credentials. `connected` indicates that the - credentials are available to use with the source configuration of a collection. - `invalid` refers to the credentials (for example, the password provided has - expired) and must be corrected before they can be used with a collection. - """ - CONNECTED = 'connected' - INVALID = 'invalid' - class CredentialsList(): """ @@ -6063,10 +6047,9 @@ class Enrichment(): are `natural_language_understanding` and `elements`. When using `natual_language_understanding`, the **options** object must contain Natural Language Understanding options. - When using `elements` the **options** object must contain Element - Classification options. Additionally, when using the `elements` enrichment the - configuration specified and files ingested must meet all the criteria specified - in [the + When using `elements` the **options** object must contain Element Classification + options. Additionally, when using the `elements` enrichment the configuration + specified and files ingested must meet all the criteria specified in [the documentation](https://cloud.ibm.com/docs/discovery?topic=discovery-element-classification#element-classification). :attr bool ignore_downstream_errors: (optional) If true, then most errors generated during the enrichment process will be treated as warnings and will not @@ -6098,7 +6081,7 @@ def __init__(self, options are `natural_language_understanding` and `elements`. When using `natual_language_understanding`, the **options** object must contain Natural Language Understanding options. - When using `elements` the **options** object must contain Element + When using `elements` the **options** object must contain Element Classification options. Additionally, when using the `elements` enrichment the configuration specified and files ingested must meet all the criteria specified in [the @@ -6210,8 +6193,8 @@ class EnrichmentOptions(): (German), `it` (Italian), `pt` (Portuguese), `ru` (Russian), `es` (Spanish), and `sv` (Swedish). **Note:** Not all features support all languages, automatic detection is recommended. - :attr str model: (optional) *For use with `elements` enrichments only.* The - element extraction model to use. Models available are: `contract`. + :attr str model: (optional) For use with `elements` enrichments only. The + element extraction model to use. The only model available is `contract`. """ def __init__(self, @@ -6230,8 +6213,8 @@ def __init__(self, `fr` (French), `de` (German), `it` (Italian), `pt` (Portuguese), `ru` (Russian), `es` (Spanish), and `sv` (Swedish). 
**Note:** Not all features support all languages, automatic detection is recommended. - :param str model: (optional) *For use with `elements` enrichments only.* - The element extraction model to use. Models available are: `contract`. + :param str model: (optional) For use with `elements` enrichments only. The + element extraction model to use. The only model available is `contract`. """ self.features = features self.language = language @@ -9472,7 +9455,7 @@ class Notice(): `smart_document_understanding_failed_warning`, `smart_document_understanding_page_error`, `smart_document_understanding_page_warning`. **Note:** This is not a complete - list, other values might be returned. + list; other values might be returned. :attr datetime created: (optional) The creation date of the collection in the format yyyy-MM-dd'T'HH:mm:ss.SSS'Z'. :attr str document_id: (optional) Unique identifier of the document. @@ -9480,9 +9463,9 @@ class Notice(): training. :attr str severity: (optional) Severity level of the notice. :attr str step: (optional) Ingestion or training step in which the notice - occurred. Typical step values include: `classify_elements`, - `smartDocumentUnderstanding`, `ingestion`, `indexing`, `convert`. **Note:** This - is not a complete list, other values might be returned. + occurred. Typical step values include: `smartDocumentUnderstanding`, + `ingestion`, `indexing`, `convert`. **Note:** This is not a complete list; other + values might be returned. :attr str description: (optional) The description of the notice. """ @@ -10057,6 +10040,29 @@ def _to_dict(self): """Return a json dictionary representing this model.""" return self.to_dict() + def get_properties(self) -> Dict: + """Return a dictionary of arbitrary properties from this instance of QueryNoticesResult""" + _dict = {} + + for _key in [ + k for k in vars(self).keys() + if k not in QueryNoticesResult._properties + ]: + _dict[_key] = getattr(self, _key) + return _dict + + def set_properties(self, _dict: dict): + """Set a dictionary of arbitrary properties to this instance of QueryNoticesResult""" + for _key in [ + k for k in vars(self).keys() + if k not in QueryNoticesResult._properties + ]: + delattr(self, _key) + + for _key, _value in _dict.items(): + if _key not in QueryNoticesResult._properties: + setattr(self, _key, _value) + def __str__(self) -> str: """Return a `str` version of this QueryNoticesResult object.""" return json.dumps(self.to_dict(), indent=2) @@ -10417,6 +10423,27 @@ def _to_dict(self): """Return a json dictionary representing this model.""" return self.to_dict() + def get_properties(self) -> Dict: + """Return a dictionary of arbitrary properties from this instance of QueryResult""" + _dict = {} + + for _key in [ + k for k in vars(self).keys() if k not in QueryResult._properties + ]: + _dict[_key] = getattr(self, _key) + return _dict + + def set_properties(self, _dict: dict): + """Set a dictionary of arbitrary properties to this instance of QueryResult""" + for _key in [ + k for k in vars(self).keys() if k not in QueryResult._properties + ]: + delattr(self, _key) + + for _key, _value in _dict.items(): + if _key not in QueryResult._properties: + setattr(self, _key, _value) + def __str__(self) -> str: """Return a `str` version of this QueryResult object.""" return json.dumps(self.to_dict(), indent=2) @@ -11936,6 +11963,74 @@ class StatusEnum(str, Enum): UNKNOWN = 'unknown' +class StatusDetails(): + """ + Object that contains details about the status of the authentication process. 
+ + :attr bool authenticated: (optional) Indicates whether the credential is + accepted by the target data source. + :attr str error_message: (optional) If `authenticated` is `false`, a message + describes why the authentication was unsuccessful. + """ + + def __init__(self, + *, + authenticated: bool = None, + error_message: str = None) -> None: + """ + Initialize a StatusDetails object. + + :param bool authenticated: (optional) Indicates whether the credential is + accepted by the target data source. + :param str error_message: (optional) If `authenticated` is `false`, a + message describes why the authentication was unsuccessful. + """ + self.authenticated = authenticated + self.error_message = error_message + + @classmethod + def from_dict(cls, _dict: Dict) -> 'StatusDetails': + """Initialize a StatusDetails object from a json dictionary.""" + args = {} + if 'authenticated' in _dict: + args['authenticated'] = _dict.get('authenticated') + if 'error_message' in _dict: + args['error_message'] = _dict.get('error_message') + return cls(**args) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a StatusDetails object from a json dictionary.""" + return cls.from_dict(_dict) + + def to_dict(self) -> Dict: + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'authenticated') and self.authenticated is not None: + _dict['authenticated'] = self.authenticated + if hasattr(self, 'error_message') and self.error_message is not None: + _dict['error_message'] = self.error_message + return _dict + + def _to_dict(self): + """Return a json dictionary representing this model.""" + return self.to_dict() + + def __str__(self) -> str: + """Return a `str` version of this StatusDetails object.""" + return json.dumps(self.to_dict(), indent=2) + + def __eq__(self, other: 'StatusDetails') -> bool: + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other: 'StatusDetails') -> bool: + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class TokenDictRule(): """ An object defining a single tokenization rule. diff --git a/ibm_watson/discovery_v2.py b/ibm_watson/discovery_v2.py index c17756c7..708007d0 100644 --- a/ibm_watson/discovery_v2.py +++ b/ibm_watson/discovery_v2.py @@ -14,13 +14,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -# IBM OpenAPI SDK Code Generator Version: 99-SNAPSHOT-902c9336-20210507-162723 +# IBM OpenAPI SDK Code Generator Version: 3.38.0-07189efd-20210827-205025 """ IBM Watson™ Discovery is a cognitive search and content analytics engine that you can add to applications to identify patterns, trends and actionable insights to drive better decision-making. Securely unify structured and unstructured data with pre-enriched content, and use a simplified query language to eliminate the need for manual filtering of results. + +API Version: 2.0 +See: https://cloud.ibm.com/docs/discovery-data """ from datetime import datetime @@ -58,7 +61,7 @@ def __init__( Construct a new client for the Discovery service. :param str version: Release date of the version of the API you want to use. - Specify dates in YYYY-MM-DD format. The current version is `2019-11-22`. + Specify dates in YYYY-MM-DD format. The current version is `2020-08-30`.
:param Authenticator authenticator: The authenticator specifies the authentication mechanism. Get up to date information from https://github.com/IBM/python-sdk-core/blob/master/README.md @@ -86,7 +89,7 @@ def list_collections(self, project_id: str, **kwargs) -> DetailedResponse: Lists existing collections for the specified project. :param str project_id: The ID of the project. This information can be found - from the deploy page of the Discovery administrative tooling. + from the *Integrate and Deploy* page in Discovery. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse with `dict` result representing a `ListCollectionsResponse` object @@ -115,7 +118,7 @@ def list_collections(self, project_id: str, **kwargs) -> DetailedResponse: headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def create_collection(self, @@ -132,7 +135,7 @@ def create_collection(self, Create a new collection in the specified project. :param str project_id: The ID of the project. This information can be found - from the deploy page of the Discovery administrative tooling. + from the *Integrate and Deploy* page in Discovery. :param str name: The name of the collection. :param str description: (optional) A description of the collection. :param str language: (optional) The language of the collection. @@ -181,7 +184,7 @@ def create_collection(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_collection(self, project_id: str, collection_id: str, @@ -192,7 +195,7 @@ def get_collection(self, project_id: str, collection_id: str, Get details about the specified collection. :param str project_id: The ID of the project. This information can be found - from the deploy page of the Discovery administrative tooling. + from the *Integrate and Deploy* page in Discovery. :param str collection_id: The ID of the collection. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. @@ -225,7 +228,7 @@ def get_collection(self, project_id: str, collection_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def update_collection(self, @@ -242,7 +245,7 @@ def update_collection(self, Updates the specified collection's name, description, and enrichments. :param str project_id: The ID of the project. This information can be found - from the deploy page of the Discovery administrative tooling. + from the *Integrate and Deploy* page in Discovery. :param str collection_id: The ID of the collection. :param str name: (optional) The name of the collection. :param str description: (optional) A description of the collection. @@ -291,7 +294,7 @@ def update_collection(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_collection(self, project_id: str, collection_id: str, @@ -303,7 +306,7 @@ def delete_collection(self, project_id: str, collection_id: str, specified collection and not shared is also deleted. :param str project_id: The ID of the project. This information can be found - from the deploy page of the Discovery administrative tooling. + from the *Integrate and Deploy* page in Discovery. :param str collection_id: The ID of the collection. 
:param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. @@ -335,7 +338,7 @@ def delete_collection(self, project_id: str, collection_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -373,7 +376,7 @@ def query(self, settings. :param str project_id: The ID of the project. This information can be found - from the deploy page of the Discovery administrative tooling. + from the *Integrate and Deploy* page in Discovery. :param List[str] collection_ids: (optional) A comma-separated list of collection IDs to be queried against. :param str filter: (optional) A cacheable query that excludes documents @@ -392,16 +395,15 @@ def query(self, possible aggregations, see the Query reference. :param int count: (optional) Number of results to return. :param List[str] return_: (optional) A list of the fields in the document - hierarchy to return. If this parameter not specified, then all top-level - fields are returned. + hierarchy to return. If this parameter is an empty list, then all fields + are returned. :param int offset: (optional) The number of query results to skip at the beginning. For example, if the total number of results that are returned is 10 and the offset is 8, it returns the last two results. :param str sort: (optional) A comma-separated list of fields in the document to sort on. You can optionally specify a sort direction by prefixing the field with `-` for descending or `+` for ascending. Ascending - is the default sort direction if no prefix is specified. This parameter - cannot be used in the same query as the **bias** parameter. + is the default sort direction if no prefix is specified. :param bool highlight: (optional) When `true`, a highlight field is returned for each result which contains the fields which match the query with `<em></em>` tags around the matching query terms. @@ -413,7 +415,7 @@ def query(self, :param QueryLargeTableResults table_results: (optional) Configuration for table retrieval. :param QueryLargeSuggestedRefinements suggested_refinements: (optional) - Configuration for suggested refinements. + Configuration for suggested refinements. Available with Premium plans only. :param QueryLargePassages passages: (optional) Configuration for passage retrieval. :param dict headers: A `dict` containing the request headers @@ -471,7 +473,7 @@ def query(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_autocompletion(self, @@ -488,10 +490,9 @@ def get_autocompletion(self, Returns completion query suggestions for the specified prefix. :param str project_id: The ID of the project. This information can be found - from the deploy page of the Discovery administrative tooling. + from the *Integrate and Deploy* page in Discovery. :param str prefix: The prefix to use for autocompletion. For example, the - prefix `Ho` could autocomplete to `Hot`, `Housing`, or `How do I upgrade`. - Possible completions are. + prefix `Ho` could autocomplete to `hot`, `housing`, or `how`. :param List[str] collection_ids: (optional) Comma separated list of the collection IDs. If this parameter is not specified, all collections in the project are used.
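
As a rough illustration of the autocompletion behavior described in the docstring above, the following is a minimal sketch of calling `get_autocompletion` on a `DiscoveryV2` client. The API key, service URL, and project ID are placeholders, the keyword arguments follow the signature shown in this diff, and the suggested completions are only hypothetical.

from ibm_watson import DiscoveryV2
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

# Placeholder credentials, URL, and project ID -- substitute your own values.
authenticator = IAMAuthenticator('your_api_key')
discovery = DiscoveryV2(version='2020-08-30', authenticator=authenticator)
discovery.set_service_url('https://api.us-south.discovery.watson.cloud.ibm.com')

# Request completions for the prefix 'Ho'; per the docstring above, this
# might return suggestions such as 'hot', 'housing', or 'how'.
completions = discovery.get_autocompletion(
    project_id='your_project_id',
    prefix='Ho',
    count=5).get_result()
print(completions)
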
@@ -536,7 +537,7 @@ def get_autocompletion(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def query_collection_notices(self, @@ -556,7 +557,7 @@ def query_collection_notices(self, documents are ingested. :param str project_id: The ID of the project. This information can be found - from the deploy page of the Discovery administrative tooling. + from the *Integrate and Deploy* page in Discovery. :param str collection_id: The ID of the collection. :param str filter: (optional) A cacheable query that excludes documents that don't mention the query content. Filter searches are better for @@ -611,7 +612,7 @@ def query_collection_notices(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def query_notices(self, @@ -630,7 +631,7 @@ def query_notices(self, notices are generated by relevancy training. :param str project_id: The ID of the project. This information can be found - from the deploy page of the Discovery administrative tooling. + from the *Integrate and Deploy* page in Discovery. :param str filter: (optional) A cacheable query that excludes documents that don't mention the query content. Filter searches are better for metadata-type searches and for assessing the concepts in the data set. @@ -681,7 +682,7 @@ def query_notices(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def list_fields(self, @@ -696,7 +697,7 @@ def list_fields(self, collections. :param str project_id: The ID of the project. This information can be found - from the deploy page of the Discovery administrative tooling. + from the *Integrate and Deploy* page in Discovery. :param List[str] collection_ids: (optional) Comma separated list of the collection IDs. If this parameter is not specified, all collections in the project are used. @@ -731,7 +732,7 @@ def list_fields(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -746,7 +747,7 @@ def get_component_settings(self, project_id: str, Returns default configuration settings for components. :param str project_id: The ID of the project. This information can be found - from the deploy page of the Discovery administrative tooling. + from the *Integrate and Deploy* page in Discovery. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse with `dict` result representing a `ComponentSettingsResponse` object @@ -776,7 +777,7 @@ def get_component_settings(self, project_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -797,36 +798,35 @@ def add_document(self, Add a document. Add a document to a collection with optional metadata. - Returns immediately after the system has accepted the document for processing. + Returns immediately after the system has accepted the document for processing. * The user must provide document content, metadata, or both. If the request is missing both document content and metadata, it is rejected. - * The user can set the **Content-Type** parameter on the **file** part to - indicate the media type of the document. 
If the **Content-Type** parameter is - missing or is one of the generic media types (for example, - `application/octet-stream`), then the service attempts to automatically detect the - document's media type. - * The following field names are reserved and will be filtered out if present - after normalization: `id`, `score`, `highlight`, and any field with the prefix of: - `_`, `+`, or `-` + * You can set the **Content-Type** parameter on the **file** part to indicate + the media type of the document. If the **Content-Type** parameter is missing or is + one of the generic media types (for example, `application/octet-stream`), then the + service attempts to automatically detect the document's media type. + * The following field names are reserved and are filtered out if present after + normalization: `id`, `score`, `highlight`, and any field with the prefix of: `_`, + `+`, or `-` * Fields with empty name values after normalization are filtered out before indexing. - * Fields containing the following characters after normalization are filtered + * Fields that contain the following characters after normalization are filtered out before indexing: `#` and `,` - If the document is uploaded to a collection that has it's data shared with - another collection, the **X-Watson-Discovery-Force** header must be set to `true`. - **Note:** Documents can be added with a specific **document_id** by using the - **/v2/projects/{project_id}/collections/{collection_id}/documents** method. - **Note:** This operation only works on collections created to accept direct file - uploads. It cannot be used to modify a collection that connects to an external - source such as Microsoft SharePoint. + If the document is uploaded to a collection that shares its data with another + collection, the **X-Watson-Discovery-Force** header must be set to `true`. + **Note:** You can assign an ID to a document that you add by appending the ID to + the endpoint + (`/v2/projects/{project_id}/collections/{collection_id}/documents/{document_id}`). + If a document already exists with the specified ID, it is replaced. + **Note:** This operation works with a file upload collection. It cannot be used to + modify a collection that crawls an external data source. :param str project_id: The ID of the project. This information can be found - from the deploy page of the Discovery administrative tooling. + from the *Integrate and Deploy* page in Discovery. :param str collection_id: The ID of the collection. - :param BinaryIO file: (optional) The content of the document to ingest. The - maximum supported file size when adding a file to a collection is 50 - megabytes, the maximum supported file size when testing a configuration is - 1 megabyte. Files larger than the supported size are rejected. + :param BinaryIO file: (optional) The content of the document to ingest. For + maximum supported file size limits, see [the + documentation](https://cloud.ibm.com/docs/discovery-data?topic=discovery-data-collections#collections-doc-limits). :param str filename: (optional) The filename for file. :param str file_content_type: (optional) The content type of file. :param str metadata: (optional) The maximum supported metadata file size is @@ -881,7 +881,7 @@ def add_document(self, params=params, files=form_data) - response = self.send(request) + response = self.send(request, **kwargs) return response def update_document(self, @@ -900,24 +900,23 @@ def update_document(self, Replace an existing document or add a document with a specified **document_id**. 
Starts ingesting a document with optional metadata. - If the document is uploaded to a collection that has it's data shared with another + If the document is uploaded to a collection that shares its data with another collection, the **X-Watson-Discovery-Force** header must be set to `true`. **Note:** When uploading a new document with this method it automatically replaces any document stored with the same **document_id** if it exists. **Note:** This operation only works on collections created to accept direct file uploads. It cannot be used to modify a collection that connects to an external source such as Microsoft SharePoint. - **Note:** If an uploaded document is segmented, all segments will be overwritten, - even if the updated version of the document has fewer segments. + **Note:** If an uploaded document is segmented, all segments are overwritten, even + if the updated version of the document has fewer segments. :param str project_id: The ID of the project. This information can be found - from the deploy page of the Discovery administrative tooling. + from the *Integrate and Deploy* page in Discovery. :param str collection_id: The ID of the collection. :param str document_id: The ID of the document. - :param BinaryIO file: (optional) The content of the document to ingest. The - maximum supported file size when adding a file to a collection is 50 - megabytes, the maximum supported file size when testing a configuration is - 1 megabyte. Files larger than the supported size are rejected. + :param BinaryIO file: (optional) The content of the document to ingest. For + maximum supported file size limits, see [the + documentation](https://cloud.ibm.com/docs/discovery-data?topic=discovery-data-collections#collections-doc-limits). :param str filename: (optional) The filename for file. :param str file_content_type: (optional) The content type of file. :param str metadata: (optional) The maximum supported metadata file size is @@ -975,7 +974,7 @@ def update_document(self, params=params, files=form_data) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_document(self, @@ -998,7 +997,7 @@ def delete_document(self, all segments by deleting using the `parent_document_id` of a segment result. :param str project_id: The ID of the project. This information can be found - from the deploy page of the Discovery administrative tooling. + from the *Integrate and Deploy* page in Discovery. :param str collection_id: The ID of the collection. :param str document_id: The ID of the document. :param bool x_watson_discovery_force: (optional) When `true`, the uploaded @@ -1038,7 +1037,7 @@ def delete_document(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -1053,7 +1052,7 @@ def list_training_queries(self, project_id: str, List the training queries for the specified project. :param str project_id: The ID of the project. This information can be found - from the deploy page of the Discovery administrative tooling. + from the *Integrate and Deploy* page in Discovery. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
:rtype: DetailedResponse with `dict` result representing a `TrainingQuerySet` object @@ -1083,7 +1082,7 @@ def list_training_queries(self, project_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_training_queries(self, project_id: str, @@ -1094,7 +1093,7 @@ def delete_training_queries(self, project_id: str, Removes all training queries for the specified project. :param str project_id: The ID of the project. This information can be found - from the deploy page of the Discovery administrative tooling. + from the *Integrate and Deploy* page in Discovery. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse @@ -1123,7 +1122,7 @@ def delete_training_queries(self, project_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def create_training_query(self, @@ -1140,7 +1139,7 @@ def create_training_query(self, and natural language query. :param str project_id: The ID of the project. This information can be found - from the deploy page of the Discovery administrative tooling. + from the *Integrate and Deploy* page in Discovery. :param str natural_language_query: The natural text query for the training query. :param List[TrainingExample] examples: Array of training examples. @@ -1190,7 +1189,7 @@ def create_training_query(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_training_query(self, project_id: str, query_id: str, @@ -1202,7 +1201,7 @@ def get_training_query(self, project_id: str, query_id: str, examples. :param str project_id: The ID of the project. This information can be found - from the deploy page of the Discovery administrative tooling. + from the *Integrate and Deploy* page in Discovery. :param str query_id: The ID of the query used for training. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. @@ -1235,7 +1234,7 @@ def get_training_query(self, project_id: str, query_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def update_training_query(self, @@ -1252,7 +1251,7 @@ def update_training_query(self, Updates an existing training query and its examples. :param str project_id: The ID of the project. This information can be found - from the deploy page of the Discovery administrative tooling. + from the *Integrate and Deploy* page in Discovery. :param str query_id: The ID of the query used for training. :param str natural_language_query: The natural text query for the training query. @@ -1305,7 +1304,7 @@ def update_training_query(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_training_query(self, project_id: str, query_id: str, @@ -1317,7 +1316,7 @@ def delete_training_query(self, project_id: str, query_id: str, examples. :param str project_id: The ID of the project. This information can be found - from the deploy page of the Discovery administrative tooling. + from the *Integrate and Deploy* page in Discovery. :param str query_id: The ID of the query used for training.
:param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. @@ -1349,7 +1348,7 @@ def delete_training_query(self, project_id: str, query_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -1368,20 +1367,17 @@ def analyze_document(self, """ Analyze a Document. - Process a document using the specified collection's settings and return it for - realtime use. - **Note:** Documents processed using this method are not added to the specified - collection. - **Note:** This method is only supported on IBM Cloud Pak for Data instances of - Discovery. + Process a document and return it for realtime use. Supports JSON files only. + The document is processed according to the collection's configuration settings but + is not stored in the collection. + **Note:** This method is supported on installed instances of Discovery only. :param str project_id: The ID of the project. This information can be found - from the deploy page of the Discovery administrative tooling. + from the *Integrate and Deploy* page in Discovery. :param str collection_id: The ID of the collection. - :param BinaryIO file: (optional) The content of the document to ingest. The - maximum supported file size when adding a file to a collection is 50 - megabytes, the maximum supported file size when testing a configuration is - 1 megabyte. Files larger than the supported size are rejected. + :param BinaryIO file: (optional) The content of the document to ingest. For + maximum supported file size limits, see [the + documentation](https://cloud.ibm.com/docs/discovery-data?topic=discovery-data-collections#collections-doc-limits). :param str filename: (optional) The filename for file. :param str file_content_type: (optional) The content type of file. :param str metadata: (optional) The maximum supported metadata file size is @@ -1433,7 +1429,7 @@ def analyze_document(self, params=params, files=form_data) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -1444,10 +1440,12 @@ def list_enrichments(self, project_id: str, **kwargs) -> DetailedResponse: """ List Enrichments. - List the enrichments available to this project. + Lists the enrichments available to this project. The *Part of Speech* and + *Sentiment of Phrases* enrichments might be listed, but are reserved for internal + use only. :param str project_id: The ID of the project. This information can be found - from the deploy page of the Discovery administrative tooling. + from the *Integrate and Deploy* page in Discovery. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse with `dict` result representing a `Enrichments` object @@ -1476,7 +1474,7 @@ def list_enrichments(self, project_id: str, **kwargs) -> DetailedResponse: headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def create_enrichment(self, @@ -1488,10 +1486,10 @@ def create_enrichment(self, """ Create an enrichment. - Create an enrichment for use with the specified project/. + Create an enrichment for use with the specified project. :param str project_id: The ID of the project. This information can be found - from the deploy page of the Discovery administrative tooling. 
+ from the *Integrate and Deploy* page in Discovery. :param CreateEnrichment enrichment: Information about a specific enrichment. :param BinaryIO file: (optional) The enrichment file to upload. @@ -1532,7 +1530,7 @@ def create_enrichment(self, params=params, files=form_data) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_enrichment(self, project_id: str, enrichment_id: str, @@ -1543,7 +1541,7 @@ def get_enrichment(self, project_id: str, enrichment_id: str, Get details about a specific enrichment. :param str project_id: The ID of the project. This information can be found - from the deploy page of the Discovery administrative tooling. + from the *Integrate and Deploy* page in Discovery. :param str enrichment_id: The ID of the enrichment. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. @@ -1576,7 +1574,7 @@ def get_enrichment(self, project_id: str, enrichment_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def update_enrichment(self, @@ -1592,7 +1590,7 @@ def update_enrichment(self, Updates an existing enrichment's name and description. :param str project_id: The ID of the project. This information can be found - from the deploy page of the Discovery administrative tooling. + from the *Integrate and Deploy* page in Discovery. :param str enrichment_id: The ID of the enrichment. :param str name: A new name for the enrichment. :param str description: (optional) A new description for the enrichment. @@ -1635,7 +1633,7 @@ def update_enrichment(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_enrichment(self, project_id: str, enrichment_id: str, @@ -1647,7 +1645,7 @@ def delete_enrichment(self, project_id: str, enrichment_id: str, **Note:** Only enrichments that have been manually created can be deleted. :param str project_id: The ID of the project. This information can be found - from the deploy page of the Discovery administrative tooling. + from the *Integrate and Deploy* page in Discovery. :param str enrichment_id: The ID of the enrichment. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. @@ -1679,7 +1677,7 @@ def delete_enrichment(self, project_id: str, enrichment_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -1715,7 +1713,7 @@ def list_projects(self, **kwargs) -> DetailedResponse: headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def create_project(self, @@ -1730,7 +1728,11 @@ def create_project(self, Create a new project for this instance. :param str name: The human readable name of this project. - :param str type: The project type of this project. + :param str type: The type of project. + The `content_intelligence` type is a *Document Retrieval for Contracts* + project and the `other` type is a *Custom* project. + The `content_mining` and `content_intelligence` types are available with + Premium plan managed deployments and installed deployments only. :param DefaultQueryParams default_query_parameters: (optional) Default query parameters for this project. 
:param dict headers: A `dict` containing the request headers @@ -1772,7 +1774,7 @@ def create_project(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_project(self, project_id: str, **kwargs) -> DetailedResponse: @@ -1782,7 +1784,7 @@ def get_project(self, project_id: str, **kwargs) -> DetailedResponse: Get details on the specified project. :param str project_id: The ID of the project. This information can be found - from the deploy page of the Discovery administrative tooling. + from the *Integrate and Deploy* page in Discovery. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse with `dict` result representing a `ProjectDetails` object @@ -1811,7 +1813,7 @@ def get_project(self, project_id: str, **kwargs) -> DetailedResponse: headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def update_project(self, @@ -1825,7 +1827,7 @@ def update_project(self, Update the specified project's name. :param str project_id: The ID of the project. This information can be found - from the deploy page of the Discovery administrative tooling. + from the *Integrate and Deploy* page in Discovery. :param str name: (optional) The new name to give this project. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. @@ -1861,7 +1863,7 @@ def update_project(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_project(self, project_id: str, **kwargs) -> DetailedResponse: @@ -1873,7 +1875,7 @@ def delete_project(self, project_id: str, **kwargs) -> DetailedResponse: project, including all collections. :param str project_id: The ID of the project. This information can be found - from the deploy page of the Discovery administrative tooling. + from the *Integrate and Deploy* page in Discovery. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse @@ -1901,7 +1903,7 @@ def delete_project(self, project_id: str, **kwargs) -> DetailedResponse: headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -1946,7 +1948,7 @@ def delete_user_data(self, customer_id: str, **kwargs) -> DetailedResponse: headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response @@ -2008,7 +2010,7 @@ class FileContentType(str, Enum): class AnalyzedDocument(): """ - An object containing the converted document and any identified enrichments. + An object that contains the converted document and any identified enrichments. :attr List[Notice] notices: (optional) Array of document results that match the query. 
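As context for the reworked `type` documentation above, and for the `conversational_search` and `content_intelligence` enum values introduced later in this diff, here is a minimal sketch of creating a project through the updated client. The API key, service URL, and project name are illustrative placeholders, and the `content_intelligence` type assumes a Premium plan managed deployment or an installed deployment:

from ibm_watson import DiscoveryV2
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

authenticator = IAMAuthenticator('your_api_key')
discovery = DiscoveryV2(version='2020-08-30', authenticator=authenticator)
discovery.set_service_url('https://api.us-south.discovery.watson.cloud.ibm.com')

# 'content_intelligence' creates a *Document Retrieval for Contracts* project;
# 'other' creates a *Custom* project, per the docstring above.
project = discovery.create_project(
    name='Contracts project',
    type='content_intelligence').get_result()
print(project['project_id'])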
@@ -2127,6 +2129,29 @@ def _to_dict(self): """Return a json dictionary representing this model.""" return self.to_dict() + def get_properties(self) -> Dict: + """Return a dictionary of arbitrary properties from this instance of AnalyzedResult""" + _dict = {} + + for _key in [ + k for k in vars(self).keys() + if k not in AnalyzedResult._properties + ]: + _dict[_key] = getattr(self, _key) + return _dict + + def set_properties(self, _dict: dict): + """Set a dictionary of arbitrary properties to this instance of AnalyzedResult""" + for _key in [ + k for k in vars(self).keys() + if k not in AnalyzedResult._properties + ]: + delattr(self, _key) + + for _key, _value in _dict.items(): + if _key not in AnalyzedResult._properties: + setattr(self, _key, _value) + def __str__(self) -> str: """Return a `str` version of this AnalyzedResult object.""" return json.dumps(self.to_dict(), indent=2) @@ -2314,6 +2339,8 @@ class CollectionEnrichment(): :attr str enrichment_id: (optional) The unique identifier of this enrichment. :attr List[str] fields: (optional) An array of field names that the enrichment is applied to. + If you apply an enrichment to a field from a JSON file, the data is converted to + an array automatically, even if the field contains a single value. """ def __init__(self, @@ -2327,6 +2354,9 @@ def __init__(self, enrichment. :param List[str] fields: (optional) An array of field names that the enrichment is applied to. + If you apply an enrichment to a field from a JSON file, the data is + converted to an array automatically, even if the field contains a single + value. """ self.enrichment_id = enrichment_id self.fields = fields @@ -2376,7 +2406,7 @@ def __ne__(self, other: 'CollectionEnrichment') -> bool: class Completions(): """ - An object containing an array of autocompletion suggestions. + An object that contains an array of autocompletion suggestions. :attr List[str] completions: (optional) Array of autocomplete suggestion based on the provided prefix. @@ -2820,8 +2850,9 @@ class CreateEnrichment(): :attr str name: (optional) The human readable name for this enrichment. :attr str description: (optional) The description of this enrichment. :attr str type: (optional) The type of this enrichment. - :attr EnrichmentOptions options: (optional) A object containing options for the - current enrichment. + :attr EnrichmentOptions options: (optional) An object that contains options for + the current enrichment. Starting with version `2020-08-30`, the enrichment + options are not included in responses from the List Enrichments method. """ def __init__(self, @@ -2836,8 +2867,10 @@ def __init__(self, :param str name: (optional) The human readable name for this enrichment. :param str description: (optional) The description of this enrichment. :param str type: (optional) The type of this enrichment. - :param EnrichmentOptions options: (optional) A object containing options - for the current enrichment. + :param EnrichmentOptions options: (optional) An object that contains + options for the current enrichment. Starting with version `2020-08-30`, the + enrichment options are not included in responses from the List Enrichments + method. """ self.name = name self.description = description @@ -2918,7 +2951,8 @@ class DefaultQueryParams(): :attr str aggregation: (optional) A string representing the default aggregation query for the project. :attr DefaultQueryParamsSuggestedRefinements suggested_refinements: (optional) - Object containing suggested refinement settings. 
+ Object that contains suggested refinement settings. Available with Premium plans + only. :attr bool spelling_suggestions: (optional) When `true`, a spelling suggestions for the query are returned by default. :attr bool highlight: (optional) When `true`, a highlights for the query are @@ -2956,7 +2990,8 @@ def __init__(self, :param str aggregation: (optional) A string representing the default aggregation query for the project. :param DefaultQueryParamsSuggestedRefinements suggested_refinements: - (optional) Object containing suggested refinement settings. + (optional) Object that contains suggested refinement settings. Available + with Premium plans only. :param bool spelling_suggestions: (optional) When `true`, a spelling suggestions for the query are returned by default. :param bool highlight: (optional) When `true`, a highlights for the query @@ -3173,10 +3208,10 @@ def __ne__(self, other: 'DefaultQueryParamsPassages') -> bool: class DefaultQueryParamsSuggestedRefinements(): """ - Object containing suggested refinement settings. + Object that contains suggested refinement settings. Available with Premium plans only. - :attr bool enabled: (optional) When `true`, a suggested refinements for the - query are returned by default. + :attr bool enabled: (optional) When `true`, suggested refinements for the query + are returned by default. :attr int count: (optional) The number of suggested refinements to return by default. """ @@ -3185,8 +3220,8 @@ def __init__(self, *, enabled: bool = None, count: int = None) -> None: """ Initialize a DefaultQueryParamsSuggestedRefinements object. - :param bool enabled: (optional) When `true`, a suggested refinements for - the query are returned by default. + :param bool enabled: (optional) When `true`, suggested refinements for the + query are returned by default. :param int count: (optional) The number of suggested refinements to return by default. """ @@ -3545,8 +3580,9 @@ class Enrichment(): :attr str name: (optional) The human readable name for this enrichment. :attr str description: (optional) The description of this enrichment. :attr str type: (optional) The type of this enrichment. - :attr EnrichmentOptions options: (optional) A object containing options for the - current enrichment. + :attr EnrichmentOptions options: (optional) An object that contains options for + the current enrichment. Starting with version `2020-08-30`, the enrichment + options are not included in responses from the List Enrichments method. """ def __init__(self, @@ -3562,8 +3598,10 @@ def __init__(self, :param str name: (optional) The human readable name for this enrichment. :param str description: (optional) The description of this enrichment. :param str type: (optional) The type of this enrichment. - :param EnrichmentOptions options: (optional) A object containing options - for the current enrichment. + :param EnrichmentOptions options: (optional) An object that contains + options for the current enrichment. Starting with version `2020-08-30`, the + enrichment options are not included in responses from the List Enrichments + method. """ self.enrichment_id = enrichment_id self.name = name @@ -3642,19 +3680,22 @@ class TypeEnum(str, Enum): class EnrichmentOptions(): """ - A object containing options for the current enrichment. + An object that contains options for the current enrichment. Starting with version + `2020-08-30`, the enrichment options are not included in responses from the List + Enrichments method. 
:attr List[str] languages: (optional) An array of supported languages for this - enrichment. - :attr str entity_type: (optional) The type of entity. Required when creating - `dictionary` and `regular_expression` **type** enrichment. Not valid when - creating any other type of enrichment. + enrichment. Required when `type` is `dictionary`. Optional when `type` is + `rule_based`. Not valid when creating any other type of enrichment. + :attr str entity_type: (optional) The name of the entity type. This value is + used as the field name in the index. Required when `type` is `dictionary` or + `regular_expression`. Not valid when creating any other type of enrichment. :attr str regular_expression: (optional) The regular expression to apply for - this enrichment. Required only when the **type** of enrichment being created is - a `regular_expression`. Not valid when creating any other type of enrichment. + this enrichment. Required when `type` is `regular_expression`. Not valid when + creating any other type of enrichment. :attr str result_field: (optional) The name of the result document field that - this enrichment creates. Required only when the enrichment **type** is - `rule_based`. Not valid when creating any other type of enrichment. + this enrichment creates. Required when `type` is `rule_based`. Not valid when + creating any other type of enrichment. """ def __init__(self, @@ -3667,17 +3708,18 @@ def __init__(self, Initialize a EnrichmentOptions object. :param List[str] languages: (optional) An array of supported languages for - this enrichment. - :param str entity_type: (optional) The type of entity. Required when - creating `dictionary` and `regular_expression` **type** enrichment. Not - valid when creating any other type of enrichment. + this enrichment. Required when `type` is `dictionary`. Optional when `type` + is `rule_based`. Not valid when creating any other type of enrichment. + :param str entity_type: (optional) The name of the entity type. This value + is used as the field name in the index. Required when `type` is + `dictionary` or `regular_expression`. Not valid when creating any other + type of enrichment. :param str regular_expression: (optional) The regular expression to apply - for this enrichment. Required only when the **type** of enrichment being - created is a `regular_expression`. Not valid when creating any other type - of enrichment. + for this enrichment. Required when `type` is `regular_expression`. Not + valid when creating any other type of enrichment. :param str result_field: (optional) The name of the result document field - that this enrichment creates. Required only when the enrichment **type** is - `rule_based`. Not valid when creating any other type of enrichment. + that this enrichment creates. Required when `type` is `rule_based`. Not + valid when creating any other type of enrichment. """ self.languages = languages self.entity_type = entity_type @@ -3739,7 +3781,7 @@ def __ne__(self, other: 'EnrichmentOptions') -> bool: class Enrichments(): """ - An object containing an array of enrichment definitions. + An object that contains an array of enrichment definitions. :attr List[Enrichment] enrichments: (optional) An array of enrichment definitions. @@ -3797,7 +3839,7 @@ def __ne__(self, other: 'Enrichments') -> bool: class Field(): """ - Object containing field details. + Object that contains field details. :attr str field: (optional) The name of the field. :attr str type: (optional) The type of the field. 
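To make the `EnrichmentOptions` requirements spelled out above concrete, a short sketch of creating a `dictionary` enrichment follows; `languages` and `entity_type` are required for this type, and `entity_type` doubles as the field name in the index. The project ID, terms file, and entity type name are hypothetical placeholders:

from ibm_watson import DiscoveryV2
from ibm_watson.discovery_v2 import CreateEnrichment, EnrichmentOptions
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

authenticator = IAMAuthenticator('your_api_key')
discovery = DiscoveryV2(version='2020-08-30', authenticator=authenticator)
discovery.set_service_url('https://api.us-south.discovery.watson.cloud.ibm.com')

# Upload the dictionary terms and register the enrichment with the project.
with open('resources/product_terms.csv', 'rb') as dictionary_file:
    enrichment = discovery.create_enrichment(
        project_id='your_project_id',
        enrichment=CreateEnrichment(
            name='Product names',
            type='dictionary',
            options=EnrichmentOptions(languages=['en'], entity_type='product')),
        file=dictionary_file).get_result()
print(enrichment['enrichment_id'])

Note that, per the change above, the enrichment `options` are no longer included in List Enrichments responses starting with version `2020-08-30`.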
@@ -3884,17 +3926,17 @@ class TypeEnum(str, Enum): class ListCollectionsResponse(): """ - Response object containing an array of collection details. + Response object that contains an array of collection details. - :attr List[Collection] collections: (optional) An array containing information - about each collection in the project. + :attr List[Collection] collections: (optional) An array that contains + information about each collection in the project. """ def __init__(self, *, collections: List['Collection'] = None) -> None: """ Initialize a ListCollectionsResponse object. - :param List[Collection] collections: (optional) An array containing + :param List[Collection] collections: (optional) An array that contains information about each collection in the project. """ self.collections = collections @@ -3950,16 +3992,16 @@ class ListFieldsResponse(): example, `warnings.properties.severity` means that the `warnings` object has a property called `severity`). - :attr List[Field] fields: (optional) An array containing information about each - field in the collections. + :attr List[Field] fields: (optional) An array that contains information about + each field in the collections. """ def __init__(self, *, fields: List['Field'] = None) -> None: """ Initialize a ListFieldsResponse object. - :param List[Field] fields: (optional) An array containing information about - each field in the collections. + :param List[Field] fields: (optional) An array that contains information + about each field in the collections. """ self.fields = fields @@ -4196,7 +4238,11 @@ class ProjectDetails(): :attr str project_id: (optional) The unique identifier of this project. :attr str name: (optional) The human readable name of this project. - :attr str type: (optional) The project type of this project. + :attr str type: (optional) The type of project. + The `content_intelligence` type is a *Document Retrieval for Contracts* project + and the `other` type is a *Custom* project. + The `content_mining` and `content_intelligence` types are available with Premium + plan managed deployments and installed deployments only. :attr ProjectListDetailsRelevancyTrainingStatus relevancy_training_status: (optional) Relevancy training status information for this project. :attr int collection_count: (optional) The number of collections configured in @@ -4218,7 +4264,11 @@ def __init__(self, Initialize a ProjectDetails object. :param str name: (optional) The human readable name of this project. - :param str type: (optional) The project type of this project. + :param str type: (optional) The type of project. + The `content_intelligence` type is a *Document Retrieval for Contracts* + project and the `other` type is a *Custom* project. + The `content_mining` and `content_intelligence` types are available with + Premium plan managed deployments and installed deployments only. :param ProjectListDetailsRelevancyTrainingStatus relevancy_training_status: (optional) Relevancy training status information for this project. :param DefaultQueryParams default_query_parameters: (optional) Default @@ -4302,11 +4352,16 @@ def __ne__(self, other: 'ProjectDetails') -> bool: class TypeEnum(str, Enum): """ - The project type of this project. + The type of project. + The `content_intelligence` type is a *Document Retrieval for Contracts* project + and the `other` type is a *Custom* project. + The `content_mining` and `content_intelligence` types are available with Premium + plan managed deployments and installed deployments only. 
""" DOCUMENT_RETRIEVAL = 'document_retrieval' - ANSWER_RETRIEVAL = 'answer_retrieval' + CONVERSATIONAL_SEARCH = 'conversational_search' CONTENT_MINING = 'content_mining' + CONTENT_INTELLIGENCE = 'content_intelligence' OTHER = 'other' @@ -4316,7 +4371,11 @@ class ProjectListDetails(): :attr str project_id: (optional) The unique identifier of this project. :attr str name: (optional) The human readable name of this project. - :attr str type: (optional) The project type of this project. + :attr str type: (optional) The type of project. + The `content_intelligence` type is a *Document Retrieval for Contracts* project + and the `other` type is a *Custom* project. + The `content_mining` and `content_intelligence` types are available with Premium + plan managed deployments and installed deployments only. :attr ProjectListDetailsRelevancyTrainingStatus relevancy_training_status: (optional) Relevancy training status information for this project. :attr int collection_count: (optional) The number of collections configured in @@ -4335,7 +4394,11 @@ def __init__(self, Initialize a ProjectListDetails object. :param str name: (optional) The human readable name of this project. - :param str type: (optional) The project type of this project. + :param str type: (optional) The type of project. + The `content_intelligence` type is a *Document Retrieval for Contracts* + project and the `other` type is a *Custom* project. + The `content_mining` and `content_intelligence` types are available with + Premium plan managed deployments and installed deployments only. :param ProjectListDetailsRelevancyTrainingStatus relevancy_training_status: (optional) Relevancy training status information for this project. """ @@ -4408,11 +4471,16 @@ def __ne__(self, other: 'ProjectListDetails') -> bool: class TypeEnum(str, Enum): """ - The project type of this project. + The type of project. + The `content_intelligence` type is a *Document Retrieval for Contracts* project + and the `other` type is a *Custom* project. + The `content_mining` and `content_intelligence` types are available with Premium + plan managed deployments and installed deployments only. """ DOCUMENT_RETRIEVAL = 'document_retrieval' - ANSWER_RETRIEVAL = 'answer_retrieval' + CONVERSATIONAL_SEARCH = 'conversational_search' CONTENT_MINING = 'content_mining' + CONTENT_INTELLIGENCE = 'content_intelligence' OTHER = 'other' @@ -4658,7 +4726,7 @@ class QueryGroupByAggregationResult(): Top value result for the term aggregation. :attr str key: Value of the field with a non-zero frequency in the document set. - :attr int matching_results: Number of documents containing the 'key'. + :attr int matching_results: Number of documents that contain the 'key'. :attr float relevancy: (optional) The relevancy for this group. :attr int total_matching_documents: (optional) The number of documents which have the group as the value of specified field in the whole set of documents in @@ -4666,8 +4734,8 @@ class QueryGroupByAggregationResult(): :attr int estimated_matching_documents: (optional) The estimated number of documents which would match the query and also meet the condition. Returned only when the `relevancy` parameter is set to `true`. - :attr List[QueryAggregation] aggregations: (optional) An array of sub - aggregations. + :attr List[QueryAggregation] aggregations: (optional) An array of + sub-aggregations. """ def __init__(self, @@ -4683,7 +4751,7 @@ def __init__(self, :param str key: Value of the field with a non-zero frequency in the document set. 
- :param int matching_results: Number of documents containing the 'key'. + :param int matching_results: Number of documents that contain the 'key'. :param float relevancy: (optional) The relevancy for this group. :param int total_matching_documents: (optional) The number of documents which have the group as the value of specified field in the whole set of @@ -4692,8 +4760,8 @@ def __init__(self, :param int estimated_matching_documents: (optional) The estimated number of documents which would match the query and also meet the condition. Returned only when the `relevancy` parameter is set to `true`. - :param List[QueryAggregation] aggregations: (optional) An array of sub - aggregations. + :param List[QueryAggregation] aggregations: (optional) An array of + sub-aggregations. """ self.key = key self.matching_results = matching_results @@ -4784,8 +4852,8 @@ class QueryHistogramAggregationResult(): :attr int key: The value of the upper bound for the numeric segment. :attr int matching_results: Number of documents with the specified key as the upper bound. - :attr List[QueryAggregation] aggregations: (optional) An array of sub - aggregations. + :attr List[QueryAggregation] aggregations: (optional) An array of + sub-aggregations. """ def __init__(self, @@ -4799,8 +4867,8 @@ def __init__(self, :param int key: The value of the upper bound for the numeric segment. :param int matching_results: Number of documents with the specified key as the upper bound. - :param List[QueryAggregation] aggregations: (optional) An array of sub - aggregations. + :param List[QueryAggregation] aggregations: (optional) An array of + sub-aggregations. """ self.key = key self.matching_results = matching_results @@ -4870,35 +4938,38 @@ class QueryLargePassages(): :attr bool enabled: (optional) A passages query that returns the most relevant passages from the results. - :attr bool per_document: (optional) When `true`, passages will be returned - within their respective result. + :attr bool per_document: (optional) If `true`, ranks the documents by document + quality, and then returns the highest-ranked passages per document in a + `document_passages` field for each document entry in the results list of the + response. + If `false`, ranks the passages from all of the documents by passage quality + regardless of the document quality and returns them in a separate `passages` + field in the response. :attr int max_per_document: (optional) Maximum number of passages to return per - result. - :attr List[str] fields: (optional) A list of fields that passages are drawn - from. If this parameter not specified, then all top-level fields are included. - :attr int count: (optional) The maximum number of passages to return. The search - returns fewer passages if the requested total is not found. The maximum is - `100`. + document in the result. Ignored if `passages.per_document` is `false`. + :attr List[str] fields: (optional) A list of fields to extract passages from. If + this parameter is an empty list, then all root-level fields are included. + :attr int count: (optional) The maximum number of passages to return. Ignored if + `passages.per_document` is `true`. :attr int characters: (optional) The approximate number of characters that any one passage will have. :attr bool find_answers: (optional) When true, `answer` objects are returned as part of each passage in the query results. 
The primary difference between an `answer` and a `passage` is that the length of a passage is defined by the query, where the length of an `answer` is calculated by Discovery based on how - much text is needed to answer the question./n/nThis parameter is ignored if - passages are not enabled for the query, or no **natural_language_query** is - specified./n/nIf the **find_answers** parameter is set to `true` and - **per_document** parameter is also set to `true`, then the document search - results and the passage search results within each document are reordered using - the answer confidences. The goal of this reordering is to do as much as possible - to make sure that the first answer of the first passage of the first document is - the best answer. Similarly, if the **find_answers** parameter is set to `true` - and **per_document** parameter is set to `false`, then the passage search - results are reordered in decreasing order of the highest confidence answer for - each document and passage./n/nThe **find_answers** parameter is **beta** - functionality available only on managed instances and should not be used in a - production environment. This parameter is not available on installed instances - of Discovery. + much text is needed to answer the question. + This parameter is ignored if passages are not enabled for the query, or no + **natural_language_query** is specified. + If the **find_answers** parameter is set to `true` and **per_document** + parameter is also set to `true`, then the document search results and the + passage search results within each document are reordered using the answer + confidences. The goal of this reordering is to place the best answer as the + first answer of the first passage of the first document. Similarly, if the + **find_answers** parameter is set to `true` and **per_document** parameter is + set to `false`, then the passage search results are reordered in decreasing + order of the highest confidence answer for each document and passage. + The **find_answers** parameter is available only on managed instances of + Discovery. :attr int max_answers_per_passage: (optional) The number of `answer` objects to return per passage if the **find_answers** parameter is specified as `true`. """ @@ -4918,16 +4989,21 @@ def __init__(self, :param bool enabled: (optional) A passages query that returns the most relevant passages from the results. - :param bool per_document: (optional) When `true`, passages will be returned - within their respective result. + :param bool per_document: (optional) If `true`, ranks the documents by + document quality, and then returns the highest-ranked passages per document + in a `document_passages` field for each document entry in the results list + of the response. + If `false`, ranks the passages from all of the documents by passage quality + regardless of the document quality and returns them in a separate + `passages` field in the response. :param int max_per_document: (optional) Maximum number of passages to - return per result. - :param List[str] fields: (optional) A list of fields that passages are - drawn from. If this parameter not specified, then all top-level fields are + return per document in the result. Ignored if `passages.per_document` is + `false`. + :param List[str] fields: (optional) A list of fields to extract passages + from. If this parameter is an empty list, then all root-level fields are included. - :param int count: (optional) The maximum number of passages to return.
The - search returns fewer passages if the requested total is not found. The - maximum is `100`. + :param int count: (optional) The maximum number of passages to return. + Ignored if `passages.per_document` is `true`. :param int characters: (optional) The approximate number of characters that any one passage will have. :param bool find_answers: (optional) When true, `answer` objects are @@ -4935,20 +5011,20 @@ def __init__(self, difference between an `answer` and a `passage` is that the length of a passage is defined by the query, where the length of an `answer` is calculated by Discovery based on how much text is needed to answer the - question./n/nThis parameter is ignored if passages are not enabled for the - query, or no **natural_language_query** is specified./n/nIf the + question. + This parameter is ignored if passages are not enabled for the query, or no + **natural_language_query** is specified. + If the **find_answers** parameter is set to `true` and **per_document** + parameter is also set to `true`, then the document search results and the + passage search results within each document are reordered using the answer + confidences. The goal of this reordering is to place the best answer as the + first answer of the first passage of the first document. Similarly, if the **find_answers** parameter is set to `true` and **per_document** parameter - is also set to `true`, then the document search results and the passage - search results within each document are reordered using the answer - confidences. The goal of this reordering is to do as much as possible to - make sure that the first answer of the first passage of the first document - is the best answer. Similarly, if the **find_answers** parameter is set to - `true` and **per_document** parameter is set to `false`, then the passage - search results are reordered in decreasing order of the highest confidence - answer for each document and passage./n/nThe **find_answers** parameter is - **beta** functionality available only on managed instances and should not - be used in a production environment. This parameter is not available on - installed instances of Discovery. + is set to `false`, then the passage search results are reordered in + decreasing order of the highest confidence answer for each document and + passage. + The **find_answers** parameter is available only on managed instances of + Discovery. :param int max_answers_per_passage: (optional) The number of `answer` objects to return per passage if the **find_answers** parameter is specified as `true`. @@ -5034,7 +5110,7 @@ def __ne__(self, other: 'QueryLargePassages') -> bool: class QueryLargeSuggestedRefinements(): """ - Configuration for suggested refinements. + Configuration for suggested refinements. Available with Premium plans only. :attr bool enabled: (optional) Whether to perform suggested refinements. :attr int count: (optional) Maximum number of suggested refinements texts to be @@ -5158,7 +5234,7 @@ def __ne__(self, other: 'QueryLargeTableResults') -> bool: class QueryNoticesResponse(): """ - Object containing notice query results. + Object that contains notice query results. :attr int matching_results: (optional) The number of matching results. :attr List[Notice] notices: (optional) Array of document results that match the @@ -5227,10 +5303,10 @@ def __ne__(self, other: 'QueryNoticesResponse') -> bool: class QueryResponse(): """ - A response containing the documents and aggregations for the query.
+ A response that contains the documents and aggregations for the query. :attr int matching_results: (optional) The number of matching results for the - query. + query. Results that match due to a curation only are not counted in the total. :attr List[QueryResult] results: (optional) Array of document results for the query. :attr List[QueryAggregation] aggregations: (optional) Array of aggregations for @@ -5242,8 +5318,8 @@ class QueryResponse(): :attr List[QuerySuggestedRefinement] suggested_refinements: (optional) Array of suggested refinements. :attr List[QueryTableResult] table_results: (optional) Array of table results. - :attr List[QueryResponsePassage] passages: (optional) Passages returned by - Discovery. + :attr List[QueryResponsePassage] passages: (optional) Passages that best match + the query from across all of the collections in the project. """ def __init__(self, @@ -5260,7 +5336,8 @@ def __init__(self, Initialize a QueryResponse object. :param int matching_results: (optional) The number of matching results for - the query. + the query. Results that match due to a curation only are not counted in the + total. :param List[QueryResult] results: (optional) Array of document results for the query. :param List[QueryAggregation] aggregations: (optional) Array of @@ -5273,8 +5350,8 @@ def __init__(self, Array of suggested refinements. :param List[QueryTableResult] table_results: (optional) Array of table results. - :param List[QueryResponsePassage] passages: (optional) Passages returned by - Discovery. + :param List[QueryResponsePassage] passages: (optional) Passages that best + match the query from across all of the collections in the project. """ self.matching_results = matching_results self.results = results @@ -5383,7 +5460,7 @@ class QueryResponsePassage(): :attr str collection_id: (optional) The unique identifier of the collection. :attr int start_offset: (optional) The position of the first character of the extracted passage in the originating field. - :attr int end_offset: (optional) The position of the last character of the + :attr int end_offset: (optional) The position after the last character of the extracted passage in the originating field. :attr str field: (optional) The label of the field from which the passage has been extracted. @@ -5416,8 +5493,8 @@ def __init__(self, collection. :param int start_offset: (optional) The position of the first character of the extracted passage in the originating field. - :param int end_offset: (optional) The position of the last character of the - extracted passage in the originating field. + :param int end_offset: (optional) The position after the last character of + the extracted passage in the originating field. :param str field: (optional) The label of the field from which the passage has been extracted. :param float confidence: (optional) An estimate of the probability that the @@ -5515,8 +5592,8 @@ class QueryResult(): :attr str document_id: The unique identifier of the document. :attr dict metadata: (optional) Metadata of the document. :attr QueryResultMetadata result_metadata: Metadata of a query result. - :attr List[QueryResultPassage] document_passages: (optional) Passages returned - by Discovery. + :attr List[QueryResultPassage] document_passages: (optional) Passages from the + document that best matches the query. """ # The set of defined properties for the class @@ -5536,8 +5613,8 @@ def __init__(self, :param str document_id: The unique identifier of the document. 
:param QueryResultMetadata result_metadata: Metadata of a query result. :param dict metadata: (optional) Metadata of the document. - :param List[QueryResultPassage] document_passages: (optional) Passages - returned by Discovery. + :param List[QueryResultPassage] document_passages: (optional) Passages from + the document that best matches the query. :param **kwargs: (optional) Any additional properties. """ self.document_id = document_id @@ -5606,6 +5683,27 @@ def _to_dict(self): """Return a json dictionary representing this model.""" return self.to_dict() + def get_properties(self) -> Dict: + """Return a dictionary of arbitrary properties from this instance of QueryResult""" + _dict = {} + + for _key in [ + k for k in vars(self).keys() if k not in QueryResult._properties + ]: + _dict[_key] = getattr(self, _key) + return _dict + + def set_properties(self, _dict: dict): + """Set a dictionary of arbitrary properties to this instance of QueryResult""" + for _key in [ + k for k in vars(self).keys() if k not in QueryResult._properties + ]: + delattr(self, _key) + + for _key, _value in _dict.items(): + if _key not in QueryResult._properties: + setattr(self, _key, _value) + def __str__(self) -> str: """Return a `str` version of this QueryResult object.""" return json.dumps(self.to_dict(), indent=2) @@ -5729,7 +5827,7 @@ class QueryResultPassage(): :attr str passage_text: (optional) The content of the extracted passage. :attr int start_offset: (optional) The position of the first character of the extracted passage in the originating field. - :attr int end_offset: (optional) The position of the last character of the + :attr int end_offset: (optional) The position after the last character of the extracted passage in the originating field. :attr str field: (optional) The label of the field from which the passage has been extracted. @@ -5753,8 +5851,8 @@ def __init__(self, :param str passage_text: (optional) The content of the extracted passage. :param int start_offset: (optional) The position of the first character of the extracted passage in the originating field. - :param int end_offset: (optional) The position of the last character of the - extracted passage in the originating field. + :param int end_offset: (optional) The position after the last character of + the extracted passage in the originating field. :param str field: (optional) The label of the field from which the passage has been extracted. :param float confidence: (optional) Estimate of the probability that the @@ -5996,7 +6094,7 @@ class QueryTermAggregationResult(): Top value result for the term aggregation. :attr str key: Value of the field with a non-zero frequency in the document set. - :attr int matching_results: Number of documents containing the 'key'. + :attr int matching_results: Number of documents that contain the 'key'. :attr float relevancy: (optional) The relevancy for this term. :attr int total_matching_documents: (optional) The number of documents which have the term as the value of specified field in the whole set of documents in @@ -6004,8 +6102,8 @@ class QueryTermAggregationResult(): :attr int estimated_matching_documents: (optional) The estimated number of documents which would match the query and also meet the condition. Returned only when the `relevancy` parameter is set to `true`. - :attr List[QueryAggregation] aggregations: (optional) An array of sub - aggregations. + :attr List[QueryAggregation] aggregations: (optional) An array of + sub-aggregations. 
""" def __init__(self, @@ -6021,7 +6119,7 @@ def __init__(self, :param str key: Value of the field with a non-zero frequency in the document set. - :param int matching_results: Number of documents containing the 'key'. + :param int matching_results: Number of documents that contain the 'key'. :param float relevancy: (optional) The relevancy for this term. :param int total_matching_documents: (optional) The number of documents which have the term as the value of specified field in the whole set of @@ -6030,8 +6128,8 @@ def __init__(self, :param int estimated_matching_documents: (optional) The estimated number of documents which would match the query and also meet the condition. Returned only when the `relevancy` parameter is set to `true`. - :param List[QueryAggregation] aggregations: (optional) An array of sub - aggregations. + :param List[QueryAggregation] aggregations: (optional) An array of + sub-aggregations. """ self.key = key self.matching_results = matching_results @@ -6125,8 +6223,8 @@ class QueryTimesliceAggregationResult(): in UNIX milliseconds since epoch. :attr int matching_results: Number of documents with the specified key as the upper bound. - :attr List[QueryAggregation] aggregations: (optional) An array of sub - aggregations. + :attr List[QueryAggregation] aggregations: (optional) An array of + sub-aggregations. """ def __init__(self, @@ -6144,8 +6242,8 @@ def __init__(self, interval in UNIX milliseconds since epoch. :param int matching_results: Number of documents with the specified key as the upper bound. - :param List[QueryAggregation] aggregations: (optional) An array of sub - aggregations. + :param List[QueryAggregation] aggregations: (optional) An array of + sub-aggregations. """ self.key_as_string = key_as_string self.key = key @@ -6220,7 +6318,7 @@ def __ne__(self, other: 'QueryTimesliceAggregationResult') -> bool: class QueryTopHitsAggregationResult(): """ - A query response containing the matching documents for the preceding aggregations. + A query response that contains the matching documents for the preceding aggregations. :attr int matching_results: Number of matching results. :attr List[dict] hits: (optional) An array of the document results. @@ -6289,13 +6387,13 @@ def __ne__(self, other: 'QueryTopHitsAggregationResult') -> bool: class ResultPassageAnswer(): """ - Object containing a potential answer to the specified query. + Object that contains a potential answer to the specified query. :attr str answer_text: (optional) Answer text for the specified query as identified by Discovery. :attr int start_offset: (optional) The position of the first character of the extracted answer in the originating field. - :attr int end_offset: (optional) The position of the last character of the + :attr int end_offset: (optional) The position after the last character of the extracted answer in the originating field. :attr float confidence: (optional) An estimate of the probability that the answer is relevant. @@ -6314,8 +6412,8 @@ def __init__(self, identified by Discovery. :param int start_offset: (optional) The position of the first character of the extracted answer in the originating field. - :param int end_offset: (optional) The position of the last character of the - extracted answer in the originating field. + :param int end_offset: (optional) The position after the last character of + the extracted answer in the originating field. :param float confidence: (optional) An estimate of the probability that the answer is relevant. 
""" @@ -6382,8 +6480,8 @@ class RetrievalDetails(): :attr str document_retrieval_strategy: (optional) Identifies the document retrieval strategy used for this query. `relevancy_training` indicates that the results were returned using a relevancy trained model. - **Note**: In the event of trained collections being queried, but the trained - model is not used to return results, the **document_retrieval_strategy** will be + **Note**: In the event of trained collections being queried, but the trained + model is not used to return results, the **document_retrieval_strategy** is listed as `untrained`. """ @@ -6394,9 +6492,9 @@ def __init__(self, *, document_retrieval_strategy: str = None) -> None: :param str document_retrieval_strategy: (optional) Identifies the document retrieval strategy used for this query. `relevancy_training` indicates that the results were returned using a relevancy trained model. - **Note**: In the event of trained collections being queried, but the + **Note**: In the event of trained collections being queried, but the trained model is not used to return results, the - **document_retrieval_strategy** will be listed as `untrained`. + **document_retrieval_strategy** is listed as `untrained`. """ self.document_retrieval_strategy = document_retrieval_strategy @@ -6446,9 +6544,9 @@ class DocumentRetrievalStrategyEnum(str, Enum): Identifies the document retrieval strategy used for this query. `relevancy_training` indicates that the results were returned using a relevancy trained model. - **Note**: In the event of trained collections being queried, but the trained - model is not used to return results, the **document_retrieval_strategy** will be - listed as `untrained`. + **Note**: In the event of trained collections being queried, but the trained model + is not used to return results, the **document_retrieval_strategy** is listed as + `untrained`. """ UNTRAINED = 'untrained' RELEVANCY_TRAINING = 'relevancy_training' @@ -7964,7 +8062,7 @@ def __ne__(self, other: 'TableTextLocation') -> bool: class TrainingExample(): """ - Object containing example response details for a training query. + Object that contains example response details for a training query. :attr str document_id: The document ID associated with this training example. :attr str collection_id: The collection ID associated with this training @@ -8065,7 +8163,7 @@ def __ne__(self, other: 'TrainingExample') -> bool: class TrainingQuery(): """ - Object containing training query details. + Object that contains training query details. :attr str query_id: (optional) The query ID associated with the training query. :attr str natural_language_query: The natural text query for the training query. @@ -8306,13 +8404,13 @@ def __ne__(self, other: 'QueryCalculationAggregation') -> bool: class QueryFilterAggregation(QueryAggregation): """ - A modifier that will narrow down the document set of the sub aggregations it precedes. + A modifier that narrows the document set of the sub-aggregations it precedes. - :attr str match: The filter written in Discovery Query Language syntax applied - to the documents before sub aggregations are run. - :attr int matching_results: Number of documents matching the filter. - :attr List[QueryAggregation] aggregations: (optional) An array of sub - aggregations. + :attr str match: The filter that is written in Discovery Query Language syntax + and is applied to the documents before sub-aggregations are run. + :attr int matching_results: Number of documents that match the filter. 
+ :attr List[QueryAggregation] aggregations: (optional) An array of + sub-aggregations. """ def __init__(self, @@ -8327,11 +8425,11 @@ def __init__(self, :param str type: The type of aggregation command used. Options include: term, histogram, timeslice, nested, filter, min, max, sum, average, unique_count, and top_hits. - :param str match: The filter written in Discovery Query Language syntax - applied to the documents before sub aggregations are run. - :param int matching_results: Number of documents matching the filter. - :param List[QueryAggregation] aggregations: (optional) An array of sub - aggregations. + :param str match: The filter that is written in Discovery Query Language + syntax and is applied to the documents before sub-aggregations are run. + :param int matching_results: Number of documents that match the filter. + :param List[QueryAggregation] aggregations: (optional) An array of + sub-aggregations. """ self.type = type self.match = match @@ -8484,7 +8582,7 @@ class QueryHistogramAggregation(QueryAggregation): numeric field to describe the category. :attr str field: The numeric field name used to create the histogram. - :attr int interval: The size of the sections the results are split into. + :attr int interval: The size of the sections that the results are split into. :attr str name: (optional) Identifier specified in the query request of this aggregation. :attr List[QueryHistogramAggregationResult] results: (optional) Array of numeric @@ -8506,7 +8604,8 @@ def __init__( term, histogram, timeslice, nested, filter, min, max, sum, average, unique_count, and top_hits. :param str field: The numeric field name used to create the histogram. - :param int interval: The size of the sections the results are split into. + :param int interval: The size of the sections that the results are split + into. :param str name: (optional) Identifier specified in the query request of this aggregation. :param List[QueryHistogramAggregationResult] results: (optional) Array of @@ -8590,14 +8689,14 @@ def __ne__(self, other: 'QueryHistogramAggregation') -> bool: class QueryNestedAggregation(QueryAggregation): """ - A restriction that alter the document set used for sub aggregations it precedes to - nested documents found in the field specified. + A restriction that alters the document set that is used for sub-aggregations it + precedes to nested documents found in the field specified. - :attr str path: The path to the document field to scope sub aggregations to. + :attr str path: The path to the document field to scope sub-aggregations to. :attr int matching_results: Number of nested documents found in the specified field. - :attr List[QueryAggregation] aggregations: (optional) An array of sub - aggregations. + :attr List[QueryAggregation] aggregations: (optional) An array of + sub-aggregations. """ def __init__(self, @@ -8612,12 +8711,12 @@ def __init__(self, :param str type: The type of aggregation command used. Options include: term, histogram, timeslice, nested, filter, min, max, sum, average, unique_count, and top_hits. - :param str path: The path to the document field to scope sub aggregations + :param str path: The path to the document field to scope sub-aggregations to. :param int matching_results: Number of nested documents found in the specified field. - :param List[QueryAggregation] aggregations: (optional) An array of sub - aggregations. + :param List[QueryAggregation] aggregations: (optional) An array of + sub-aggregations. 
""" self.type = type self.path = path diff --git a/ibm_watson/language_translator_v3.py b/ibm_watson/language_translator_v3.py index d5b9b184..59a3663d 100644 --- a/ibm_watson/language_translator_v3.py +++ b/ibm_watson/language_translator_v3.py @@ -14,13 +14,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -# IBM OpenAPI SDK Code Generator Version: 99-SNAPSHOT-a45d89ef-20201209-192237 +# IBM OpenAPI SDK Code Generator Version: 3.38.0-07189efd-20210827-205025 """ IBM Watson™ Language Translator translates text from one language to another. The service offers multiple IBM-provided translation models that you can customize based on your unique terminology and language. Use Language Translator to take news from across the globe and present it in your language, communicate with your customers in their own language, and more. + +API Version: 3.0.0 +See: https://cloud.ibm.com/docs/language-translator """ from datetime import datetime @@ -112,7 +115,7 @@ def list_languages(self, **kwargs) -> DetailedResponse: headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -188,7 +191,7 @@ def translate(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -226,7 +229,7 @@ def list_identifiable_languages(self, **kwargs) -> DetailedResponse: headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def identify(self, text: Union[str, TextIO], **kwargs) -> DetailedResponse: @@ -265,7 +268,7 @@ def identify(self, text: Union[str, TextIO], **kwargs) -> DetailedResponse: params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -320,7 +323,7 @@ def list_models(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def create_model(self, @@ -459,7 +462,7 @@ def create_model(self, params=params, files=form_data) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_model(self, model_id: str, **kwargs) -> DetailedResponse: @@ -497,7 +500,7 @@ def delete_model(self, model_id: str, **kwargs) -> DetailedResponse: headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_model(self, model_id: str, **kwargs) -> DetailedResponse: @@ -537,7 +540,7 @@ def get_model(self, model_id: str, **kwargs) -> DetailedResponse: headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -573,7 +576,7 @@ def list_documents(self, **kwargs) -> DetailedResponse: headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def translate_document(self, @@ -659,7 +662,7 @@ def translate_document(self, params=params, files=form_data) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_document_status(self, document_id: str, @@ -698,7 +701,7 @@ def get_document_status(self, document_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_document(self, document_id: str, **kwargs) 
-> DetailedResponse: @@ -735,7 +738,7 @@ def delete_document(self, document_id: str, **kwargs) -> DetailedResponse: headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_translated_document(self, @@ -792,7 +795,7 @@ def get_translated_document(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response diff --git a/ibm_watson/natural_language_classifier_v1.py b/ibm_watson/natural_language_classifier_v1.py index 4049ac1f..4b854369 100644 --- a/ibm_watson/natural_language_classifier_v1.py +++ b/ibm_watson/natural_language_classifier_v1.py @@ -14,12 +14,22 @@ # See the License for the specific language governing permissions and # limitations under the License. -# IBM OpenAPI SDK Code Generator Version: 99-SNAPSHOT-a45d89ef-20201209-192237 +# IBM OpenAPI SDK Code Generator Version: 3.38.0-07189efd-20210827-205025 """ -IBM Watson™ Natural Language Classifier uses machine learning algorithms to return -the top matching predefined classes for short text input. You create and train a -classifier to connect predefined classes to example texts so that the service can apply -those classes to new inputs. +On 9 August 2021, IBM announced the deprecation of IBM Watson™ Natural Language +Classifier. As of 9 September 2021, you cannot create new instances. However, existing +instances are supported until 8 August 2022. The service will no longer be available on 8 +August 2022.
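To ground the migration guidance that follows, here is a minimal, hypothetical sketch of the equivalent call in Natural Language Understanding. It assumes the `classifications` feature available in recent NLU releases and a trained custom model; the API key, service URL, and model ID are placeholders:

import json
from ibm_watson import NaturalLanguageUnderstandingV1
from ibm_watson.natural_language_understanding_v1 import Features, ClassificationsOptions
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

authenticator = IAMAuthenticator('your_api_key')
service = NaturalLanguageUnderstandingV1(version='2021-08-01', authenticator=authenticator)
service.set_service_url('https://api.us-south.natural-language-understanding.watson.cloud.ibm.com')

# Where NLC used classify(classifier_id, text), NLU analyzes text with a
# classifications feature backed by a custom model.
response = service.analyze(
    text='I want to cancel my subscription',
    features=Features(
        classifications=ClassificationsOptions(model='your_model_id'))).get_result()
print(json.dumps(response['classifications'], indent=2))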

As an alternative, consider migrating to IBM Watson Natural Language +Understanding. For more information, see [Migrating to Natural Language +Understanding](https://cloud.ibm.com/docs/natural-language-classifier?topic=natural-language-classifier-migrating). +{: deprecated} +Natural Language Classifier uses machine learning algorithms to return the top matching +predefined classes for short text input. You create and train a classifier to connect +predefined classes to example texts so that the service can apply those classes to new +inputs. + +API Version: 1.0 +See: https://cloud.ibm.com/docs/natural-language-classifier """ from datetime import datetime @@ -56,7 +66,7 @@ def __init__( :param Authenticator authenticator: The authenticator specifies the authentication mechanism. Get up to date information from https://github.com/IBM/python-sdk-core/blob/master/README.md about initializing the authenticator of your choice. - """ + """ print( """ On 9 August 2021, IBM announced the deprecation of the Natural Language Classifier service. @@ -121,7 +131,7 @@ def classify(self, classifier_id: str, text: str, headers=headers, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def classify_collection(self, classifier_id: str, @@ -171,7 +181,7 @@ def classify_collection(self, classifier_id: str, headers=headers, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -228,7 +238,7 @@ def create_classifier(self, training_metadata: BinaryIO, headers=headers, files=form_data) - response = self.send(request) + response = self.send(request, **kwargs) return response def list_classifiers(self, **kwargs) -> DetailedResponse: @@ -255,7 +265,7 @@ def list_classifiers(self, **kwargs) -> DetailedResponse: url = '/v1/classifiers' request = self.prepare_request(method='GET', url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_classifier(self, classifier_id: str, **kwargs) -> DetailedResponse: @@ -288,7 +298,7 @@ def get_classifier(self, classifier_id: str, **kwargs) -> DetailedResponse: url = '/v1/classifiers/{classifier_id}'.format(**path_param_dict) request = self.prepare_request(method='GET', url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_classifier(self, classifier_id: str, @@ -322,7 +332,7 @@ def delete_classifier(self, classifier_id: str, url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response diff --git a/ibm_watson/natural_language_understanding_v1.py b/ibm_watson/natural_language_understanding_v1.py index 9840166c..bd3456a5 100644 --- a/ibm_watson/natural_language_understanding_v1.py +++ b/ibm_watson/natural_language_understanding_v1.py @@ -1,6 +1,6 @@ # coding: utf-8 -# (C) Copyright IBM Corp. 2021. +# (C) Copyright IBM Corp. 2017, 2021. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -# IBM OpenAPI SDK Code Generator Version: 99-SNAPSHOT-902c9336-20210507-162723 +# IBM OpenAPI SDK Code Generator Version: 3.38.0-07189efd-20210827-205025 """ Analyze various features of text content at scale. 
Provide text, raw HTML, or a public URL and IBM Watson Natural Language Understanding will give you results for the features you @@ -24,6 +24,9 @@ models](https://cloud.ibm.com/docs/natural-language-understanding?topic=natural-language-understanding-customizing) with Watson Knowledge Studio to detect custom entities and relations in Natural Language Understanding. + +API Version: 1.0 +See: https://cloud.ibm.com/docs/natural-language-understanding """ from datetime import datetime @@ -59,7 +62,7 @@ def __init__( Construct a new client for the Natural Language Understanding service. :param str version: Release date of the API version you want to use. - Specify dates in YYYY-MM-DD format. The current version is `2021-03-25`. + Specify dates in YYYY-MM-DD format. The current version is `2021-08-01`. :param Authenticator authenticator: The authenticator specifies the authentication mechanism. Get up to date information from https://github.com/IBM/python-sdk-core/blob/master/README.md @@ -182,7 +185,7 @@ def analyze(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -220,7 +223,7 @@ def list_models(self, **kwargs) -> DetailedResponse: headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_model(self, model_id: str, **kwargs) -> DetailedResponse: @@ -258,7 +261,7 @@ def delete_model(self, model_id: str, **kwargs) -> DetailedResponse: headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -337,7 +340,7 @@ def create_sentiment_model(self, params=params, files=form_data) - response = self.send(request) + response = self.send(request, **kwargs) return response def list_sentiment_models(self, **kwargs) -> DetailedResponse: @@ -369,7 +372,7 @@ def list_sentiment_models(self, **kwargs) -> DetailedResponse: headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_sentiment_model(self, model_id: str, **kwargs) -> DetailedResponse: @@ -407,7 +410,7 @@ def get_sentiment_model(self, model_id: str, **kwargs) -> DetailedResponse: headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def update_sentiment_model(self, @@ -488,7 +491,7 @@ def update_sentiment_model(self, params=params, files=form_data) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_sentiment_model(self, model_id: str, @@ -528,7 +531,7 @@ def delete_sentiment_model(self, model_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -612,7 +615,7 @@ def create_categories_model(self, params=params, files=form_data) - response = self.send(request) + response = self.send(request, **kwargs) return response def list_categories_models(self, **kwargs) -> DetailedResponse: @@ -644,7 +647,7 @@ def list_categories_models(self, **kwargs) -> DetailedResponse: headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_categories_model(self, model_id: str, **kwargs) -> DetailedResponse: @@ -682,7 +685,7 @@ def get_categories_model(self, model_id: str, **kwargs) -> DetailedResponse: headers=headers, params=params) - response = 
self.send(request) + response = self.send(request, **kwargs) return response def update_categories_model(self, @@ -768,7 +771,7 @@ def update_categories_model(self, params=params, files=form_data) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_categories_model(self, model_id: str, @@ -808,7 +811,7 @@ def delete_categories_model(self, model_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -829,9 +832,9 @@ def create_classifications_model(self, """ Create classifications model. - (Beta) Creates a custom classifications model by uploading training data and - associated metadata. The model begins the training and deploying process and is - ready to use when the `status` is `available`. + Creates a custom classifications model by uploading training data and associated + metadata. The model begins the training and deploying process and is ready to use + when the `status` is `available`. :param str language: The 2-letter language code of this model. :param BinaryIO training_data: Training data in JSON format. For more @@ -893,19 +896,18 @@ def create_classifications_model(self, params=params, files=form_data) - response = self.send(request) + response = self.send(request, **kwargs) return response def list_classifications_models(self, **kwargs) -> DetailedResponse: """ List classifications models. - (Beta) Returns all custom classifications models associated with this service - instance. + Returns all custom classifications models associated with this service instance. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. - :rtype: DetailedResponse with `dict` result representing a `ListClassificationsModelsResponse` object + :rtype: DetailedResponse with `dict` result representing a `ClassificationsModelList` object """ headers = {} @@ -927,7 +929,7 @@ def list_classifications_models(self, **kwargs) -> DetailedResponse: headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_classifications_model(self, model_id: str, @@ -935,7 +937,7 @@ def get_classifications_model(self, model_id: str, """ Get classifications model details. - (Beta) Returns the status of the classifications model with the given model ID. + Returns the status of the classifications model with the given model ID. :param str model_id: ID of the model. :param dict headers: A `dict` containing the request headers @@ -966,7 +968,7 @@ def get_classifications_model(self, model_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def update_classifications_model(self, @@ -984,8 +986,8 @@ def update_classifications_model(self, """ Update classifications model. - (Beta) Overwrites the training data associated with this custom classifications - model and retrains the model. The new model replaces the current deployment. + Overwrites the training data associated with this custom classifications model and + retrains the model. The new model replaces the current deployment. :param str model_id: ID of the model. :param str language: The 2-letter language code of this model. 
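With the (Beta) qualifiers removed in the hunks above, the classifications-model endpoints read as generally available. A minimal lifecycle sketch follows; the file name and model name are placeholders, and the `training_data_content_type` keyword is an assumption based on the SDK's usual file-upload pattern:

import json

from ibm_watson import NaturalLanguageUnderstandingV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

authenticator = IAMAuthenticator('your_api_key')
service = NaturalLanguageUnderstandingV1(
    version='2021-08-01', authenticator=authenticator)

# Create the model from JSON training data, then check its status; per the
# docstrings above, the model is ready when 'status' is 'available'.
with open('training_data.json', 'rb') as training_data:
    model = service.create_classifications_model(
        language='en',
        training_data=training_data,
        training_data_content_type='application/json',
        name='my-classifications-model').get_result()

status = service.get_classifications_model(model['model_id']).get_result()
print(json.dumps(status, indent=2))

# Retraining and cleanup follow the same pattern:
# service.update_classifications_model(model['model_id'], 'en', new_data)
# service.delete_classifications_model(model['model_id'])

Note also the recurring `self.send(request, **kwargs)` change throughout these hunks: extra keyword arguments passed to a service method now flow through to the underlying `send()` call instead of stopping at request preparation.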
@@ -1053,7 +1055,7 @@ def update_classifications_model(self, params=params, files=form_data) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_classifications_model(self, model_id: str, @@ -1061,9 +1063,8 @@ def delete_classifications_model(self, model_id: str, """ Delete classifications model. - (Beta) Un-deploys the custom classifications model with the given model ID and - deletes all associated customer data, including any training data or binary - artifacts. + Un-deploys the custom classifications model with the given model ID and deletes + all associated customer data, including any training data or binary artifacts. :param str model_id: ID of the model. :param dict headers: A `dict` containing the request headers @@ -1095,7 +1096,7 @@ def delete_classifications_model(self, model_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response @@ -1756,7 +1757,8 @@ def __ne__(self, other: 'CategoriesModelList') -> bool: class CategoriesOptions(): """ - Returns a five-level taxonomy of the content. The top three categories are returned. + Returns a hierarchical taxonomy of the content. The top three categories are returned + by default. Supported languages: Arabic, English, French, German, Italian, Japanese, Korean, Portuguese, Spanish. @@ -1897,7 +1899,7 @@ class CategoriesResult(): """ A categorization of the analyzed text. - :attr str label: (optional) The path to the category through the 5-level + :attr str label: (optional) The path to the category through the multi-level taxonomy hierarchy. For more information about the categories, see [Categories hierarchy](https://cloud.ibm.com/docs/natural-language-understanding?topic=natural-language-understanding-categories#categories-hierarchy). :attr float score: (optional) Confidence score for the category classification. @@ -1914,9 +1916,9 @@ def __init__(self, """ Initialize a CategoriesResult object. - :param str label: (optional) The path to the category through the 5-level - taxonomy hierarchy. For more information about the categories, see - [Categories + :param str label: (optional) The path to the category through the + multi-level taxonomy hierarchy. For more information about the categories, + see [Categories hierarchy](https://cloud.ibm.com/docs/natural-language-understanding?topic=natural-language-understanding-categories#categories-hierarchy). :param float score: (optional) Confidence score for the category classification. Higher values indicate greater confidence. @@ -2305,7 +2307,7 @@ class ClassificationsOptions(): Returns text classifications for the content. Supported languages: English only. - :attr str model: (optional) (Beta) Enter a [custom + :attr str model: (optional) Enter a [custom model](https://cloud.ibm.com/docs/natural-language-understanding?topic=natural-language-understanding-customizing) ID of the classification model to be used. """ @@ -2314,7 +2316,7 @@ def __init__(self, *, model: str = None) -> None: """ Initialize a ClassificationsOptions object. - :param str model: (optional) (Beta) Enter a [custom + :param str model: (optional) Enter a [custom model](https://cloud.ibm.com/docs/natural-language-understanding?topic=natural-language-understanding-customizing) ID of the classification model to be used. """ @@ -3485,8 +3487,8 @@ class Features(): :attr SummarizationOptions summarization: (optional) (Experimental) Returns a summary of content. Supported languages: English only. 
- :attr CategoriesOptions categories: (optional) Returns a five-level taxonomy of - the content. The top three categories are returned. + :attr CategoriesOptions categories: (optional) Returns a hierarchical taxonomy + of the content. The top three categories are returned by default. Supported languages: Arabic, English, French, German, Italian, Japanese, Korean, Portuguese, Spanish. :attr SyntaxOptions syntax: (optional) Returns tokens and sentences from the @@ -3558,8 +3560,8 @@ def __init__(self, :param SummarizationOptions summarization: (optional) (Experimental) Returns a summary of content. Supported languages: English only. - :param CategoriesOptions categories: (optional) Returns a five-level - taxonomy of the content. The top three categories are returned. + :param CategoriesOptions categories: (optional) Returns a hierarchical + taxonomy of the content. The top three categories are returned by default. Supported languages: Arabic, English, French, German, Italian, Japanese, Korean, Portuguese, Spanish. :param SyntaxOptions syntax: (optional) Returns tokens and sentences from @@ -3990,65 +3992,6 @@ def __ne__(self, other: 'KeywordsResult') -> bool: return not self == other -class ListClassificationsModelsResponse(): - """ - ListClassificationsModelsResponse. - - :attr List[ClassificationsModelList] models: (optional) - """ - - def __init__(self, - *, - models: List['ClassificationsModelList'] = None) -> None: - """ - Initialize a ListClassificationsModelsResponse object. - - :param List[ClassificationsModelList] models: (optional) - """ - self.models = models - - @classmethod - def from_dict(cls, _dict: Dict) -> 'ListClassificationsModelsResponse': - """Initialize a ListClassificationsModelsResponse object from a json dictionary.""" - args = {} - if 'models' in _dict: - args['models'] = [ - ClassificationsModelList.from_dict(x) - for x in _dict.get('models') - ] - return cls(**args) - - @classmethod - def _from_dict(cls, _dict): - """Initialize a ListClassificationsModelsResponse object from a json dictionary.""" - return cls.from_dict(_dict) - - def to_dict(self) -> Dict: - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'models') and self.models is not None: - _dict['models'] = [x.to_dict() for x in self.models] - return _dict - - def _to_dict(self): - """Return a json dictionary representing this model.""" - return self.to_dict() - - def __str__(self) -> str: - """Return a `str` version of this ListClassificationsModelsResponse object.""" - return json.dumps(self.to_dict(), indent=2) - - def __eq__(self, other: 'ListClassificationsModelsResponse') -> bool: - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other: 'ListClassificationsModelsResponse') -> bool: - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other - - class ListModelsResults(): """ Custom models that are available for entities and relations. diff --git a/ibm_watson/personality_insights_v3.py b/ibm_watson/personality_insights_v3.py index f049a5fc..4ef2e1d7 100644 --- a/ibm_watson/personality_insights_v3.py +++ b/ibm_watson/personality_insights_v3.py @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
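Returning to the categories change above (a hierarchical taxonomy with three categories by default, rather than a fixed five-level one), a hedged usage sketch; the `limit` keyword is an assumption based on the SDK's CategoriesOptions:

from ibm_watson import NaturalLanguageUnderstandingV1
from ibm_watson.natural_language_understanding_v1 import (
    Features, CategoriesOptions)
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

authenticator = IAMAuthenticator('your_api_key')
service = NaturalLanguageUnderstandingV1(
    version='2021-08-01', authenticator=authenticator)

# Ask for more than the default three categories; 'limit' is assumed here.
response = service.analyze(
    url='www.ibm.com',
    features=Features(categories=CategoriesOptions(limit=5))).get_result()

for category in response.get('categories', []):
    # 'label' is the path through the multi-level taxonomy hierarchy.
    print(category['label'], category['score'])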
-# IBM OpenAPI SDK Code Generator Version: 99-SNAPSHOT-902c9336-20210507-162723 +# IBM OpenAPI SDK Code Generator Version: 3.38.0-07189efd-20210827-205025 """ IBM Watson™ Personality Insights is discontinued. Existing instances are supported until 1 December 2021, but as of 1 December 2020, you cannot create new instances. Any @@ -45,6 +45,9 @@ **Note:** Request logging is disabled for the Personality Insights service. Regardless of whether you set the `X-Watson-Learning-Opt-Out` request header, the service does not log or retain data from requests and responses. + +API Version: 3.4.4 +See: https://cloud.ibm.com/docs/personality-insights """ from enum import Enum @@ -233,7 +236,7 @@ def profile(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response diff --git a/ibm_watson/speech_to_text_v1.py b/ibm_watson/speech_to_text_v1.py index b29557c0..f768b59b 100644 --- a/ibm_watson/speech_to_text_v1.py +++ b/ibm_watson/speech_to_text_v1.py @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -# IBM OpenAPI SDK Code Generator Version: 99-SNAPSHOT-902c9336-20210507-162723 +# IBM OpenAPI SDK Code Generator Version: 3.38.0-07189efd-20210827-205025 """ The IBM Watson™ Speech to Text service provides APIs that use IBM's speech-recognition capabilities to produce transcripts of spoken audio. The service can @@ -22,11 +22,11 @@ transcription, the service can produce detailed information about many different aspects of the audio. It returns all JSON response content in the UTF-8 character set. The service supports two types of models: previous-generation models that include the -terms `Broadband` and `Narrowband` in their names, and beta next-generation models that -include the terms `Multimedia` and `Telephony` in their names. Broadband and multimedia -models have minimum sampling rates of 16 kHz. Narrowband and telephony models have minimum -sampling rates of 8 kHz. The beta next-generation models currently support fewer languages -and features, but they offer high throughput and greater transcription accuracy. +terms `Broadband` and `Narrowband` in their names, and next-generation models that include +the terms `Multimedia` and `Telephony` in their names. Broadband and multimedia models +have minimum sampling rates of 16 kHz. Narrowband and telephony models have minimum +sampling rates of 8 kHz. The next-generation models offer high throughput and greater +transcription accuracy. For speech recognition, the service supports synchronous and asynchronous HTTP Representational State Transfer (REST) interfaces. It also supports a WebSocket interface that provides a full-duplex, low-latency communication channel: Clients send requests and @@ -36,10 +36,13 @@ customization to adapt a base model for the acoustic characteristics of your audio. For language model customization, the service also supports grammars. A grammar is a formal language specification that lets you restrict the phrases that the service can recognize. -Language model customization and acoustic model customization are generally available for -production use with all previous-generation models that are generally available. Grammars -are beta functionality for all previous-generation models that support language model -customization. Next-generation models do not support customization at this time. +Language model customization is available for most previous- and next-generation models. 
+Acoustic model customization is available for all previous-generation models. Grammars are +beta functionality that is available for all previous-generation models that support +language model customization. + +API Version: 1.0.0 +See: https://cloud.ibm.com/docs/speech-to-text """ from enum import Enum @@ -116,7 +119,7 @@ def list_models(self, **kwargs) -> DetailedResponse: url = '/v1/models' request = self.prepare_request(method='GET', url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_model(self, model_id: str, **kwargs) -> DetailedResponse: @@ -130,8 +133,9 @@ def get_model(self, model_id: str, **kwargs) -> DetailedResponse: models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-list). :param str model_id: The identifier of the model in the form of its name - from the output of the **Get a model** method. (**Note:** The model - `ar-AR_BroadbandModel` is deprecated; use `ar-MS_BroadbandModel` instead.). + from the output of the [List models](#listmodels) method. (**Note:** The + model `ar-AR_BroadbandModel` is deprecated; use `ar-MS_BroadbandModel` + instead.). :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse with `dict` result representing a `SpeechModel` object @@ -155,7 +159,7 @@ def get_model(self, model_id: str, **kwargs) -> DetailedResponse: url = '/v1/models/{model_id}'.format(**path_param_dict) request = self.prepare_request(method='GET', url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -251,28 +255,19 @@ def recognize(self, **See also:** [Supported audio formats](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-audio-formats). ### Next-generation models - **Note:** The next-generation language models are beta functionality. They - support a limited number of languages and features at this time. The supported - languages, models, and features will increase with future releases. - The service supports next-generation `Multimedia` (16 kHz) and `Telephony` (8 kHz) - models for many languages. Next-generation models have higher throughput than the - service's previous generation of `Broadband` and `Narrowband` models. When you use - next-generation models, the service can return transcriptions more quickly and + The service supports next-generation `Multimedia` (16 kHz) and `Telephony` (8 + kHz) models for many languages. Next-generation models have higher throughput than + the service's previous generation of `Broadband` and `Narrowband` models. When you + use next-generation models, the service can return transcriptions more quickly and also provide noticeably better transcription accuracy. You specify a next-generation model by using the `model` query parameter, as you - do a previous-generation model. Next-generation models support the same request - headers as previous-generation models, but they support only the following - additional query parameters: - * `background_audio_suppression` - * `inactivity_timeout` - * `profanity_filter` - * `redaction` - * `smart_formatting` - * `speaker_labels` - * `speech_detector_sensitivity` - * `timestamps` - Many next-generation models also support the beta `low_latency` parameter, which - is not available with previous-generation models. + do a previous-generation model. 
Many next-generation models also support the + `low_latency` parameter, which is not available with previous-generation models. + But next-generation models do not support all of the parameters that are available + for use with previous-generation models. For more information about all parameters + that are supported for use with next-generation models, see [Supported features + for next-generation + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng#models-ng-features). **See also:** [Next-generation languages and models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng). ### Multipart speech recognition @@ -295,7 +290,8 @@ def recognize(self, (content types)** in the method description. :param str model: (optional) The identifier of the model that is to be used for the recognition request. (**Note:** The model `ar-AR_BroadbandModel` is - deprecated; use `ar-MS_BroadbandModel` instead.) See [Languages and + deprecated; use `ar-MS_BroadbandModel` instead.) See [Previous-generation + languages and models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models) and [Next-generation languages and models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng). @@ -397,7 +393,8 @@ def recognize(self, the final transcript of a recognition request. For US English, the service also converts certain keyword strings to punctuation symbols. By default, the service performs no smart formatting. - **Note:** Applies to US English, Japanese, and Spanish transcription only. + **Beta:** The parameter is beta functionality. Applies to US English, + Japanese, and Spanish transcription only. See [Smart formatting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-formatting#smart-formatting). :param bool speaker_labels: (optional) If `true`, the response includes @@ -405,11 +402,14 @@ def recognize(self, multi-person exchange. By default, the service returns no speaker labels. Setting `speaker_labels` to `true` forces the `timestamps` parameter to be `true`, regardless of whether you specify `false` for the parameter. - * For previous-generation models, can be used for US English, Australian - English, German, Japanese, Korean, and Spanish (both broadband and - narrowband models) and UK English (narrowband model) transcription only. - * For next-generation models, can be used for English (Australian, UK, and - US), German, and Spanish transcription only. + **Beta:** The parameter is beta functionality. + * For previous-generation models, the parameter can be used for Australian + English, US English, German, Japanese, Korean, and Spanish (both broadband + and narrowband models) and UK English (narrowband model) transcription + only. + * For next-generation models, the parameter can be used for English + (Australian, Indian, UK, and US), German, Japanese, Korean, and Spanish + transcription only. Restrictions and limitations apply to the use of speaker labels for both types of models. See [Speaker labels](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-speaker-labels). @@ -422,8 +422,9 @@ def recognize(self, use the `language_customization_id` parameter to specify the name of the custom language model for which the grammar is defined. The service recognizes only strings that are recognized by the specified grammar; it - does not recognize other custom words from the model's words resource. See - [Using a grammar for speech + does not recognize other custom words from the model's words resource. 
+ **Beta:** The parameter is beta functionality. + See [Using a grammar for speech recognition](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-grammarUse). :param bool redaction: (optional) If `true`, the service redacts, or masks, numeric data from final transcripts. The feature redacts any number that @@ -436,7 +437,8 @@ def recognize(self, (ignores the `keywords` and `keywords_threshold` parameters) and returns only a single final transcript (forces the `max_alternatives` parameter to be `1`). - **Note:** Applies to US English, Japanese, and Korean transcription only. + **Beta:** The parameter is beta functionality. Applies to US English, + Japanese, and Korean transcription only. See [Numeric redaction](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-formatting#numeric-redaction). :param bool audio_metrics: (optional) If `true`, requests detailed @@ -501,13 +503,11 @@ def recognize(self, previous-generation models. The `low_latency` parameter causes the models to produce results even more quickly, though the results might be less accurate when the parameter is used. - **Note:** The parameter is beta functionality. It is not available for - previous-generation `Broadband` and `Narrowband` models. It is available - only for some next-generation models. - * For a list of next-generation models that support low latency, see - [Supported language - models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng#models-ng-supported) - for next-generation models. + The parameter is not available for previous-generation `Broadband` and + `Narrowband` models. It is available only for some next-generation models. + For a list of next-generation models that support low latency, see + [Supported next-generation language + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng#models-ng-supported). * For more information about the `low_latency` parameter, see [Low latency](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-interim#low-latency). :param dict headers: A `dict` containing the request headers @@ -563,7 +563,7 @@ def recognize(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -592,9 +592,9 @@ def register_callback(self, The service sends only a single `GET` request to the callback URL. If the service does not receive a reply with a response code of 200 and a body that echoes the challenge string sent by the service within five seconds, it does not allowlist - the URL; it instead sends status code 400 in response to the **Register a - callback** request. If the requested callback URL is already allowlisted, the - service responds to the initial registration request with response code 200. + the URL; it instead sends status code 400 in response to the request to register a + callback. If the requested callback URL is already allowlisted, the service + responds to the initial registration request with response code 200. If you specify a user secret with the request, the service uses it as a key to calculate an HMAC-SHA1 signature of the challenge string in its response to the `POST` request. 
It sends this signature in the `X-Callback-Signature` header of @@ -644,7 +644,7 @@ def register_callback(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def unregister_callback(self, callback_url: str, @@ -652,9 +652,10 @@ def unregister_callback(self, callback_url: str, """ Unregister a callback. - Unregisters a callback URL that was previously allowlisted with a **Register a - callback** request for use with the asynchronous interface. Once unregistered, the - URL can no longer be used with asynchronous recognition requests. + Unregisters a callback URL that was previously allowlisted with a [Register a + callback](#registercallback) request for use with the asynchronous interface. Once + unregistered, the URL can no longer be used with asynchronous recognition + requests. **See also:** [Unregistering a callback URL](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-async#unregister). @@ -683,7 +684,7 @@ def unregister_callback(self, callback_url: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def create_job(self, @@ -734,16 +735,16 @@ def create_job(self, to subscribe to specific events and to specify a string that is to be included with each notification for the job. * By polling the service: Omit the `callback_url`, `events`, and `user_token` - parameters. You must then use the **Check jobs** or **Check a job** methods to - check the status of the job, using the latter to retrieve the results when the job - is complete. + parameters. You must then use the [Check jobs](#checkjobs) or [Check a + job](#checkjob) methods to check the status of the job, using the latter to + retrieve the results when the job is complete. The two approaches are not mutually exclusive. You can poll the service for job status or obtain results from the service manually even if you include a callback URL. In both cases, you can include the `results_ttl` parameter to specify how long the results are to remain available after the job is complete. Using the - HTTPS **Check a job** method to retrieve results is more secure than receiving - them via callback notification over HTTP because it provides confidentiality in - addition to authentication and data integrity. + HTTPS [Check a job](#checkjob) method to retrieve results is more secure than + receiving them via callback notification over HTTP because it provides + confidentiality in addition to authentication and data integrity. The method supports the same basic parameters as other HTTP and WebSocket recognition requests. It also supports the following parameters specific to the asynchronous interface: @@ -807,28 +808,19 @@ def create_job(self, **See also:** [Supported audio formats](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-audio-formats). ### Next-generation models - **Note:** The next-generation language models are beta functionality. They - support a limited number of languages and features at this time. The supported - languages, models, and features will increase with future releases. - The service supports next-generation `Multimedia` (16 kHz) and `Telephony` (8 kHz) - models for many languages. Next-generation models have higher throughput than the - service's previous generation of `Broadband` and `Narrowband` models. 
When you use - next-generation models, the service can return transcriptions more quickly and + The service supports next-generation `Multimedia` (16 kHz) and `Telephony` (8 + kHz) models for many languages. Next-generation models have higher throughput than + the service's previous generation of `Broadband` and `Narrowband` models. When you + use next-generation models, the service can return transcriptions more quickly and also provide noticeably better transcription accuracy. You specify a next-generation model by using the `model` query parameter, as you - do a previous-generation model. Next-generation models support the same request - headers as previous-generation models, but they support only the following - additional query parameters: - * `background_audio_suppression` - * `inactivity_timeout` - * `profanity_filter` - * `redaction` - * `smart_formatting` - * `speaker_labels` - * `speech_detector_sensitivity` - * `timestamps` - Many next-generation models also support the beta `low_latency` parameter, which - is not available with previous-generation models. + do a previous-generation model. Many next-generation models also support the + `low_latency` parameter, which is not available with previous-generation models. + But next-generation models do not support all of the parameters that are available + for use with previous-generation models. For more information about all parameters + that are supported for use with next-generation models, see [Supported features + for next-generation + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng#models-ng-features). **See also:** [Next-generation languages and models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng). @@ -838,15 +830,16 @@ def create_job(self, (content types)** in the method description. :param str model: (optional) The identifier of the model that is to be used for the recognition request. (**Note:** The model `ar-AR_BroadbandModel` is - deprecated; use `ar-MS_BroadbandModel` instead.) See [Languages and + deprecated; use `ar-MS_BroadbandModel` instead.) See [Previous-generation + languages and models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models) and [Next-generation languages and models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng). :param str callback_url: (optional) A URL to which callback notifications are to be sent. The URL must already be successfully allowlisted by using - the **Register a callback** method. You can include the same callback URL - with any number of job creation requests. Omit the parameter to poll the - service for job completion and results. + the [Register a callback](#registercallback) method. You can include the + same callback URL with any number of job creation requests. Omit the + parameter to poll the service for job completion and results. Use the `user_token` parameter to specify a unique user-specified string with each job to differentiate the callback notifications for the jobs. :param str events: (optional) If the job includes a callback URL, a @@ -855,8 +848,8 @@ def create_job(self, * `recognitions.started` generates a callback notification when the service begins to process the job. * `recognitions.completed` generates a callback notification when the job - is complete. You must use the **Check a job** method to retrieve the - results before they time out or are deleted. + is complete. 
You must use the [Check a job](#checkjob) method to retrieve + the results before they time out or are deleted. * `recognitions.completed_with_results` generates a callback notification when the job is complete. The notification includes the results of the request. @@ -976,7 +969,8 @@ def create_job(self, the final transcript of a recognition request. For US English, the service also converts certain keyword strings to punctuation symbols. By default, the service performs no smart formatting. - **Note:** Applies to US English, Japanese, and Spanish transcription only. + **Beta:** The parameter is beta functionality. Applies to US English, + Japanese, and Spanish transcription only. See [Smart formatting](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-formatting#smart-formatting). :param bool speaker_labels: (optional) If `true`, the response includes @@ -984,11 +978,14 @@ def create_job(self, multi-person exchange. By default, the service returns no speaker labels. Setting `speaker_labels` to `true` forces the `timestamps` parameter to be `true`, regardless of whether you specify `false` for the parameter. - * For previous-generation models, can be used for US English, Australian - English, German, Japanese, Korean, and Spanish (both broadband and - narrowband models) and UK English (narrowband model) transcription only. - * For next-generation models, can be used for English (Australian, UK, and - US), German, and Spanish transcription only. + **Beta:** The parameter is beta functionality. + * For previous-generation models, the parameter can be used for Australian + English, US English, German, Japanese, Korean, and Spanish (both broadband + and narrowband models) and UK English (narrowband model) transcription + only. + * For next-generation models, the parameter can be used for English + (Australian, Indian, UK, and US), German, Japanese, Korean, and Spanish + transcription only. Restrictions and limitations apply to the use of speaker labels for both types of models. See [Speaker labels](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-speaker-labels). @@ -1001,8 +998,9 @@ def create_job(self, use the `language_customization_id` parameter to specify the name of the custom language model for which the grammar is defined. The service recognizes only strings that are recognized by the specified grammar; it - does not recognize other custom words from the model's words resource. See - [Using a grammar for speech + does not recognize other custom words from the model's words resource. + **Beta:** The parameter is beta functionality. + See [Using a grammar for speech recognition](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-grammarUse). :param bool redaction: (optional) If `true`, the service redacts, or masks, numeric data from final transcripts. The feature redacts any number that @@ -1015,7 +1013,8 @@ def create_job(self, (ignores the `keywords` and `keywords_threshold` parameters) and returns only a single final transcript (forces the `max_alternatives` parameter to be `1`). - **Note:** Applies to US English, Japanese, and Korean transcription only. + **Beta:** The parameter is beta functionality. Applies to US English, + Japanese, and Korean transcription only. See [Numeric redaction](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-formatting#numeric-redaction). :param bool processing_metrics: (optional) If `true`, requests processing @@ -1102,13 +1101,11 @@ def create_job(self, previous-generation models. 
The `low_latency` parameter causes the models to produce results even more quickly, though the results might be less accurate when the parameter is used. - **Note:** The parameter is beta functionality. It is not available for - previous-generation `Broadband` and `Narrowband` models. It is available - only for some next-generation models. - * For a list of next-generation models that support low latency, see - [Supported language - models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng#models-ng-supported) - for next-generation models. + The parameter is not available for previous-generation `Broadband` and + `Narrowband` models. It is available only for some next-generation models. + For a list of next-generation models that support low latency, see + [Supported next-generation language + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng#models-ng-supported). * For more information about the `low_latency` parameter, see [Low latency](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-interim#low-latency). :param dict headers: A `dict` containing the request headers @@ -1170,7 +1167,7 @@ def create_job(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def check_jobs(self, **kwargs) -> DetailedResponse: @@ -1181,10 +1178,10 @@ def check_jobs(self, **kwargs) -> DetailedResponse: credentials with which it is called. The method also returns the creation and update times of each job, and, if a job was created with a callback URL and a user token, the user token for the job. To obtain the results for a job whose status is - `completed` or not one of the latest 100 outstanding jobs, use the **Check a job** - method. A job and its results remain available until you delete them with the - **Delete a job** method or until the job's time to live expires, whichever comes - first. + `completed` or not one of the latest 100 outstanding jobs, use the [Check a + job](#checkjob) method. A job and its results remain available until you delete + them with the [Delete a job](#deletejob) method or until the job's time to live + expires, whichever comes first. **See also:** [Checking the status of the latest jobs](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-async#jobs). @@ -1206,7 +1203,7 @@ def check_jobs(self, **kwargs) -> DetailedResponse: url = '/v1/recognitions' request = self.prepare_request(method='GET', url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response def check_job(self, id: str, **kwargs) -> DetailedResponse: @@ -1221,8 +1218,8 @@ def check_job(self, id: str, **kwargs) -> DetailedResponse: You can use the method to retrieve the results of any job, regardless of whether it was submitted with a callback URL and the `recognitions.completed_with_results` event, and you can retrieve the results multiple times for as long as they remain - available. Use the **Check jobs** method to request information about the most - recent jobs associated with the calling credentials. + available. Use the [Check jobs](#checkjobs) method to request information about + the most recent jobs associated with the calling credentials. **See also:** [Checking the status and retrieving the results of a job](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-async#job).
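The polling variant of the asynchronous workflow described above (omit `callback_url`, create the job, then poll with [Check a job](#checkjob)) condenses to roughly the following sketch; the audio file and model are placeholders, `low_latency` applies only to some next-generation models, and the terminal status values are assumptions based on the async interface:

import json
import time

from ibm_watson import SpeechToTextV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

authenticator = IAMAuthenticator('your_api_key')
service = SpeechToTextV1(authenticator=authenticator)
service.set_service_url(
    'https://api.us-south.speech-to-text.watson.cloud.ibm.com')

# Omitting callback_url/events/user_token selects the polling approach.
with open('audio-file.flac', 'rb') as audio_file:
    job = service.create_job(
        audio=audio_file,
        content_type='audio/flac',
        model='en-US_Telephony',
        low_latency=True).get_result()

# Poll until the job reaches a terminal status; 'completed' and 'failed'
# are assumed here, and the completed job carries its results.
while job['status'] not in ('completed', 'failed'):
    time.sleep(5)
    job = service.check_job(job['id']).get_result()

print(json.dumps(job, indent=2))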
@@ -1252,7 +1249,7 @@ def check_job(self, id: str, **kwargs) -> DetailedResponse: url = '/v1/recognitions/{id}'.format(**path_param_dict) request = self.prepare_request(method='GET', url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_job(self, id: str, **kwargs) -> DetailedResponse: @@ -1294,7 +1291,7 @@ def delete_job(self, id: str, **kwargs) -> DetailedResponse: url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -1331,28 +1328,27 @@ def create_language_model(self, be customized by the new custom language model. The new custom model can be used only with the base model that it customizes. To determine whether a base model supports language model customization, - use the **Get a model** method and check that the attribute + use the [Get a model](#getmodel) method and check that the attribute `custom_language_model` is set to `true`. You can also refer to [Language support for - customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-customization#languageSupport). + customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-support#custom-language-support). :param str dialect: (optional) The dialect of the specified language that is to be used with the custom language model. For most languages, the dialect matches the language of the base model by default. For example, - `en-US` is used for either of the US English language models. - For a Spanish language, the service creates a custom language model that is + `en-US` is used for the US English language models. All dialect values are + case-insensitive. + The parameter is meaningful only for Spanish language models, for which you + can always safely omit the parameter to have the service create the correct + mapping. For Spanish, the service creates a custom language model that is suited for speech in one of the following dialects: * `es-ES` for Castilian Spanish (`es-ES` models) * `es-LA` for Latin American Spanish (`es-AR`, `es-CL`, `es-CO`, and `es-PE` models) * `es-US` for Mexican (North American) Spanish (`es-MX` models) - The parameter is meaningful only for Spanish models, for which you can - always safely omit the parameter to have the service create the correct - mapping. - If you specify the `dialect` parameter for non-Spanish language models, its - value must match the language of the base model. If you specify the - `dialect` for Spanish language models, its value must match one of the - defined mappings as indicated (`es-ES`, `es-LA`, or `es-MX`). All dialect - values are case-insensitive. + If you specify the `dialect` parameter for a non-Spanish language model, + its value must match the language of the base model. If you specify the + `dialect` for a Spanish language model, its value must match one of the + defined mappings (`es-ES`, `es-LA`, or `es-MX`). :param str description: (optional) A description of the new custom language model. Use a localized description that matches the language of the custom model. @@ -1391,7 +1387,7 @@ def create_language_model(self, headers=headers, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def list_language_models(self, @@ -1416,7 +1412,7 @@ def list_language_models(self, deprecated; use `ar-MS` instead.) 
To determine the languages for which customization is available, see [Language support for - customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-customization#languageSupport). + customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-support#custom-language-support). :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse with `dict` result representing a `LanguageModels` object @@ -1440,7 +1436,7 @@ def list_language_models(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_language_model(self, customization_id: str, @@ -1480,7 +1476,7 @@ def get_language_model(self, customization_id: str, url = '/v1/customizations/{customization_id}'.format(**path_param_dict) request = self.prepare_request(method='GET', url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_language_model(self, customization_id: str, @@ -1524,7 +1520,7 @@ def delete_language_model(self, customization_id: str, url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response def train_language_model(self, @@ -1547,12 +1543,13 @@ def train_language_model(self, complete depending on the amount of data on which the service is being trained and the current load on the service. The method returns an HTTP 200 response code to indicate that the training process has begun. - You can monitor the status of the training by using the **Get a custom language - model** method to poll the model's status. Use a loop to check the status every 10 - seconds. The method returns a `LanguageModel` object that includes `status` and - `progress` fields. A status of `available` means that the custom model is trained - and ready to use. The service cannot accept subsequent training requests or - requests to add new resources until the existing request completes. + You can monitor the status of the training by using the [Get a custom language + model](#getlanguagemodel) method to poll the model's status. Use a loop to check + the status every 10 seconds. The method returns a `LanguageModel` object that + includes `status` and `progress` fields. A status of `available` means that the + custom model is trained and ready to use. The service cannot accept subsequent + training requests or requests to add new resources until the existing request + completes. **See also:** [Train the custom language model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-languageCreate#trainModel-language). ### Training failures @@ -1570,14 +1567,18 @@ def train_language_model(self, language model that is to be used for the request. You must make the request with credentials for the instance of the service that owns the custom model. - :param str word_type_to_add: (optional) The type of words from the custom - language model's words resource on which to train the model: + :param str word_type_to_add: (optional) _For custom models that are based + on previous-generation models_, the type of words from the custom language + model's words resource on which to train the model: * `all` (the default) trains the model on all new words, regardless of whether they were extracted from corpora or grammars or were added or modified by the user. 
- * `user` trains the model only on new words that were added or modified by - the user directly. The model is not trained on new words extracted from + * `user` trains the model only on custom words that were added or modified + by the user directly. The model is not trained on new words extracted from corpora or grammars. + _For custom models that are based on next-generation models_, the service + ignores the parameter. The words resource contains only custom words that + the user adds or modifies directly, so the parameter is unnecessary. :param float customization_weight: (optional) Specifies a customization weight for the custom language model. The customization weight tells the service how much weight to give to words from the custom language model @@ -1625,7 +1626,7 @@ def train_language_model(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def reset_language_model(self, customization_id: str, @@ -1670,7 +1671,7 @@ def reset_language_model(self, customization_id: str, **path_param_dict) request = self.prepare_request(method='POST', url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response def upgrade_language_model(self, customization_id: str, @@ -1686,12 +1687,15 @@ def upgrade_language_model(self, customization_id: str, that owns a model to upgrade it. The method returns an HTTP 200 response code to indicate that the upgrade process has begun successfully. You can monitor the status of the upgrade by using the - **Get a custom language model** method to poll the model's status. The method - returns a `LanguageModel` object that includes `status` and `progress` fields. Use - a loop to check the status every 10 seconds. While it is being upgraded, the - custom model has the status `upgrading`. When the upgrade is complete, the model - resumes the status that it had prior to upgrade. The service cannot accept - subsequent requests for the model until the upgrade completes. + [Get a custom language model](#getlanguagemodel) method to poll the model's + status. The method returns a `LanguageModel` object that includes `status` and + `progress` fields. Use a loop to check the status every 10 seconds. While it is + being upgraded, the custom model has the status `upgrading`. When the upgrade is + complete, the model resumes the status that it had prior to upgrade. The service + cannot accept subsequent requests for the model until the upgrade completes. + **Note:** Upgrading is necessary only for custom language models that are based on + previous-generation models. Only a single version of a custom model that is based + on a next-generation model is ever available. **See also:** [Upgrading a custom language model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-upgrade#custom-upgrade-language). @@ -1723,7 +1727,7 @@ def upgrade_language_model(self, customization_id: str, **path_param_dict) request = self.prepare_request(method='POST', url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -1735,9 +1739,10 @@ def list_corpora(self, customization_id: str, **kwargs) -> DetailedResponse: List corpora. Lists information about all corpora from a custom language model. The information - includes the total number of words and out-of-vocabulary (OOV) words, name, and - status of each corpus. 
You must use credentials for the instance of the service - that owns a model to list its corpora. + includes the name, status, and total number of words for each corpus. _For custom + models that are based on previous-generation models_, it also includes the number + of out-of-vocabulary (OOV) words from the corpus. You must use credentials for the + instance of the service that owns a model to list its corpora. **See also:** [Listing corpora for a custom language model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageCorpora#listCorpora). @@ -1769,7 +1774,7 @@ def list_corpora(self, customization_id: str, **kwargs) -> DetailedResponse: **path_param_dict) request = self.prepare_request(method='GET', url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response def add_corpus(self, @@ -1786,45 +1791,52 @@ def add_corpus(self, Use multiple requests to submit multiple corpus text files. You must use credentials for the instance of the service that owns a model to add a corpus to it. Adding a corpus does not affect the custom language model until you train the - model for the new data by using the **Train a custom language model** method. + model for the new data by using the [Train a custom language + model](#trainlanguagemodel) method. Submit a plain text file that contains sample sentences from the domain of - interest to enable the service to extract words in context. The more sentences you - add that represent the context in which speakers use words from the domain, the - better the service's recognition accuracy. + interest to enable the service to parse the words in context. The more sentences + you add that represent the context in which speakers use words from the domain, + the better the service's recognition accuracy. The call returns an HTTP 201 response code if the corpus is valid. The service - then asynchronously processes the contents of the corpus and automatically - extracts new words that it finds. This operation can take on the order of minutes - to complete depending on the total number of words and the number of new words in - the corpus, as well as the current load on the service. You cannot submit requests - to add additional resources to the custom model or to train the model until the - service's analysis of the corpus for the current request completes. Use the **List - a corpus** method to check the status of the analysis. - The service auto-populates the model's words resource with words from the corpus - that are not found in its base vocabulary. These words are referred to as - out-of-vocabulary (OOV) words. After adding a corpus, you must validate the words - resource to ensure that each OOV word's definition is complete and valid. You can - use the **List custom words** method to examine the words resource. You can use - other words method to eliminate typos and modify how words are pronounced as - needed. + then asynchronously processes and automatically extracts data from the contents of + the corpus. This operation can take on the order of minutes to complete depending + on the current load on the service, the total number of words in the corpus, and, + _for custom models that are based on previous-generation models_, the number of + new (out-of-vocabulary) words in the corpus. You cannot submit requests to add + additional resources to the custom model or to train the model until the service's + analysis of the corpus for the current request completes. 
Use the [Get a
+ corpus](#getcorpus) method to check the status of the analysis.
+ _For custom models that are based on previous-generation models_, the service
+ auto-populates the model's words resource with words from the corpus that are not
+ found in its base vocabulary. These words are referred to as out-of-vocabulary
+ (OOV) words. After adding a corpus, you must validate the words resource to ensure
+ that each OOV word's definition is complete and valid. You can use the [List
+ custom words](#listwords) method to examine the words resource. You can use other
+ words-related methods to eliminate typos and modify how words are pronounced as
+ needed.
To add a corpus file that has the same name as an existing corpus, set the
`allow_overwrite` parameter to `true`; otherwise, the request fails. Overwriting
an existing corpus causes the service to process the corpus text file and extract
- OOV words anew. Before doing so, it removes any OOV words associated with the
- existing corpus from the model's words resource unless they were also added by
- another corpus or grammar, or they have been modified in some way with the **Add
- custom words** or **Add a custom word** method.
+ its data anew. _For a custom model that is based on a previous-generation model_,
+ the service first removes any OOV words that are associated with the existing
+ corpus from the model's words resource unless they were also added by another
+ corpus or grammar, or they have been modified in some way with the [Add custom
+ words](#addwords) or [Add a custom word](#addword) method.
The service limits the overall amount of data that you can add to a custom model
- to a maximum of 10 million total words from all sources combined. Also, you can
- add no more than 90 thousand custom (OOV) words to a model. This includes words
- that the service extracts from corpora and grammars, and words that you add
- directly.
+ to a maximum of 10 million total words from all sources combined. _For a custom
+ model that is based on a previous-generation model_, you can add no more than 90
+ thousand custom (OOV) words to a model. This includes words that the service
+ extracts from corpora and grammars, and words that you add directly.
**See also:**
* [Add a corpus to the custom language
model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-languageCreate#addCorpus)
- * [Working with
- corpora](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-corporaWords#workingCorpora)
- * [Validating a words
- resource](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-corporaWords#validateModel).
+ * [Working with corpora for previous-generation
+ models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-corporaWords#workingCorpora)
+ * [Working with corpora for next-generation
+ models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-corporaWords-ng#workingCorpora-ng)
+ * [Validating a words resource for previous-generation
+ models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-corporaWords#validateModel)
+ * [Validating a words resource for next-generation
+ models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-corporaWords-ng#validateModel-ng).
:param str customization_id: The customization ID (GUID) of the custom
language model that is to be used for the request. You must make the
@@ -1850,9 +1862,9 @@ def add_corpus(self,
characters; the service assumes UTF-8 encoding if it encounters non-ASCII
characters.
Make sure that you know the character encoding of the file. You must use - that encoding when working with the words in the custom language model. For - more information, see [Character - encoding](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-corporaWords#charEncoding). + that same encoding when working with the words in the custom language + model. For more information, see [Character encoding for custom + words](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageWords#charEncoding). With the `curl` command, use the `--data-binary` option to upload the file for the request. :param bool allow_overwrite: (optional) If `true`, the specified corpus @@ -1896,7 +1908,7 @@ def add_corpus(self, params=params, files=form_data) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_corpus(self, customization_id: str, corpus_name: str, @@ -1905,9 +1917,10 @@ def get_corpus(self, customization_id: str, corpus_name: str, Get a corpus. Gets information about a corpus from a custom language model. The information - includes the total number of words and out-of-vocabulary (OOV) words, name, and - status of the corpus. You must use credentials for the instance of the service - that owns a model to list its corpora. + includes the name, status, and total number of words for the corpus. _For custom + models that are based on previous-generation models_, it also includes the number + of out-of-vocabulary (OOV) words from the corpus. You must use credentials for the + instance of the service that owns a model to list its corpora. **See also:** [Listing corpora for a custom language model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageCorpora#listCorpora). @@ -1943,7 +1956,7 @@ def get_corpus(self, customization_id: str, corpus_name: str, **path_param_dict) request = self.prepare_request(method='GET', url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_corpus(self, customization_id: str, corpus_name: str, @@ -1951,14 +1964,15 @@ def delete_corpus(self, customization_id: str, corpus_name: str, """ Delete a corpus. - Deletes an existing corpus from a custom language model. The service removes any - out-of-vocabulary (OOV) words that are associated with the corpus from the custom - model's words resource unless they were also added by another corpus or grammar, - or they were modified in some way with the **Add custom words** or **Add a custom - word** method. Removing a corpus does not affect the custom model until you train - the model with the **Train a custom language model** method. You must use - credentials for the instance of the service that owns a model to delete its - corpora. + Deletes an existing corpus from a custom language model. Removing a corpus does + not affect the custom model until you train the model with the [Train a custom + language model](#trainlanguagemodel) method. You must use credentials for the + instance of the service that owns a model to delete its corpora. + _For custom models that are based on previous-generation models_, the service + removes any out-of-vocabulary (OOV) words that are associated with the corpus from + the custom model's words resource unless they were also added by another corpus or + grammar, or they were modified in some way with the [Add custom words](#addwords) + or [Add a custom word](#addword) method. 
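A minimal sketch of this deletion flow, assuming the `service` client and placeholders from the sketches above; retraining is what makes the removal take effect.

# Delete the corpus, then retrain the model so the change takes effect.
service.delete_corpus('your_customization_id', 'corpus1')
service.train_language_model('your_customization_id')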
**See also:** [Deleting a corpus from a custom language model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageCorpora#deleteCorpus). @@ -1996,7 +2010,7 @@ def delete_corpus(self, customization_id: str, corpus_name: str, url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -2014,10 +2028,11 @@ def list_words(self, Lists information about custom words from a custom language model. You can list all words from the custom model's words resource, only custom words that were - added or modified by the user, or only out-of-vocabulary (OOV) words that were - extracted from corpora or are recognized by grammars. You can also indicate the - order in which the service is to return words; by default, the service lists words - in ascending alphabetical order. You must use credentials for the instance of the + added or modified by the user, or, _for a custom model that is based on a + previous-generation model_, only out-of-vocabulary (OOV) words that were extracted + from corpora or are recognized by grammars. You can also indicate the order in + which the service is to return words; by default, the service lists words in + ascending alphabetical order. You must use credentials for the instance of the service that owns a model to list information about its words. **See also:** [Listing words from a custom language model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageWords#listWords). @@ -2033,6 +2048,10 @@ def list_words(self, directly. * `corpora` shows only OOV that were extracted from corpora. * `grammars` shows only OOV words that are recognized by grammars. + _For a custom model that is based on a next-generation model_, only `all` + and `user` apply. Both options return the same results. Words from other + sources are not added to custom models that are based on next-generation + models. :param str sort: (optional) Indicates the order in which the words are to be listed, `alphabetical` or by `count`. You can prepend an optional `+` or `-` to an argument to indicate whether the results are to be sorted in @@ -2070,7 +2089,7 @@ def list_words(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def add_words(self, customization_id: str, words: List['CustomWord'], @@ -2078,34 +2097,38 @@ def add_words(self, customization_id: str, words: List['CustomWord'], """ Add custom words. - Adds one or more custom words to a custom language model. The service populates + Adds one or more custom words to a custom language model. You can use this method + to add words or to modify existing words in a custom model's words resource. _For + custom models that are based on previous-generation models_, the service populates the words resource for a custom model with out-of-vocabulary (OOV) words from each - corpus or grammar that is added to the model. You can use this method to add - additional words or to modify existing words in the words resource. The words + corpus or grammar that is added to the model. You can use this method to modify + OOV words in the model's words resource. + _For a custom model that is based on a previous-generation model_, the words resource for a model can contain a maximum of 90 thousand custom (OOV) words. This includes words that the service extracts from corpora and grammars and words that you add directly. 
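As a sketch of this method, assuming the `service` client from the earlier sketches and that the `CustomWord` model class is importable from `ibm_watson.speech_to_text_v1` (as the `List['CustomWord']` type hint suggests); the words themselves are illustrative, and the `sounds_like` and `display_as` fields are explained in the paragraphs that follow.

from ibm_watson.speech_to_text_v1 import CustomWord

words = [
    # sounds_like applies only to custom models that are based on
    # previous-generation models.
    CustomWord(word='IEEE', sounds_like=['i triple e']),
    CustomWord(word='IBM', display_as='IBM™'),
]
service.add_words('your_customization_id', words)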
You must use credentials for the instance of the service that owns a model to add or modify custom words for the model. Adding or modifying custom words does not affect the custom model until you train the model for the new data by using the - **Train a custom language model** method. + [Train a custom language model](#trainlanguagemodel) method. You add custom words by providing a `CustomWords` object, which is an array of - `CustomWord` objects, one per word. You must use the object's `word` parameter to - identify the word that is to be added. You can also provide one or both of the - optional `sounds_like` and `display_as` fields for each word. - * The `sounds_like` field provides an array of one or more pronunciations for the - word. Use the parameter to specify how the word can be pronounced by users. Use - the parameter for words that are difficult to pronounce, foreign words, acronyms, - and so on. For example, you might specify that the word `IEEE` can sound like `i - triple e`. You can specify a maximum of five sounds-like pronunciations for a - word. If you omit the `sounds_like` field, the service attempts to set the field - to its pronunciation of the word. It cannot generate a pronunciation for all - words, so you must review the word's definition to ensure that it is complete and - valid. + `CustomWord` objects, one per word. Use the object's `word` parameter to identify + the word that is to be added. You can also provide one or both of the optional + `display_as` or `sounds_like` fields for each word. * The `display_as` field provides a different way of spelling the word in a transcript. Use the parameter when you want the word to appear different from its usual representation or from its spelling in training data. For example, you might - indicate that the word `IBM(trademark)` is to be displayed as `IBM™`. + indicate that the word `IBM` is to be displayed as `IBM™`. + * The `sounds_like` field, _which can be used only with a custom model that is + based on a previous-generation model_, provides an array of one or more + pronunciations for the word. Use the parameter to specify how the word can be + pronounced by users. Use the parameter for words that are difficult to pronounce, + foreign words, acronyms, and so on. For example, you might specify that the word + `IEEE` can sound like `i triple e`. You can specify a maximum of five sounds-like + pronunciations for a word. If you omit the `sounds_like` field, the service + attempts to set the field to its pronunciation of the word. It cannot generate a + pronunciation for all words, so you must review the word's definition to ensure + that it is complete and valid. If you add a custom word that already exists in the words resource for the custom model, the new definition overwrites the existing data for the word. If the service encounters an error with the input data, it returns a failure code and @@ -2114,23 +2137,28 @@ def add_words(self, customization_id: str, words: List['CustomWord'], asynchronously processes the words to add them to the model's words resource. The time that it takes for the analysis to complete depends on the number of new words that you add but is generally faster than adding a corpus or grammar. - You can monitor the status of the request by using the **List a custom language - model** method to poll the model's status. Use a loop to check the status every 10 - seconds. The method returns a `Customization` object that includes a `status` - field. 
A status of `ready` means that the words have been added to the custom - model. The service cannot accept requests to add new data or to train the model - until the existing request completes. - You can use the **List custom words** or **List a custom word** method to review - the words that you add. Words with an invalid `sounds_like` field include an - `error` field that describes the problem. You can use other words-related methods - to correct errors, eliminate typos, and modify how words are pronounced as needed. + You can monitor the status of the request by using the [Get a custom language + model](#getlanguagemodel) method to poll the model's status. Use a loop to check + the status every 10 seconds. The method returns a `Customization` object that + includes a `status` field. A status of `ready` means that the words have been + added to the custom model. The service cannot accept requests to add new data or + to train the model until the existing request completes. + You can use the [List custom words](#listwords) or [Get a custom word](#getword) + method to review the words that you add. Words with an invalid `sounds_like` field + include an `error` field that describes the problem. You can use other + words-related methods to correct errors, eliminate typos, and modify how words are + pronounced as needed. **See also:** * [Add words to the custom language model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-languageCreate#addWords) - * [Working with custom - words](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-corporaWords#workingWords) - * [Validating a words - resource](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-corporaWords#validateModel). + * [Working with custom words for previous-generation + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-corporaWords#workingWords) + * [Working with custom words for next-generation + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-corporaWords-ng#workingWords-ng) + * [Validating a words resource for previous-generation + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-corporaWords#validateModel) + * [Validating a words resource for next-generation + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-corporaWords-ng#validateModel-ng). :param str customization_id: The customization ID (GUID) of the custom language model that is to be used for the request. You must make the @@ -2174,7 +2202,7 @@ def add_words(self, customization_id: str, words: List['CustomWord'], headers=headers, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def add_word(self, @@ -2188,43 +2216,52 @@ def add_word(self, """ Add a custom word. - Adds a custom word to a custom language model. The service populates the words - resource for a custom model with out-of-vocabulary (OOV) words from each corpus or - grammar that is added to the model. You can use this method to add a word or to - modify an existing word in the words resource. The words resource for a model can - contain a maximum of 90 thousand custom (OOV) words. This includes words that the - service extracts from corpora and grammars and words that you add directly. + Adds a custom word to a custom language model. You can use this method to add a + word or to modify an existing word in the words resource. 
_For custom models that
+ are based on previous-generation models_, the service populates the words resource
+ for a custom model with out-of-vocabulary (OOV) words from each corpus or grammar
+ that is added to the model. You can use this method to modify OOV words in the
+ model's words resource.
+ _For a custom model that is based on a previous-generation model_, the words
+ resource for a model can contain a maximum of 90 thousand custom (OOV) words. This
+ includes words that the service extracts from corpora and grammars and words that
+ you add directly.
You must use credentials for the instance of the service that owns a model to add
or modify a custom word for the model. Adding or modifying a custom word does not
affect the custom model until you train the model for the new data by using the
- **Train a custom language model** method.
+ [Train a custom language model](#trainlanguagemodel) method.
Use the `word_name` parameter to specify the custom word that is to be added or
modified. Use the `CustomWord` object to provide one or both of the optional
- `sounds_like` and `display_as` fields for the word.
- * The `sounds_like` field provides an array of one or more pronunciations for the
- word. Use the parameter to specify how the word can be pronounced by users. Use
- the parameter for words that are difficult to pronounce, foreign words, acronyms,
- and so on. For example, you might specify that the word `IEEE` can sound like `i
- triple e`. You can specify a maximum of five sounds-like pronunciations for a
- word. If you omit the `sounds_like` field, the service attempts to set the field
- to its pronunciation of the word. It cannot generate a pronunciation for all
- words, so you must review the word's definition to ensure that it is complete and
- valid.
+ `display_as` or `sounds_like` fields for the word.
* The `display_as` field provides a different way of spelling the word in a
transcript. Use the parameter when you want the word to appear different from its
usual representation or from its spelling in training data. For example, you might
- indicate that the word `IBM(trademark)` is to be displayed as `IBM™`.
+ indicate that the word `IBM` is to be displayed as `IBM™`.
+ * The `sounds_like` field, _which can be used only with a custom model that is
+ based on a previous-generation model_, provides an array of one or more
+ pronunciations for the word. Use the parameter to specify how the word can be
+ pronounced by users. Use the parameter for words that are difficult to pronounce,
+ foreign words, acronyms, and so on. For example, you might specify that the word
+ `IEEE` can sound like `i triple e`. You can specify a maximum of five sounds-like
+ pronunciations for a word. If you omit the `sounds_like` field, the service
+ attempts to set the field to its pronunciation of the word. It cannot generate a
+ pronunciation for all words, so you must review the word's definition to ensure
+ that it is complete and valid.
If you add a custom word that already exists in the words resource for the custom
model, the new definition overwrites the existing data for the word. If the
service encounters an error, it does not add the word to the words resource. Use
- the **List a custom word** method to review the word that you add.
+ the [Get a custom word](#getword) method to review the word that you add.
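A sketch of the single-word variant, assuming the `service` client from the earlier sketches; the word and its pronunciations are illustrative only.

service.add_word(
    'your_customization_id',
    'NCAA',
    sounds_like=['N C A A', 'N C double A'])

# Review the word's definition; an `error` field flags an invalid entry.
word = service.get_word('your_customization_id', 'NCAA').get_result()
print(word)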
**See also:** * [Add words to the custom language model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-languageCreate#addWords) - * [Working with custom - words](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-corporaWords#workingWords) - * [Validating a words - resource](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-corporaWords#validateModel). + * [Working with custom words for previous-generation + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-corporaWords#workingWords) + * [Working with custom words for next-generation + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-corporaWords-ng#workingWords-ng) + * [Validating a words resource for previous-generation + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-corporaWords#validateModel) + * [Validating a words resource for next-generation + models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-corporaWords-ng#validateModel-ng). :param str customization_id: The customization ID (GUID) of the custom language model that is to be used for the request. You must make the @@ -2236,14 +2273,15 @@ def add_word(self, URL-encode the word if it includes non-ASCII characters. For more information, see [Character encoding](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-corporaWords#charEncoding). - :param str word: (optional) For the **Add custom words** method, you must - specify the custom word that is to be added to or updated in the custom - model. Do not include spaces in the word. Use a `-` (dash) or `_` + :param str word: (optional) For the [Add custom words](#addwords) method, + you must specify the custom word that is to be added to or updated in the + custom model. Do not include spaces in the word. Use a `-` (dash) or `_` (underscore) to connect the tokens of compound words. - Omit this parameter for the **Add a custom word** method. - :param List[str] sounds_like: (optional) An array of sounds-like - pronunciations for the custom word. Specify how words that are difficult to - pronounce, foreign words, acronyms, and so on can be pronounced by users. + Omit this parameter for the [Add a custom word](#addword) method. + :param List[str] sounds_like: (optional) _For a custom model that is based + on a previous-generation model_, an array of sounds-like pronunciations for + the custom word. Specify how words that are difficult to pronounce, foreign + words, acronyms, and so on can be pronounced by users. * For a word that is not in the service's base vocabulary, omit the parameter to have the service automatically generate a sounds-like pronunciation for the word. @@ -2253,6 +2291,9 @@ def add_word(self, pronunciation from the base vocabulary. A word can have at most five sounds-like pronunciations. A pronunciation can include at most 40 characters not including spaces. + _For a custom model that is based on a next-generation model_, omit this + field. Custom models based on next-generation models do not support the + `sounds_like` field. The service ignores the field. :param str display_as: (optional) An alternative spelling for the custom word when it appears in a transcript. 
Use the parameter when you want the word to have a spelling that is different from its usual representation or @@ -2295,7 +2336,7 @@ def add_word(self, headers=headers, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_word(self, customization_id: str, word_name: str, @@ -2343,7 +2384,7 @@ def get_word(self, customization_id: str, word_name: str, **path_param_dict) request = self.prepare_request(method='GET', url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_word(self, customization_id: str, word_name: str, @@ -2353,11 +2394,11 @@ def delete_word(self, customization_id: str, word_name: str, Deletes a custom word from a custom language model. You can remove any word that you added to the custom model's words resource via any means. However, if the word - also exists in the service's base vocabulary, the service removes only the custom - pronunciation for the word; the word remains in the base vocabulary. Removing a + also exists in the service's base vocabulary, the service removes the word only + from the words resource; the word remains in the base vocabulary. Removing a custom word does not affect the custom model until you train the model with the - **Train a custom language model** method. You must use credentials for the - instance of the service that owns a model to delete its words. + [Train a custom language model](#trainlanguagemodel) method. You must use + credentials for the instance of the service that owns a model to delete its words. **See also:** [Deleting a word from a custom language model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageWords#deleteWord). @@ -2397,7 +2438,7 @@ def delete_word(self, customization_id: str, word_name: str, url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -2412,7 +2453,10 @@ def list_grammars(self, customization_id: str, Lists information about all grammars from a custom language model. The information includes the total number of out-of-vocabulary (OOV) words, name, and status of each grammar. You must use credentials for the instance of the service that owns a - model to list its grammars. + model to list its grammars. Grammars are available for all languages and models + that support language customization. + **Note:** Grammars are supported only for use with previous-generation models. + They are not supported for next-generation models. **See also:** [Listing grammars from a custom language model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageGrammars#listGrammars). @@ -2444,7 +2488,7 @@ def list_grammars(self, customization_id: str, **path_param_dict) request = self.prepare_request(method='GET', url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response def add_grammar(self, @@ -2462,32 +2506,37 @@ def add_grammar(self, UTF-8 format that defines the grammar. Use multiple requests to submit multiple grammar files. You must use credentials for the instance of the service that owns a model to add a grammar to it. Adding a grammar does not affect the custom - language model until you train the model for the new data by using the **Train a - custom language model** method. + language model until you train the model for the new data by using the [Train a + custom language model](#trainlanguagemodel) method. 
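A minimal sketch of adding a grammar, assuming the `service` client from the earlier sketches; the file name, grammar name, and ABNF content type are illustrative (use `application/srgs+xml` for XML grammars).

with open('confirm.abnf', 'r') as f:
    grammar = f.read()

service.add_grammar(
    'your_customization_id',
    'confirm-abnf',
    grammar,
    'application/srgs',
    allow_overwrite=True)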
The call returns an HTTP 201 response code if the grammar is valid. The service then asynchronously processes the contents of the grammar and automatically extracts new words that it finds. This operation can take a few seconds or minutes to complete depending on the size and complexity of the grammar, as well as the current load on the service. You cannot submit requests to add additional resources to the custom model or to train the model until the service's analysis - of the grammar for the current request completes. Use the **Get a grammar** method - to check the status of the analysis. + of the grammar for the current request completes. Use the [Get a + grammar](#getgrammar) method to check the status of the analysis. The service populates the model's words resource with any word that is recognized by the grammar that is not found in the model's base vocabulary. These are - referred to as out-of-vocabulary (OOV) words. You can use the **List custom - words** method to examine the words resource and use other words-related methods - to eliminate typos and modify how words are pronounced as needed. + referred to as out-of-vocabulary (OOV) words. You can use the [List custom + words](#listwords) method to examine the words resource and use other + words-related methods to eliminate typos and modify how words are pronounced as + needed. To add a grammar that has the same name as an existing grammar, set the `allow_overwrite` parameter to `true`; otherwise, the request fails. Overwriting an existing grammar causes the service to process the grammar file and extract OOV words anew. Before doing so, it removes any OOV words associated with the existing grammar from the model's words resource unless they were also added by another - resource or they have been modified in some way with the **Add custom words** or - **Add a custom word** method. + resource or they have been modified in some way with the [Add custom + words](#addwords) or [Add a custom word](#addword) method. The service limits the overall amount of data that you can add to a custom model to a maximum of 10 million total words from all sources combined. Also, you can add no more than 90 thousand OOV words to a model. This includes words that the service extracts from corpora and grammars and words that you add directly. + Grammars are available for all languages and models that support language + customization. + **Note:** Grammars are supported only for use with previous-generation models. + They are not supported for next-generation models. **See also:** * [Understanding grammars](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-grammarUnderstand#grammarUnderstand) @@ -2568,7 +2617,7 @@ def add_grammar(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_grammar(self, customization_id: str, grammar_name: str, @@ -2579,7 +2628,10 @@ def get_grammar(self, customization_id: str, grammar_name: str, Gets information about a grammar from a custom language model. The information includes the total number of out-of-vocabulary (OOV) words, name, and status of the grammar. You must use credentials for the instance of the service that owns a - model to list its grammars. + model to list its grammars. Grammars are available for all languages and models + that support language customization. + **Note:** Grammars are supported only for use with previous-generation models. + They are not supported for next-generation models. 
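And a sketch of the status check described above, again with illustrative placeholders and the `service` client from the earlier sketches:

import time

# Poll until the service finishes analyzing the grammar.
while True:
    grammar = service.get_grammar('your_customization_id', 'confirm-abnf').get_result()
    if grammar['status'] != 'being_processed':
        break
    time.sleep(5)
print(grammar['status'])  # 'analyzed' when processing succeeds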
**See also:** [Listing grammars from a custom language model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageGrammars#listGrammars). @@ -2616,7 +2668,7 @@ def get_grammar(self, customization_id: str, grammar_name: str, **path_param_dict) request = self.prepare_request(method='GET', url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_grammar(self, customization_id: str, grammar_name: str, @@ -2627,10 +2679,14 @@ def delete_grammar(self, customization_id: str, grammar_name: str, Deletes an existing grammar from a custom language model. The service removes any out-of-vocabulary (OOV) words associated with the grammar from the custom model's words resource unless they were also added by another resource or they were - modified in some way with the **Add custom words** or **Add a custom word** - method. Removing a grammar does not affect the custom model until you train the - model with the **Train a custom language model** method. You must use credentials - for the instance of the service that owns a model to delete its grammar. + modified in some way with the [Add custom words](#addwords) or [Add a custom + word](#addword) method. Removing a grammar does not affect the custom model until + you train the model with the [Train a custom language model](#trainlanguagemodel) + method. You must use credentials for the instance of the service that owns a model + to delete its grammar. Grammars are available for all languages and models that + support language customization. + **Note:** Grammars are supported only for use with previous-generation models. + They are not supported for next-generation models. **See also:** [Deleting a grammar from a custom language model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageGrammars#deleteGrammar). @@ -2669,7 +2725,7 @@ def delete_grammar(self, customization_id: str, grammar_name: str, url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -2693,6 +2749,8 @@ def create_acoustic_model(self, The service returns an error if you attempt to create more than 1024 models. You do not lose any models, but you cannot create any more until your model count is below the limit. + **Note:** Acoustic model customization is supported only for use with + previous-generation models. It is not supported for next-generation models. **See also:** [Create a custom acoustic model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-acoustic#createModel-acoustic). @@ -2707,7 +2765,7 @@ def create_acoustic_model(self, `ar-AR_BroadbandModel` is deprecated; use `ar-MS_BroadbandModel` instead.) To determine whether a base model supports acoustic model customization, refer to [Language support for - customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-customization#languageSupport). + customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-support#custom-language-support). :param str description: (optional) A description of the new custom acoustic model. Use a localized description that matches the language of the custom model. @@ -2745,7 +2803,7 @@ def create_acoustic_model(self, headers=headers, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def list_acoustic_models(self, @@ -2760,6 +2818,8 @@ def list_acoustic_models(self, the specified language. 
Omit the parameter to see all custom acoustic models for all languages. You must use credentials for the instance of the service that owns a model to list information about it. + **Note:** Acoustic model customization is supported only for use with + previous-generation models. It is not supported for next-generation models. **See also:** [Listing custom acoustic models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageAcousticModels#listModels-acoustic). @@ -2770,7 +2830,7 @@ def list_acoustic_models(self, deprecated; use `ar-MS` instead.) To determine the languages for which customization is available, see [Language support for - customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-customization#languageSupport). + customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-support#custom-language-support). :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse with `dict` result representing a `AcousticModels` object @@ -2794,7 +2854,7 @@ def list_acoustic_models(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_acoustic_model(self, customization_id: str, @@ -2804,6 +2864,8 @@ def get_acoustic_model(self, customization_id: str, Gets information about a specified custom acoustic model. You must use credentials for the instance of the service that owns a model to list information about it. + **Note:** Acoustic model customization is supported only for use with + previous-generation models. It is not supported for next-generation models. **See also:** [Listing custom acoustic models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageAcousticModels#listModels-acoustic). @@ -2835,7 +2897,7 @@ def get_acoustic_model(self, customization_id: str, **path_param_dict) request = self.prepare_request(method='GET', url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_acoustic_model(self, customization_id: str, @@ -2847,6 +2909,8 @@ def delete_acoustic_model(self, customization_id: str, another request, such as adding an audio resource to the model, is currently being processed. You must use credentials for the instance of the service that owns a model to delete it. + **Note:** Acoustic model customization is supported only for use with + previous-generation models. It is not supported for next-generation models. **See also:** [Deleting a custom acoustic model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageAcousticModels#deleteModel-acoustic). @@ -2880,7 +2944,7 @@ def delete_acoustic_model(self, customization_id: str, url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response def train_acoustic_model(self, @@ -2905,14 +2969,14 @@ def train_acoustic_model(self, takes approximately 2 hours to train a model that contains a total of 2 hours of audio. The method returns an HTTP 200 response code to indicate that the training process has begun. - You can monitor the status of the training by using the **Get a custom acoustic - model** method to poll the model's status. Use a loop to check the status once a - minute. The method returns an `AcousticModel` object that includes `status` and - `progress` fields. 
A status of `available` indicates that the custom model is - trained and ready to use. The service cannot train a model while it is handling - another request for the model. The service cannot accept subsequent training - requests, or requests to add new audio resources, until the existing training - request completes. + You can monitor the status of the training by using the [Get a custom acoustic + model](#getacousticmodel) method to poll the model's status. Use a loop to check + the status once a minute. The method returns an `AcousticModel` object that + includes `status` and `progress` fields. A status of `available` indicates that + the custom model is trained and ready to use. The service cannot train a model + while it is handling another request for the model. The service cannot accept + subsequent training requests, or requests to add new audio resources, until the + existing training request completes. You can use the optional `custom_language_model_id` parameter to specify the GUID of a separately created custom language model that is to be used during training. Train with a custom language model if you have verbatim transcriptions of the @@ -2921,6 +2985,8 @@ def train_acoustic_model(self, files. For training to succeed, both of the custom models must be based on the same version of the same base model, and the custom language model must be fully trained and available. + **Note:** Acoustic model customization is supported only for use with + previous-generation models. It is not supported for next-generation models. **See also:** * [Train the custom acoustic model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-acoustic#trainModel-acoustic) @@ -2985,7 +3051,7 @@ def train_acoustic_model(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def reset_acoustic_model(self, customization_id: str, @@ -3001,6 +3067,8 @@ def reset_acoustic_model(self, customization_id: str, service cannot accept subsequent requests for the model until the existing reset request completes. You must use credentials for the instance of the service that owns a model to reset it. + **Note:** Acoustic model customization is supported only for use with + previous-generation models. It is not supported for next-generation models. **See also:** [Resetting a custom acoustic model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageAcousticModels#resetModel-acoustic). @@ -3032,7 +3100,7 @@ def reset_acoustic_model(self, customization_id: str, **path_param_dict) request = self.prepare_request(method='POST', url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response def upgrade_acoustic_model(self, @@ -3053,19 +3121,21 @@ def upgrade_acoustic_model(self, for the instance of the service that owns a model to upgrade it. The method returns an HTTP 200 response code to indicate that the upgrade process has begun successfully. You can monitor the status of the upgrade by using the - **Get a custom acoustic model** method to poll the model's status. The method - returns an `AcousticModel` object that includes `status` and `progress` fields. - Use a loop to check the status once a minute. While it is being upgraded, the - custom model has the status `upgrading`. When the upgrade is complete, the model - resumes the status that it had prior to upgrade. The service cannot upgrade a - model while it is handling another request for the model. 
The service cannot - accept subsequent requests for the model until the existing upgrade request - completes. + [Get a custom acoustic model](#getacousticmodel) method to poll the model's + status. The method returns an `AcousticModel` object that includes `status` and + `progress` fields. Use a loop to check the status once a minute. While it is being + upgraded, the custom model has the status `upgrading`. When the upgrade is + complete, the model resumes the status that it had prior to upgrade. The service + cannot upgrade a model while it is handling another request for the model. The + service cannot accept subsequent requests for the model until the existing upgrade + request completes. If the custom acoustic model was trained with a separately created custom language model, you must use the `custom_language_model_id` parameter to specify the GUID of that custom language model. The custom language model must be upgraded before the custom acoustic model can be upgraded. Omit the parameter if the custom acoustic model was not trained with a custom language model. + **Note:** Acoustic model customization is supported only for use with + previous-generation models. It is not supported for next-generation models. **See also:** [Upgrading a custom acoustic model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-upgrade#custom-upgrade-acoustic). @@ -3118,7 +3188,7 @@ def upgrade_acoustic_model(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -3135,6 +3205,8 @@ def list_audio(self, customization_id: str, **kwargs) -> DetailedResponse: which is important for checking the service's analysis of the resource in response to a request to add it to the custom acoustic model. You must use credentials for the instance of the service that owns a model to list its audio resources. + **Note:** Acoustic model customization is supported only for use with + previous-generation models. It is not supported for next-generation models. **See also:** [Listing audio resources for a custom acoustic model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageAudio#listAudio). @@ -3166,7 +3238,7 @@ def list_audio(self, customization_id: str, **kwargs) -> DetailedResponse: **path_param_dict) request = self.prepare_request(method='GET', url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response def add_audio(self, @@ -3185,8 +3257,8 @@ def add_audio(self, the acoustic characteristics of the audio that you plan to transcribe. You must use credentials for the instance of the service that owns a model to add an audio resource to it. Adding audio data does not affect the custom acoustic model until - you train the model for the new data by using the **Train a custom acoustic - model** method. + you train the model for the new data by using the [Train a custom acoustic + model](#trainacousticmodel) method. You can add individual audio files or an archive file that contains multiple audio files. Adding multiple audio files via a single archive file is significantly more efficient than adding each file individually. You can add audio resources in any @@ -3207,11 +3279,13 @@ def add_audio(self, its length, sampling rate, and encoding. You cannot submit requests to train or upgrade the model until the service's analysis of all audio resources for current requests completes. 
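A minimal sketch of adding a single audio file, assuming the `service` client from the earlier sketches and an existing custom acoustic model; the customization ID, audio name, and file are illustrative placeholders.

with open('audio1.wav', 'rb') as audio_file:
    service.add_audio(
        'your_acoustic_customization_id',
        'audio1',
        audio_file,
        content_type='audio/wav',
        allow_overwrite=True)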
- To determine the status of the service's analysis of the audio, use the **Get an - audio resource** method to poll the status of the audio. The method accepts the - customization ID of the custom model and the name of the audio resource, and it - returns the status of the resource. Use a loop to check the status of the audio - every few seconds until it becomes `ok`. + To determine the status of the service's analysis of the audio, use the [Get an + audio resource](#getaudio) method to poll the status of the audio. The method + accepts the customization ID of the custom model and the name of the audio + resource, and it returns the status of the resource. Use a loop to check the + status of the audio every few seconds until it becomes `ok`. + **Note:** Acoustic model customization is supported only for use with + previous-generation models. It is not supported for next-generation models. **See also:** [Add audio to the custom acoustic model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-acoustic#addAudio). ### Content types for audio-type resources @@ -3292,8 +3366,8 @@ def add_audio(self, For an archive-type resource, the media type of the archive file. For more information, see **Content types for archive-type resources** in the method description. - :param str contained_content_type: (optional) **For an archive-type - resource,** specify the format of the audio files that are contained in the + :param str contained_content_type: (optional) _For an archive-type + resource_, specify the format of the audio files that are contained in the archive file if they are of type `audio/alaw`, `audio/basic`, `audio/l16`, or `audio/mulaw`. Include the `rate`, `channels`, and `endianness` parameters where necessary. In this case, all audio files that are @@ -3304,7 +3378,7 @@ def add_audio(self, The parameter accepts all of the audio formats that are supported for use with speech recognition. For more information, see **Content types for audio-type resources** in the method description. - **For an audio-type resource,** omit the header. + _For an audio-type resource_, omit the header. :param bool allow_overwrite: (optional) If `true`, the specified audio resource overwrites an existing audio resource with the same name. If `false`, the request fails if an audio resource with the same name already @@ -3349,7 +3423,7 @@ def add_audio(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_audio(self, customization_id: str, audio_name: str, @@ -3360,21 +3434,23 @@ def get_audio(self, customization_id: str, audio_name: str, Gets information about an audio resource from a custom acoustic model. The method returns an `AudioListing` object whose fields depend on the type of audio resource that you specify with the method's `audio_name` parameter: - * **For an audio-type resource,** the object's fields match those of an + * _For an audio-type resource_, the object's fields match those of an `AudioResource` object: `duration`, `name`, `details`, and `status`. - * **For an archive-type resource,** the object includes a `container` field whose + * _For an archive-type resource_, the object includes a `container` field whose fields match those of an `AudioResource` object. It also includes an `audio` field, which contains an array of `AudioResource` objects that provides information about the audio files that are contained in the archive. The information includes the status of the specified audio resource. 
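A sketch of the polling loop for the audio analysis, with the same illustrative placeholders; note where the `status` field lives for each resource type, as described below.

import time

# For an audio-type resource the status is on the listing itself; for an
# archive-type resource it is on the `container` object.
while True:
    listing = service.get_audio('your_acoustic_customization_id', 'audio1').get_result()
    status = listing.get('status') or listing['container']['status']
    if status != 'being_processed':
        break
    time.sleep(5)
print(status)  # 'ok' when the resource can be used for training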
The status is important for checking the service's analysis of a resource that you add to the custom model. - * For an audio-type resource, the `status` field is located in the `AudioListing` - object. - * For an archive-type resource, the `status` field is located in the + * _For an audio-type resource_, the `status` field is located in the + `AudioListing` object. + * _For an archive-type resource_, the `status` field is located in the `AudioResource` object that is returned in the `container` field. You must use credentials for the instance of the service that owns a model to list its audio resources. + **Note:** Acoustic model customization is supported only for use with + previous-generation models. It is not supported for next-generation models. **See also:** [Listing audio resources for a custom acoustic model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageAudio#listAudio). @@ -3410,7 +3486,7 @@ def get_audio(self, customization_id: str, audio_name: str, **path_param_dict) request = self.prepare_request(method='GET', url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_audio(self, customization_id: str, audio_name: str, @@ -3422,10 +3498,13 @@ def delete_audio(self, customization_id: str, audio_name: str, archive-type audio resource removes the entire archive of files. The service does not allow deletion of individual files from an archive resource. Removing an audio resource does not affect the custom model until you train the - model on its updated data by using the **Train a custom acoustic model** method. - You can delete an existing audio resource from a model while a different resource - is being added to the model. You must use credentials for the instance of the - service that owns a model to delete its audio resources. + model on its updated data by using the [Train a custom acoustic + model](#trainacousticmodel) method. You can delete an existing audio resource from + a model while a different resource is being added to the model. You must use + credentials for the instance of the service that owns a model to delete its audio + resources. + **Note:** Acoustic model customization is supported only for use with + previous-generation models. It is not supported for next-generation models. **See also:** [Deleting an audio resource from a custom acoustic model](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-manageAudio#deleteAudio). @@ -3463,7 +3542,7 @@ def delete_audio(self, customization_id: str, audio_name: str, url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -3515,7 +3594,7 @@ def delete_user_data(self, customer_id: str, **kwargs) -> DetailedResponse: headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response @@ -3526,9 +3605,9 @@ class GetModelEnums: class ModelId(str, Enum): """ - The identifier of the model in the form of its name from the output of the **Get a - model** method. (**Note:** The model `ar-AR_BroadbandModel` is deprecated; use - `ar-MS_BroadbandModel` instead.). + The identifier of the model in the form of its name from the output of the [List + models](#listmodels) method. (**Note:** The model `ar-AR_BroadbandModel` is + deprecated; use `ar-MS_BroadbandModel` instead.). 
""" AR_AR_BROADBANDMODEL = 'ar-AR_BroadbandModel' AR_MS_BROADBANDMODEL = 'ar-MS_BroadbandModel' @@ -3542,6 +3621,7 @@ class ModelId(str, Enum): EN_GB_BROADBANDMODEL = 'en-GB_BroadbandModel' EN_GB_NARROWBANDMODEL = 'en-GB_NarrowbandModel' EN_GB_TELEPHONY = 'en-GB_Telephony' + EN_IN_TELEPHONY = 'en-IN_Telephony' EN_US_BROADBANDMODEL = 'en-US_BroadbandModel' EN_US_MULTIMEDIA = 'en-US_Multimedia' EN_US_NARROWBANDMODEL = 'en-US_NarrowbandModel' @@ -3564,15 +3644,21 @@ class ModelId(str, Enum): FR_CA_NARROWBANDMODEL = 'fr-CA_NarrowbandModel' FR_CA_TELEPHONY = 'fr-CA_Telephony' FR_FR_BROADBANDMODEL = 'fr-FR_BroadbandModel' + FR_FR_MULTIMEDIA = 'fr-FR_Multimedia' FR_FR_NARROWBANDMODEL = 'fr-FR_NarrowbandModel' FR_FR_TELEPHONY = 'fr-FR_Telephony' + HI_IN_TELEPHONY = 'hi-IN_Telephony' IT_IT_BROADBANDMODEL = 'it-IT_BroadbandModel' IT_IT_NARROWBANDMODEL = 'it-IT_NarrowbandModel' IT_IT_TELEPHONY = 'it-IT_Telephony' JA_JP_BROADBANDMODEL = 'ja-JP_BroadbandModel' + JA_JP_MULTIMEDIA = 'ja-JP_Multimedia' JA_JP_NARROWBANDMODEL = 'ja-JP_NarrowbandModel' KO_KR_BROADBANDMODEL = 'ko-KR_BroadbandModel' + KO_KR_MULTIMEDIA = 'ko-KR_Multimedia' KO_KR_NARROWBANDMODEL = 'ko-KR_NarrowbandModel' + KO_KR_TELEPHONY = 'ko-KR_Telephony' + NL_BE_TELEPHONY = 'nl-BE_Telephony' NL_NL_BROADBANDMODEL = 'nl-NL_BroadbandModel' NL_NL_NARROWBANDMODEL = 'nl-NL_NarrowbandModel' PT_BR_BROADBANDMODEL = 'pt-BR_BroadbandModel' @@ -3613,7 +3699,7 @@ class Model(str, Enum): """ The identifier of the model that is to be used for the recognition request. (**Note:** The model `ar-AR_BroadbandModel` is deprecated; use - `ar-MS_BroadbandModel` instead.) See [Languages and + `ar-MS_BroadbandModel` instead.) See [Previous-generation languages and models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models) and [Next-generation languages and models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng). @@ -3627,6 +3713,7 @@ class Model(str, Enum): EN_AU_BROADBANDMODEL = 'en-AU_BroadbandModel' EN_AU_NARROWBANDMODEL = 'en-AU_NarrowbandModel' EN_AU_TELEPHONY = 'en-AU_Telephony' + EN_IN_TELEPHONY = 'en-IN_Telephony' EN_GB_BROADBANDMODEL = 'en-GB_BroadbandModel' EN_GB_NARROWBANDMODEL = 'en-GB_NarrowbandModel' EN_GB_TELEPHONY = 'en-GB_Telephony' @@ -3652,15 +3739,21 @@ class Model(str, Enum): FR_CA_NARROWBANDMODEL = 'fr-CA_NarrowbandModel' FR_CA_TELEPHONY = 'fr-CA_Telephony' FR_FR_BROADBANDMODEL = 'fr-FR_BroadbandModel' + FR_FR_MULTIMEDIA = 'fr-FR_Multimedia' FR_FR_NARROWBANDMODEL = 'fr-FR_NarrowbandModel' FR_FR_TELEPHONY = 'fr-FR_Telephony' + HI_IN_TELEPHONY = 'hi-IN_Telephony' IT_IT_BROADBANDMODEL = 'it-IT_BroadbandModel' IT_IT_NARROWBANDMODEL = 'it-IT_NarrowbandModel' IT_IT_TELEPHONY = 'it-IT_Telephony' JA_JP_BROADBANDMODEL = 'ja-JP_BroadbandModel' + JA_JP_MULTIMEDIA = 'ja-JP_Multimedia' JA_JP_NARROWBANDMODEL = 'ja-JP_NarrowbandModel' KO_KR_BROADBANDMODEL = 'ko-KR_BroadbandModel' + KO_KR_MULTIMEDIA = 'ko-KR_Multimedia' KO_KR_NARROWBANDMODEL = 'ko-KR_NarrowbandModel' + KO_KR_TELEPHONY = 'ko-KR_Telephony' + NL_BE_TELEPHONY = 'nl-BE_Telephony' NL_NL_BROADBANDMODEL = 'nl-NL_BroadbandModel' NL_NL_NARROWBANDMODEL = 'nl-NL_NarrowbandModel' PT_BR_BROADBANDMODEL = 'pt-BR_BroadbandModel' @@ -3701,7 +3794,7 @@ class Model(str, Enum): """ The identifier of the model that is to be used for the recognition request. (**Note:** The model `ar-AR_BroadbandModel` is deprecated; use - `ar-MS_BroadbandModel` instead.) See [Languages and + `ar-MS_BroadbandModel` instead.) 
See [Previous-generation languages and models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models) and [Next-generation languages and models](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-models-ng). @@ -3715,6 +3808,7 @@ class Model(str, Enum): EN_AU_BROADBANDMODEL = 'en-AU_BroadbandModel' EN_AU_NARROWBANDMODEL = 'en-AU_NarrowbandModel' EN_AU_TELEPHONY = 'en-AU_Telephony' + EN_IN_TELEPHONY = 'en-IN_Telephony' EN_GB_BROADBANDMODEL = 'en-GB_BroadbandModel' EN_GB_NARROWBANDMODEL = 'en-GB_NarrowbandModel' EN_GB_TELEPHONY = 'en-GB_Telephony' @@ -3740,15 +3834,21 @@ class Model(str, Enum): FR_CA_NARROWBANDMODEL = 'fr-CA_NarrowbandModel' FR_CA_TELEPHONY = 'fr-CA_Telephony' FR_FR_BROADBANDMODEL = 'fr-FR_BroadbandModel' + FR_FR_MULTIMEDIA = 'fr-FR_Multimedia' FR_FR_NARROWBANDMODEL = 'fr-FR_NarrowbandModel' FR_FR_TELEPHONY = 'fr-FR_Telephony' + HI_IN_TELEPHONY = 'hi-IN_Telephony' IT_IT_BROADBANDMODEL = 'it-IT_BroadbandModel' IT_IT_NARROWBANDMODEL = 'it-IT_NarrowbandModel' IT_IT_TELEPHONY = 'it-IT_Telephony' JA_JP_BROADBANDMODEL = 'ja-JP_BroadbandModel' + JA_JP_MULTIMEDIA = 'ja-JP_Multimedia' JA_JP_NARROWBANDMODEL = 'ja-JP_NarrowbandModel' KO_KR_BROADBANDMODEL = 'ko-KR_BroadbandModel' + KO_KR_MULTIMEDIA = 'ko-KR_Multimedia' KO_KR_NARROWBANDMODEL = 'ko-KR_NarrowbandModel' + KO_KR_TELEPHONY = 'ko-KR_Telephony' + NL_BE_TELEPHONY = 'nl-BE_Telephony' NL_NL_BROADBANDMODEL = 'nl-NL_BroadbandModel' NL_NL_NARROWBANDMODEL = 'nl-NL_NarrowbandModel' PT_BR_BROADBANDMODEL = 'pt-BR_BroadbandModel' @@ -3764,8 +3864,8 @@ class Events(str, Enum): * `recognitions.started` generates a callback notification when the service begins to process the job. * `recognitions.completed` generates a callback notification when the job is - complete. You must use the **Check a job** method to retrieve the results before - they time out or are deleted. + complete. You must use the [Check a job](#checkjob) method to retrieve the results + before they time out or are deleted. * `recognitions.completed_with_results` generates a callback notification when the job is complete. The notification includes the results of the request. * `recognitions.failed` generates a callback notification if the service @@ -3796,13 +3896,14 @@ class Language(str, Enum): identifier `ar-AR` is deprecated; use `ar-MS` instead.) To determine the languages for which customization is available, see [Language support for - customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-customization#languageSupport). + customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-support#custom-language-support). 
""" AR_AR = 'ar-AR' AR_MS = 'ar-MS' DE_DE = 'de-DE' EN_AU = 'en-AU' EN_GB = 'en-GB' + EN_IN = 'en-IN' EN_US = 'en-US' ES_AR = 'es-AR' ES_ES = 'es-ES' @@ -3812,9 +3913,11 @@ class Language(str, Enum): ES_PE = 'es-PE' FR_CA = 'fr-CA' FR_FR = 'fr-FR' + HI_IN = 'hi-IN' IT_IT = 'it-IT' JA_JP = 'ja-JP' KO_KR = 'ko-KR' + NL_BE = 'nl-BE' NL_NL = 'nl-NL' PT_BR = 'pt-BR' ZH_CN = 'zh-CN' @@ -3827,14 +3930,17 @@ class TrainLanguageModelEnums: class WordTypeToAdd(str, Enum): """ - The type of words from the custom language model's words resource on which to - train the model: + _For custom models that are based on previous-generation models_, the type of + words from the custom language model's words resource on which to train the model: * `all` (the default) trains the model on all new words, regardless of whether they were extracted from corpora or grammars or were added or modified by the user. - * `user` trains the model only on new words that were added or modified by the + * `user` trains the model only on custom words that were added or modified by the user directly. The model is not trained on new words extracted from corpora or grammars. + _For custom models that are based on next-generation models_, the service ignores + the parameter. The words resource contains only custom words that the user adds or + modifies directly, so the parameter is unnecessary. """ ALL = 'all' USER = 'user' @@ -3852,6 +3958,9 @@ class WordType(str, Enum): * `user` shows only custom words that were added or modified by the user directly. * `corpora` shows only OOV that were extracted from corpora. * `grammars` shows only OOV words that are recognized by grammars. + _For a custom model that is based on a next-generation model_, only `all` and + `user` apply. Both options return the same results. Words from other sources are + not added to custom models that are based on next-generation models. """ ALL = 'all' USER = 'user' @@ -3902,13 +4011,14 @@ class Language(str, Enum): identifier `ar-AR` is deprecated; use `ar-MS` instead.) To determine the languages for which customization is available, see [Language support for - customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-customization#languageSupport). + customization](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-custom-support#custom-language-support). """ AR_AR = 'ar-AR' AR_MS = 'ar-MS' DE_DE = 'de-DE' EN_AU = 'en-AU' EN_GB = 'en-GB' + EN_IN = 'en-IN' EN_US = 'en-US' ES_AR = 'es-AR' ES_ES = 'es-ES' @@ -3918,9 +4028,11 @@ class Language(str, Enum): ES_PE = 'es-PE' FR_CA = 'fr-CA' FR_FR = 'fr-FR' + HI_IN = 'hi-IN' IT_IT = 'it-IT' JA_JP = 'ja-JP' KO_KR = 'ko-KR' + NL_BE = 'nl-BE' NL_NL = 'nl-NL' PT_BR = 'pt-BR' ZH_CN = 'zh-CN' @@ -3960,7 +4072,7 @@ class ContentType(str, Enum): class ContainedContentType(str, Enum): """ - **For an archive-type resource,** specify the format of the audio files that are + _For an archive-type resource_, specify the format of the audio files that are contained in the archive file if they are of type `audio/alaw`, `audio/basic`, `audio/l16`, or `audio/mulaw`. Include the `rate`, `channels`, and `endianness` parameters where necessary. In this case, all audio files that are contained in @@ -3971,7 +4083,7 @@ class ContainedContentType(str, Enum): The parameter accepts all of the audio formats that are supported for use with speech recognition. For more information, see **Content types for audio-type resources** in the method description. - **For an audio-type resource,** omit the header. 
+ _For an audio-type resource_, omit the header. """ AUDIO_ALAW = 'audio/alaw' AUDIO_BASIC = 'audio/basic' @@ -4000,8 +4112,8 @@ class AcousticModel(): Information about an existing custom acoustic model. :attr str customization_id: The customization ID (GUID) of the custom acoustic - model. The **Create a custom acoustic model** method returns only this field of - the object; it does not return the other fields. + model. The [Create a custom acoustic model](#createacousticmodel) method returns + only this field of the object; it does not return the other fields. :attr str created: (optional) The date and time in Coordinated Universal Time (UTC) at which the custom acoustic model was created. The value is provided in full ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`). @@ -4061,8 +4173,9 @@ def __init__(self, Initialize a AcousticModel object. :param str customization_id: The customization ID (GUID) of the custom - acoustic model. The **Create a custom acoustic model** method returns only - this field of the object; it does not return the other fields. + acoustic model. The [Create a custom acoustic model](#createacousticmodel) + method returns only this field of the object; it does not return the other + fields. :param str created: (optional) The date and time in Coordinated Universal Time (UTC) at which the custom acoustic model was created. The value is provided in full ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`). @@ -4307,13 +4420,13 @@ class AudioDetails(): * `undetermined` for a resource that the service cannot validate (for example, if the user mistakenly passes a file that does not contain audio, such as a JPEG file). - :attr str codec: (optional) **For an audio-type resource,** the codec in which - the audio is encoded. Omitted for an archive-type resource. - :attr int frequency: (optional) **For an audio-type resource,** the sampling - rate of the audio in Hertz (samples per second). Omitted for an archive-type + :attr str codec: (optional) _For an audio-type resource_, the codec in which the + audio is encoded. Omitted for an archive-type resource. + :attr int frequency: (optional) _For an audio-type resource_, the sampling rate + of the audio in Hertz (samples per second). Omitted for an archive-type resource. - :attr str compression: (optional) **For an archive-type resource,** the format - of the compressed archive: + :attr str compression: (optional) _For an archive-type resource_, the format of + the compressed archive: * `zip` for a **.zip** file * `gzip` for a **.tar.gz** file Omitted for an audio-type resource. @@ -4335,12 +4448,12 @@ def __init__(self, * `undetermined` for a resource that the service cannot validate (for example, if the user mistakenly passes a file that does not contain audio, such as a JPEG file). - :param str codec: (optional) **For an audio-type resource,** the codec in + :param str codec: (optional) _For an audio-type resource_, the codec in which the audio is encoded. Omitted for an archive-type resource. - :param int frequency: (optional) **For an audio-type resource,** the - sampling rate of the audio in Hertz (samples per second). Omitted for an + :param int frequency: (optional) _For an audio-type resource_, the sampling + rate of the audio in Hertz (samples per second). Omitted for an archive-type resource. 
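Because the [Create a custom acoustic model](#createacousticmodel) response carries only the customization ID, a typical flow captures that ID and fetches the full record in a later call. A minimal sketch, with placeholder credentials and names:

```python
from ibm_watson import SpeechToTextV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

service = SpeechToTextV1(authenticator=IAMAuthenticator('your_api_key'))
service.set_service_url('https://api.us-south.speech-to-text.watson.cloud.ibm.com')

# The create call returns only the customization_id field.
acoustic_model = service.create_acoustic_model(
    name='Example acoustic model',
    base_model_name='en-US_BroadbandModel',
    description='Example custom acoustic model').get_result()
customization_id = acoustic_model['customization_id']

# Subsequent calls return the full record, including status and created date.
print(service.get_acoustic_model(customization_id).get_result())
```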
- :param str compression: (optional) **For an archive-type resource,** the + :param str compression: (optional) _For an archive-type resource_, the format of the compressed archive: * `zip` for a **.zip** file * `gzip` for a **.tar.gz** file @@ -4417,7 +4530,7 @@ class TypeEnum(str, Enum): class CompressionEnum(str, Enum): """ - **For an archive-type resource,** the format of the compressed archive: + _For an archive-type resource_, the format of the compressed archive: * `zip` for a **.zip** file * `gzip` for a **.tar.gz** file Omitted for an audio-type resource. @@ -4430,15 +4543,15 @@ class AudioListing(): """ Information about an audio resource from a custom acoustic model. - :attr int duration: (optional) **For an audio-type resource,** the total - seconds of audio in the resource. Omitted for an archive-type resource. - :attr str name: (optional) **For an audio-type resource,** the user-specified - name of the resource. Omitted for an archive-type resource. - :attr AudioDetails details: (optional) **For an audio-type resource,** an + :attr int duration: (optional) _For an audio-type resource_, the total seconds + of audio in the resource. Omitted for an archive-type resource. + :attr str name: (optional) _For an audio-type resource_, the user-specified name + of the resource. Omitted for an archive-type resource. + :attr AudioDetails details: (optional) _For an audio-type resource_, an `AudioDetails` object that provides detailed information about the resource. The object is empty until the service finishes processing the audio. Omitted for an archive-type resource. - :attr str status: (optional) **For an audio-type resource,** the status of the + :attr str status: (optional) _For an audio-type resource_, the status of the resource: * `ok`: The service successfully analyzed the audio data. The data can be used to train the custom model. @@ -4448,10 +4561,10 @@ class AudioListing(): * `invalid`: The audio data is not valid for training the custom model (possibly because it has the wrong format or sampling rate, or because it is corrupted). Omitted for an archive-type resource. - :attr AudioResource container: (optional) **For an archive-type resource,** an + :attr AudioResource container: (optional) _For an archive-type resource_, an object of type `AudioResource` that provides information about the resource. Omitted for an audio-type resource. - :attr List[AudioResource] audio: (optional) **For an archive-type resource,** an + :attr List[AudioResource] audio: (optional) _For an archive-type resource_, an array of `AudioResource` objects that provides information about the audio-type resources that are contained in the resource. Omitted for an audio-type resource. @@ -4468,15 +4581,15 @@ def __init__(self, """ Initialize a AudioListing object. - :param int duration: (optional) **For an audio-type resource,** the total + :param int duration: (optional) _For an audio-type resource_, the total seconds of audio in the resource. Omitted for an archive-type resource. - :param str name: (optional) **For an audio-type resource,** the + :param str name: (optional) _For an audio-type resource_, the user-specified name of the resource. Omitted for an archive-type resource. - :param AudioDetails details: (optional) **For an audio-type resource,** an + :param AudioDetails details: (optional) _For an audio-type resource_, an `AudioDetails` object that provides detailed information about the resource. The object is empty until the service finishes processing the audio. 
Omitted for an archive-type resource. - :param str status: (optional) **For an audio-type resource,** the status of + :param str status: (optional) _For an audio-type resource_, the status of the resource: * `ok`: The service successfully analyzed the audio data. The data can be used to train the custom model. @@ -4487,11 +4600,11 @@ def __init__(self, (possibly because it has the wrong format or sampling rate, or because it is corrupted). Omitted for an archive-type resource. - :param AudioResource container: (optional) **For an archive-type - resource,** an object of type `AudioResource` that provides information - about the resource. Omitted for an audio-type resource. - :param List[AudioResource] audio: (optional) **For an archive-type - resource,** an array of `AudioResource` objects that provides information + :param AudioResource container: (optional) _For an archive-type resource_, + an object of type `AudioResource` that provides information about the + resource. Omitted for an audio-type resource. + :param List[AudioResource] audio: (optional) _For an archive-type + resource_, an array of `AudioResource` objects that provides information about the audio-type resources that are contained in the resource. Omitted for an audio-type resource. """ @@ -4564,7 +4677,7 @@ def __ne__(self, other: 'AudioListing') -> bool: class StatusEnum(str, Enum): """ - **For an audio-type resource,** the status of the resource: + _For an audio-type resource_, the status of the resource: * `ok`: The service successfully analyzed the audio data. The data can be used to train the custom model. * `being_processed`: The service is still analyzing the audio data. The service @@ -4988,9 +5101,9 @@ class AudioResource(): Information about an audio resource from a custom acoustic model. :attr int duration: The total seconds of audio in the audio resource. - :attr str name: **For an archive-type resource,** the user-specified name of the + :attr str name: _For an archive-type resource_, the user-specified name of the resource. - **For an audio-type resource,** the user-specified name of the resource or the + _For an audio-type resource_, the user-specified name of the resource or the name of the audio file that the user added for the resource. The value depends on the method that is called. :attr AudioDetails details: An `AudioDetails` object that provides detailed @@ -5014,9 +5127,9 @@ def __init__(self, duration: int, name: str, details: 'AudioDetails', Initialize a AudioResource object. :param int duration: The total seconds of audio in the audio resource. - :param str name: **For an archive-type resource,** the user-specified name - of the resource. - **For an audio-type resource,** the user-specified name of the resource or + :param str name: _For an archive-type resource_, the user-specified name of + the resource. + _For an audio-type resource_, the user-specified name of the resource or the name of the audio file that the user added for the resource. The value depends on the method that is called. :param AudioDetails details: An `AudioDetails` object that provides @@ -5274,8 +5387,11 @@ class Corpus(): :attr str name: The name of the corpus. :attr int total_words: The total number of words in the corpus. The value is `0` while the corpus is being processed. - :attr int out_of_vocabulary_words: The number of OOV words in the corpus. The - value is `0` while the corpus is being processed. 
+ :attr int out_of_vocabulary_words: _For custom models that are based on + previous-generation models_, the number of OOV words extracted from the corpus. + The value is `0` while the corpus is being processed. + _For custom models that are based on next-generation models_, no OOV words are + extracted from corpora, so the value is always `0`. :attr str status: The status of the corpus: * `analyzed`: The service successfully analyzed the corpus. The custom model can be trained with data from the corpus. @@ -5301,8 +5417,11 @@ def __init__(self, :param str name: The name of the corpus. :param int total_words: The total number of words in the corpus. The value is `0` while the corpus is being processed. - :param int out_of_vocabulary_words: The number of OOV words in the corpus. - The value is `0` while the corpus is being processed. + :param int out_of_vocabulary_words: _For custom models that are based on + previous-generation models_, the number of OOV words extracted from the + corpus. The value is `0` while the corpus is being processed. + _For custom models that are based on next-generation models_, no OOV words + are extracted from corpora, so the value is always `0`. :param str status: The status of the corpus: * `analyzed`: The service successfully analyzed the corpus. The custom model can be trained with data from the corpus. @@ -5408,14 +5527,15 @@ class CustomWord(): """ Information about a word that is to be added to a custom language model. - :attr str word: (optional) For the **Add custom words** method, you must specify - the custom word that is to be added to or updated in the custom model. Do not - include spaces in the word. Use a `-` (dash) or `_` (underscore) to connect the - tokens of compound words. - Omit this parameter for the **Add a custom word** method. - :attr List[str] sounds_like: (optional) An array of sounds-like pronunciations - for the custom word. Specify how words that are difficult to pronounce, foreign - words, acronyms, and so on can be pronounced by users. + :attr str word: (optional) For the [Add custom words](#addwords) method, you + must specify the custom word that is to be added to or updated in the custom + model. Do not include spaces in the word. Use a `-` (dash) or `_` (underscore) + to connect the tokens of compound words. + Omit this parameter for the [Add a custom word](#addword) method. + :attr List[str] sounds_like: (optional) _For a custom model that is based on a + previous-generation model_, an array of sounds-like pronunciations for the + custom word. Specify how words that are difficult to pronounce, foreign words, + acronyms, and so on can be pronounced by users. * For a word that is not in the service's base vocabulary, omit the parameter to have the service automatically generate a sounds-like pronunciation for the word. @@ -5425,6 +5545,9 @@ class CustomWord(): the base vocabulary. A word can have at most five sounds-like pronunciations. A pronunciation can include at most 40 characters not including spaces. + _For a custom model that is based on a next-generation model_, omit this field. + Custom models based on next-generation models do not support the `sounds_like` + field. The service ignores the field. :attr str display_as: (optional) An alternative spelling for the custom word when it appears in a transcript. Use the parameter when you want the word to have a spelling that is different from its usual representation or from its @@ -5439,14 +5562,15 @@ def __init__(self, """ Initialize a CustomWord object. 
- :param str word: (optional) For the **Add custom words** method, you must - specify the custom word that is to be added to or updated in the custom - model. Do not include spaces in the word. Use a `-` (dash) or `_` + :param str word: (optional) For the [Add custom words](#addwords) method, + you must specify the custom word that is to be added to or updated in the + custom model. Do not include spaces in the word. Use a `-` (dash) or `_` (underscore) to connect the tokens of compound words. - Omit this parameter for the **Add a custom word** method. - :param List[str] sounds_like: (optional) An array of sounds-like - pronunciations for the custom word. Specify how words that are difficult to - pronounce, foreign words, acronyms, and so on can be pronounced by users. + Omit this parameter for the [Add a custom word](#addword) method. + :param List[str] sounds_like: (optional) _For a custom model that is based + on a previous-generation model_, an array of sounds-like pronunciations for + the custom word. Specify how words that are difficult to pronounce, foreign + words, acronyms, and so on can be pronounced by users. * For a word that is not in the service's base vocabulary, omit the parameter to have the service automatically generate a sounds-like pronunciation for the word. @@ -5456,6 +5580,9 @@ def __init__(self, pronunciation from the base vocabulary. A word can have at most five sounds-like pronunciations. A pronunciation can include at most 40 characters not including spaces. + _For a custom model that is based on a next-generation model_, omit this + field. Custom models based on next-generation models do not support the + `sounds_like` field. The service ignores the field. :param str display_as: (optional) An alternative spelling for the custom word when it appears in a transcript. Use the parameter when you want the word to have a spelling that is different from its usual representation or @@ -5804,8 +5931,8 @@ class LanguageModel(): Information about an existing custom language model. :attr str customization_id: The customization ID (GUID) of the custom language - model. The **Create a custom language model** method returns only this field of - the object; it does not return the other fields. + model. The [Create a custom language model](#createlanguagemodel) method returns + only this field of the object; it does not return the other fields. :attr str created: (optional) The date and time in Coordinated Universal Time (UTC) at which the custom language model was created. The value is provided in full ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`). @@ -5826,10 +5953,14 @@ class LanguageModel(): models) * `es-US` for Mexican (North American) Spanish (`es-MX` models) Dialect values are case-insensitive. - :attr List[str] versions: (optional) A list of the available versions of the - custom language model. Each element of the array indicates a version of the base - model with which the custom model can be used. Multiple versions exist only if - the custom model has been upgraded; otherwise, only a single version is shown. + :attr List[str] versions: (optional) _For custom models that are based on + previous-generation models_, a list of the available versions of the custom + language model. Each element of the array indicates a version of the base model + with which the custom model can be used. Multiple versions exist only if the + custom model has been upgraded; otherwise, only a single version is shown. 
+ _For custom models that are based on next-generation models_, a single version of the custom model. Only one version of a custom model that is based on a next-generation model is ever available, and upgrading does not apply. :attr str owner: (optional) The GUID of the credentials for the instance of the service that owns the custom language model. :attr str name: (optional) The name of the custom language model. @@ -5881,8 +6012,9 @@ def __init__(self, Initialize a LanguageModel object. :param str customization_id: The customization ID (GUID) of the custom - language model. The **Create a custom language model** method returns only - this field of the object; it does not return the other fields. + language model. The [Create a custom language model](#createlanguagemodel) + method returns only this field of the object; it does not return the other + fields. :param str created: (optional) The date and time in Coordinated Universal Time (UTC) at which the custom language model was created. The value is provided in full ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`). @@ -5903,11 +6035,16 @@ def __init__(self, `es-PE` models) * `es-US` for Mexican (North American) Spanish (`es-MX` models) Dialect values are case-insensitive. - :param List[str] versions: (optional) A list of the available versions of - the custom language model. Each element of the array indicates a version of - the base model with which the custom model can be used. Multiple versions - exist only if the custom model has been upgraded; otherwise, only a single - version is shown. + :param List[str] versions: (optional) _For custom models that are based on + previous-generation models_, a list of the available versions of the custom + language model. Each element of the array indicates a version of the base + model with which the custom model can be used. Multiple versions exist only + if the custom model has been upgraded; otherwise, only a single version is + shown. + _For custom models that are based on next-generation models_, a single + version of the custom model. Only one version of a custom model that is + based on a next-generation model is ever available, and upgrading does not + apply. :param str owner: (optional) The GUID of the credentials for the instance of the service that owns the custom language model. :param str name: (optional) The name of the custom language model. @@ -6271,8 +6408,8 @@ def __ne__(self, other: 'ProcessedAudio') -> bool: class ProcessingMetrics(): """ If processing metrics are requested, information about the service's processing of the - input audio. Processing metrics are not available with the synchronous **Recognize - audio** method. + input audio. Processing metrics are not available with the synchronous [Recognize + audio](#recognize) method. :attr ProcessedAudio processed_audio: Detailed timing information about the service's processing of the input audio. @@ -6413,23 +6550,23 @@ class RecognitionJob(): :attr str updated: (optional) The date and time in Coordinated Universal Time (UTC) at which the job was last updated by the service. The value is provided in full ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`). This field is returned only - by the **Check jobs** and **Check a job** methods. + by the [Check jobs](#checkjobs) and [Check a job](#checkjob) methods. :attr str url: (optional) The URL to use to request information about the job - with the **Check a job** method. This field is returned only by the **Create a - job** method. + with the [Check a job](#checkjob) method.
This field is returned only by the + [Create a job](#createjob) method. :attr str user_token: (optional) The user token associated with a job that was created with a callback URL and a user token. This field can be returned only by - the **Check jobs** method. + the [Check jobs](#checkjobs) method. :attr List[SpeechRecognitionResults] results: (optional) If the status is `completed`, the results of the recognition request as an array that includes a single instance of a `SpeechRecognitionResults` object. This field is returned - only by the **Check a job** method. + only by the [Check a job](#checkjob) method. :attr List[str] warnings: (optional) An array of warning messages about invalid parameters included with the request. Each warning includes a descriptive message and a list of invalid argument strings, for example, `"unexpected query parameter 'user_token', query parameter 'callback_url' was not specified"`. The request succeeds despite the warnings. This field can be returned only by the - **Create a job** method. + [Create a job](#createjob) method. """ def __init__(self, @@ -6464,23 +6601,24 @@ def __init__(self, :param str updated: (optional) The date and time in Coordinated Universal Time (UTC) at which the job was last updated by the service. The value is provided in full ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`). This field - is returned only by the **Check jobs** and **Check a job** methods. + is returned only by the [Check jobs](#checkjobs) and [Check a + job](#checkjob) methods. :param str url: (optional) The URL to use to request information about the - job with the **Check a job** method. This field is returned only by the - **Create a job** method. + job with the [Check a job](#checkjob) method. This field is returned only + by the [Create a job](#createjob) method. :param str user_token: (optional) The user token associated with a job that was created with a callback URL and a user token. This field can be - returned only by the **Check jobs** method. + returned only by the [Check jobs](#checkjobs) method. :param List[SpeechRecognitionResults] results: (optional) If the status is `completed`, the results of the recognition request as an array that includes a single instance of a `SpeechRecognitionResults` object. This - field is returned only by the **Check a job** method. + field is returned only by the [Check a job](#checkjob) method. :param List[str] warnings: (optional) An array of warning messages about invalid parameters included with the request. Each warning includes a descriptive message and a list of invalid argument strings, for example, `"unexpected query parameter 'user_token', query parameter 'callback_url' was not specified"`. The request succeeds despite the warnings. This field - can be returned only by the **Create a job** method. + can be returned only by the [Create a job](#createjob) method. """ self.id = id self.status = status @@ -7053,10 +7191,9 @@ class SpeechRecognitionAlternative(): :attr str transcript: A transcription of the audio. :attr float confidence: (optional) A score that indicates the service's - confidence in the transcript in the range of 0.0 to 1.0. For speech recognition - with previous-generation models, a confidence score is returned only for the - best alternative and only with results marked as final. For speech recognition - with next-generation models, a confidence score is never returned. + confidence in the transcript in the range of 0.0 to 1.0.
The service returns a + confidence score only for the best alternative and only with results marked as + final. :attr List[str] timestamps: (optional) Time alignments for each word from the transcript as a list of lists. Each inner list consists of three elements: the word followed by its start and end time in seconds, for example: @@ -7080,11 +7217,9 @@ def __init__(self, :param str transcript: A transcription of the audio. :param float confidence: (optional) A score that indicates the service's - confidence in the transcript in the range of 0.0 to 1.0. For speech - recognition with previous-generation models, a confidence score is returned - only for the best alternative and only with results marked as final. For - speech recognition with next-generation models, a confidence score is never - returned. + confidence in the transcript in the range of 0.0 to 1.0. The service + returns a confidence score only for the best alternative and only with + results marked as final. :param List[str] timestamps: (optional) Time alignments for each word from the transcript as a list of lists. Each inner list consists of three elements: the word followed by its start and end time in seconds, for @@ -7349,8 +7484,8 @@ class SpeechRecognitionResults(): object to include only the `speaker_labels` field. :attr ProcessingMetrics processing_metrics: (optional) If processing metrics are requested, information about the service's processing of the input audio. - Processing metrics are not available with the synchronous **Recognize audio** - method. + Processing metrics are not available with the synchronous [Recognize + audio](#recognize) method. :attr AudioMetrics audio_metrics: (optional) If audio metrics are requested, information about the signal characteristics of the input audio. :attr List[str] warnings: (optional) An array of warning messages associated @@ -7401,7 +7536,7 @@ def __init__(self, :param ProcessingMetrics processing_metrics: (optional) If processing metrics are requested, information about the service's processing of the input audio. Processing metrics are not available with the synchronous - **Recognize audio** method. + [Recognize audio](#recognize) method. :param AudioMetrics audio_metrics: (optional) If audio metrics are requested, information about the signal characteristics of the input audio. :param List[str] warnings: (optional) An array of warning messages @@ -7504,9 +7639,13 @@ class SupportedFeatures(): :attr bool speaker_labels: Indicates whether the `speaker_labels` parameter can be used with the language model. **Note:** The field returns `true` for all models. However, speaker labels are - supported only for US English, Australian English, German, Japanese, Korean, and - Spanish (both broadband and narrowband models) and UK English (narrowband model - only). Speaker labels are not supported for any other models. + supported as beta functionality only for the following languages and models: + * For previous-generation models, the parameter can be used for Australian + English, US English, German, Japanese, Korean, and Spanish (both broadband and + narrowband models) and UK English (narrowband model) transcription only. + * For next-generation models, the parameter can be used for English (Australian, + Indian, UK, and US), German, Japanese, Korean, and Spanish transcription only. + Speaker labels are not supported for any other models. :attr bool low_latency: (optional) Indicates whether the `low_latency` parameter can be used with a next-generation language model. 
The field is returned only for next-generation models. Previous-generation models do not support the @@ -7527,10 +7666,16 @@ def __init__(self, :param bool speaker_labels: Indicates whether the `speaker_labels` parameter can be used with the language model. **Note:** The field returns `true` for all models. However, speaker labels - are supported only for US English, Australian English, German, Japanese, - Korean, and Spanish (both broadband and narrowband models) and UK English - (narrowband model only). Speaker labels are not supported for any other - models. + are supported as beta functionality only for the following languages and + models: + * For previous-generation models, the parameter can be used for Australian + English, US English, German, Japanese, Korean, and Spanish (both broadband + and narrowband models) and UK English (narrowband model) transcription + only. + * For next-generation models, the parameter can be used for English + (Australian, Indian, UK, and US), German, Japanese, Korean, and Spanish + transcription only. + Speaker labels are not supported for any other models. :param bool low_latency: (optional) Indicates whether the `low_latency` parameter can be used with a next-generation language model. The field is returned only for next-generation models. Previous-generation models do not @@ -7754,25 +7899,39 @@ class Word(): :attr str word: A word from the custom model's words resource. The spelling of the word is used to train the model. - :attr List[str] sounds_like: An array of pronunciations for the word. The array - can include the sounds-like pronunciation automatically generated by the service - if none is provided for the word; the service adds this pronunciation when it - finishes processing the word. + :attr List[str] sounds_like: _For a custom model that is based on a + previous-generation model_, an array of as many as five pronunciations for the + word. The array can include the sounds-like pronunciation that is automatically + generated by the service if none is provided when the word is added to the + custom model; the service adds this pronunciation when it finishes processing + the word. + _For a custom model that is based on a next-generation model_, this field does + not apply. Custom models based on next-generation models do not support the + `sounds_like` field, which is ignored. :attr str display_as: The spelling of the word that the service uses to display the word in a transcript. The field contains an empty string if no display-as value is provided for the word, in which case the word is displayed as it is spelled. - :attr int count: A sum of the number of times the word is found across all - corpora. For example, if the word occurs five times in one corpus and seven + :attr int count: _For a custom model that is based on a previous-generation + model_, a sum of the number of times the word is found across all corpora and + grammars. For example, if the word occurs five times in one corpus and seven times in another, its count is `12`. If you add a custom word to a model before - it is added by any corpora, the count begins at `1`; if the word is added from a - corpus first and later modified, the count reflects only the number of times it - is found in corpora. + it is added by any corpora or grammars, the count begins at `1`; if the word is + added from a corpus or grammar first and later modified, the count reflects only + the number of times it is found in corpora and grammars. 
+ _For a custom model that is based on a next-generation model_, the `count` field + for any word is always `1`. :attr List[str] source: An array of sources that describes how the word was - added to the custom model's words resource. For OOV words added from a corpus, - includes the name of the corpus; if the word was added by multiple corpora, the - names of all corpora are listed. If the word was modified or added by the user - directly, the field includes the string `user`. + added to the custom model's words resource. + * _For a custom model that is based on a previous-generation model,_ the field + includes the name of each corpus and grammar from which the service extracted + the word. For OOV words that are added by multiple corpora or grammars, the names of + all corpora and grammars are listed. If you modified or added the word directly, + the field includes the string `user`. + * _For a custom model that is based on a next-generation model,_ this field + shows only `user` for custom words that were added directly to the custom model. + Words from corpora and grammars are not added to the words resource for custom + models that are based on next-generation models. :attr List[WordError] error: (optional) If the service discovered one or more problems that you need to correct for the word's definition, an array that describes each of the errors. @@ -7791,25 +7950,40 @@ def __init__(self, :param str word: A word from the custom model's words resource. The spelling of the word is used to train the model. - :param List[str] sounds_like: An array of pronunciations for the word. The - array can include the sounds-like pronunciation automatically generated by - the service if none is provided for the word; the service adds this - pronunciation when it finishes processing the word. + :param List[str] sounds_like: _For a custom model that is based on a + previous-generation model_, an array of as many as five pronunciations for + the word. The array can include the sounds-like pronunciation that is + automatically generated by the service if none is provided when the word is + added to the custom model; the service adds this pronunciation when it + finishes processing the word. + _For a custom model that is based on a next-generation model_, this field + does not apply. Custom models based on next-generation models do not + support the `sounds_like` field, which is ignored. :param str display_as: The spelling of the word that the service uses to display the word in a transcript. The field contains an empty string if no display-as value is provided for the word, in which case the word is displayed as it is spelled. - :param int count: A sum of the number of times the word is found across all - corpora. For example, if the word occurs five times in one corpus and seven - times in another, its count is `12`. If you add a custom word to a model - before it is added by any corpora, the count begins at `1`; if the word is - added from a corpus first and later modified, the count reflects only the - number of times it is found in corpora. + :param int count: _For a custom model that is based on a + previous-generation model_, a sum of the number of times the word is found + across all corpora and grammars. For example, if the word occurs five times + in one corpus and seven times in another, its count is `12`.
If you add a + custom word to a model before it is added by any corpora or grammars, the + count begins at `1`; if the word is added from a corpus or grammar first + and later modified, the count reflects only the number of times it is found + in corpora and grammars. + _For a custom model that is based on a next-generation model_, the `count` + field for any word is always `1`. :param List[str] source: An array of sources that describes how the word - was added to the custom model's words resource. For OOV words added from a - corpus, includes the name of the corpus; if the word was added by multiple - corpora, the names of all corpora are listed. If the word was modified or - added by the user directly, the field includes the string `user`. + was added to the custom model's words resource. + * _For a custom model that is based on a previous-generation model,_ the + field includes the name of each corpus and grammar from which the service + extracted the word. For OOV words that are added by multiple corpora or grammars, + the names of all corpora and grammars are listed. If you modified or added + the word directly, the field includes the string `user`. + * _For a custom model that is based on a next-generation model,_ this field + shows only `user` for custom words that were added directly to the custom + model. Words from corpora and grammars are not added to the words resource + for custom models that are based on next-generation models. :param List[WordError] error: (optional) If the service discovered one or more problems that you need to correct for the word's definition, an array that describes each of the errors. diff --git a/ibm_watson/text_to_speech_v1.py b/ibm_watson/text_to_speech_v1.py index fb0716e7..4924eeed 100644 --- a/ibm_watson/text_to_speech_v1.py +++ b/ibm_watson/text_to_speech_v1.py @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -# IBM OpenAPI SDK Code Generator Version: 99-SNAPSHOT-902c9336-20210507-162723 +# IBM OpenAPI SDK Code Generator Version: 3.38.0-07189efd-20210827-205025 """ The IBM Watson™ Text to Speech service provides APIs that use IBM's speech-synthesis capabilities to synthesize text into natural-sounding speech in a variety of languages, @@ -30,11 +30,16 @@ that, when combined, sound like the word. A phonetic translation is based on the SSML phoneme format for representing a word. You can specify a phonetic translation in standard International Phonetic Alphabet (IPA) representation or in the proprietary IBM Symbolic -Phonetic Representation (SPR). The Arabic, Chinese, Dutch, Australian English, and Korean -languages support only IPA. +Phonetic Representation (SPR). The service also offers a Tune by Example feature that lets you define custom prompts. You can also define speaker models to improve the quality of your custom prompts. The service supports custom prompts only for US English custom models and voices. +**IBM Cloud®.** The Arabic, Chinese, Dutch, Australian English, and Korean languages +and voices are supported only for IBM Cloud. For phonetic translation, they support only +IPA, not SPR. + +API Version: 1.0.0 +See: https://cloud.ibm.com/docs/text-to-speech """ from enum import Enum @@ -89,8 +94,8 @@ def list_voices(self, **kwargs) -> DetailedResponse: Lists all voices available for use with the service. The information includes the name, language, gender, and other details about the voice.
The ordering of the list of voices can change from call to call; do not rely on an alphabetized or - static list of voices. To see information about a specific voice, use the **Get a - voice** method. + static list of voices. To see information about a specific voice, use the [Get a + voice](#getvoice) method. **See also:** [Listing all available voices](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-voices#listVoices). @@ -112,7 +117,7 @@ def list_voices(self, **kwargs) -> DetailedResponse: url = '/v1/voices' request = self.prepare_request(method='GET', url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_voice(self, @@ -126,11 +131,11 @@ def get_voice(self, Gets information about the specified voice. The information includes the name, language, gender, and other details about the voice. Specify a customization ID to obtain information for a custom model that is defined for the language of the - specified voice. To list information about all available voices, use the **List - voices** method. + specified voice. To list information about all available voices, use the [List + voices](#listvoices) method. **See also:** [Listing a specific voice](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-voices#listVoice). - ### Important voice updates + ### Important voice updates for IBM Cloud The service's voices underwent significant change on 2 December 2020. * The Arabic, Chinese, Dutch, Australian English, and Korean voices are now neural instead of concatenative. @@ -150,11 +155,13 @@ def get_voice(self, equivalent neural voices at your earliest convenience. For more information about all voice updates, see the [2 December 2020 service update](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-release-notes#December2020) - in the release notes. + in the release notes for IBM Cloud. :param str voice: The voice for which information is to be returned. For - more information about specifying a voice, see **Important voice updates** - in the method description. + more information about specifying a voice, see **Important voice updates + for IBM Cloud** in the method description. + **IBM Cloud:** The Arabic, Chinese, Dutch, Australian English, and Korean + languages and voices are supported only for IBM Cloud. :param str customization_id: (optional) The customization ID (GUID) of a custom model for which information is to be returned. You must make the request with credentials for the instance of the service that owns the @@ -188,7 +195,7 @@ def get_voice(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -254,9 +261,9 @@ def synthesize(self, * `audio/webm;codecs=vorbis` - You can optionally specify the `rate` of the audio. The default sampling rate is 22,050 Hz. For more information about specifying an audio format, including additional - details about some of the formats, see [Audio - formats](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-audioFormats#audioFormats). - ### Important voice updates + details about some of the formats, see [Using audio + formats](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-audio-formats). + ### Important voice updates for IBM Cloud The service's voices underwent significant change on 2 December 2020. * The Arabic, Chinese, Dutch, Australian English, and Korean voices are now neural instead of concatenative.
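A minimal sketch of the two voice-lookup calls documented above, assuming a placeholder API key and the us-south endpoint; `en-US_MichaelV3Voice` stands in for any voice name returned by `list_voices`:

```python
import json
from ibm_watson import TextToSpeechV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

service = TextToSpeechV1(authenticator=IAMAuthenticator('your_api_key'))
service.set_service_url('https://api.us-south.text-to-speech.watson.cloud.ibm.com')

# The order of the returned voices is not guaranteed; look voices up by name.
voices = service.list_voices().get_result()
print(json.dumps(voices, indent=2))

# Fetch details for a single (neural) voice.
voice = service.get_voice('en-US_MichaelV3Voice').get_result()
print(json.dumps(voice, indent=2))
```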
@@ -276,7 +283,7 @@ def synthesize(self, equivalent neural voices at your earliest convenience. For more information about all voice updates, see the [2 December 2020 service update](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-release-notes#December2020) - in the release notes. + in the release notes for IBM Cloud. ### Warning messages If a request includes invalid query parameters, the service returns a `Warnings` response header that provides messages about the invalid parameters. The warning @@ -291,8 +298,12 @@ def synthesize(self, the audio format. For more information about specifying an audio format, see **Audio formats (accept types)** in the method description. :param str voice: (optional) The voice to use for synthesis. For more - information about specifying a voice, see **Important voice updates** in - the method description. + information about specifying a voice, see **Important voice updates for IBM + Cloud** in the method description. + **IBM Cloud:** The Arabic, Chinese, Dutch, Australian English, and Korean + languages and voices are supported only for IBM Cloud. + **See also:** [Using languages and + voices](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-voices). :param str customization_id: (optional) The customization ID (GUID) of a custom model to use for the synthesis. If a custom model is specified, it works only if it matches the language of the indicated voice. You must make @@ -329,7 +340,7 @@ def synthesize(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -352,7 +363,7 @@ def get_pronunciation(self, for a specific custom model to see the translation for that model. **See also:** [Querying a word from a language](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-customWords#cuWordsQueryLanguage). - ### Important voice updates + ### Important voice updates for IBM Cloud The service's voices underwent significant change on 2 December 2020. * The Arabic, Chinese, Dutch, Australian English, and Korean voices are now neural instead of concatenative. @@ -372,14 +383,16 @@ def get_pronunciation(self, equivalent neural voices at your earliest convenience. For more information about all voice updates, see the [2 December 2020 service update](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-release-notes#December2020) - in the release notes. + in the release notes for IBM Cloud. :param str text: The word for which the pronunciation is requested. :param str voice: (optional) A voice that specifies the language in which the pronunciation is to be returned. All voices for the same language (for example, `en-US`) return the same translation. For more information about - specifying a voice, see **Important voice updates** in the method - description. + specifying a voice, see **Important voice updates for IBM Cloud** in the + method description. + **IBM Cloud:** The Arabic, Chinese, Dutch, Australian English, and Korean + languages and voices are supported only for IBM Cloud. :param str format: (optional) The phoneme format in which to return the pronunciation. The Arabic, Chinese, Dutch, Australian English, and Korean languages support only IPA.
Omit the parameter to obtain the pronunciation @@ -422,7 +435,7 @@ def get_pronunciation(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -444,7 +457,7 @@ def create_custom_model(self, used to create it. **See also:** [Creating a custom model](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-customModels#cuModelsCreate). - ### Important voice updates + ### Important voice updates for IBM Cloud The service's voices underwent significant change on 2 December 2020. * The Arabic, Chinese, Dutch, Australian English, and Korean voices are now neural instead of concatenative. @@ -464,7 +477,7 @@ def create_custom_model(self, equivalent neural voices at your earliest convenience. For more information about all voice updates, see the [2 December 2020 service update](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-release-notes#December2020) - in the release notes. + in the release notes for IBM Cloud. :param str name: The name of the new custom model. :param str language: (optional) The language of the new custom model. You @@ -473,6 +486,8 @@ the parameter to use the default language, `en-US`. **Note:** The `ar-AR` language identifier cannot be used to create a custom model. Use the `ar-MS` identifier instead. + **IBM Cloud:** The Arabic, Chinese, Dutch, Australian English, and Korean + languages and voices are supported only for IBM Cloud. :param str description: (optional) A description of the new custom model. Specifying a description is recommended. :param dict headers: A `dict` containing the request headers @@ -503,7 +518,7 @@ headers=headers, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def list_custom_models(self, @@ -516,9 +531,9 @@ def list_custom_models(self, Lists metadata such as the name and description for all custom models that are owned by an instance of the service. Specify a language to list the custom models for that language only. To see the words and prompts in addition to the metadata - for a specific custom model, use the **Get a custom model** method. You must use - credentials for the instance of the service that owns a model to list information - about it. + for a specific custom model, use the [Get a custom model](#getcustommodel) method. + You must use credentials for the instance of the service that owns a model to list + information about it. **See also:** [Querying all custom models](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-customModels#cuModelsQueryAll). @@ -548,7 +563,7 @@ headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def update_custom_model(self, @@ -626,7 +641,7 @@ headers=headers, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_custom_model(self, customization_id: str, @@ -637,8 +652,8 @@ def get_custom_model(self, customization_id: str, Gets all information about a specified custom model. In addition to metadata such as the name and description of the custom model, the output includes the words and their translations that are defined for the model, as well as any prompts that are - defined for the model. To see just the metadata for a model, use the **List custom - models** method.
+ defined for the model. To see just the metadata for a model, use the [List custom + models](#listcustommodels) method. **See also:** [Querying a custom model](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-customModels#cuModelsQuery). @@ -668,7 +683,7 @@ def get_custom_model(self, customization_id: str, url = '/v1/customizations/{customization_id}'.format(**path_param_dict) request = self.prepare_request(method='GET', url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_custom_model(self, customization_id: str, @@ -708,7 +723,7 @@ def delete_custom_model(self, customization_id: str, url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -746,14 +761,14 @@ def add_words(self, customization_id: str, words: List['Word'], :param str customization_id: The customization ID (GUID) of the custom model. You must make the request with credentials for the instance of the service that owns the custom model. - :param List[Word] words: The **Add custom words** method accepts an array - of `Word` objects. Each object provides a word that is to be added or - updated for the custom model and the word's translation. - The **List custom words** method returns an array of `Word` objects. Each - object shows a word and its translation from the custom model. The words - are listed in alphabetical order, with uppercase letters listed before - lowercase letters. The array is empty if the custom model contains no - words. + :param List[Word] words: The [Add custom words](#addwords) method accepts + an array of `Word` objects. Each object provides a word that is to be added + or updated for the custom model and the word's translation. + The [List custom words](#listwords) method returns an array of `Word` + objects. Each object shows a word and its translation from the custom + model. The words are listed in alphabetical order, with uppercase letters + listed before lowercase letters. The array is empty if the custom model + contains no words. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
:rtype: DetailedResponse @@ -789,7 +804,7 @@ def add_words(self, customization_id: str, words: List['Word'], headers=headers, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def list_words(self, customization_id: str, **kwargs) -> DetailedResponse: @@ -829,7 +844,7 @@ def list_words(self, customization_id: str, **kwargs) -> DetailedResponse: **path_param_dict) request = self.prepare_request(method='GET', url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response def add_word(self, @@ -918,7 +933,7 @@ def add_word(self, headers=headers, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_word(self, customization_id: str, word: str, @@ -962,7 +977,7 @@ def get_word(self, customization_id: str, word: str, **path_param_dict) request = self.prepare_request(method='GET', url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_word(self, customization_id: str, word: str, @@ -1006,7 +1021,7 @@ def delete_word(self, customization_id: str, word: str, url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -1022,11 +1037,11 @@ def list_custom_prompts(self, customization_id: str, The information includes the prompt ID, prompt text, status, and optional speaker ID for each prompt of the custom model. You must use credentials for the instance of the service that owns the custom model. The same information about all of the - prompts for a custom model is also provided by the **Get a custom model** method. - That method provides complete details about a specified custom model, including - its language, owner, custom words, and more. - **Beta:** Custom prompts are beta functionality that is supported only for use - with US English custom models and voices. + prompts for a custom model is also provided by the [Get a custom + model](#getcustommodel) method. That method provides complete details about a + specified custom model, including its language, owner, custom words, and more. + Custom prompts are supported only for use with US English custom models and + voices. **See also:** [Listing custom prompts](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-custom-prompts#tbe-custom-prompts-list). @@ -1057,7 +1072,7 @@ def list_custom_prompts(self, customization_id: str, **path_param_dict) request = self.prepare_request(method='GET', url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response def add_custom_prompt(self, customization_id: str, prompt_id: str, @@ -1091,11 +1106,11 @@ def add_custom_prompt(self, customization_id: str, prompt_id: str, processing time for a reasonably sized prompt generally matches the length of the audio (for example, it takes 20 seconds to process a 20-second prompt). For shorter prompts, you can wait for a reasonable amount of time and then check - the status of the prompt with the **Get a custom prompt** method. For longer - prompts, consider using that method to poll the service every few seconds to - determine when the prompt becomes available. No prompt can be used for speech - synthesis if it is in the `processing` or `failed` state. Only prompts that are in - the `available` state can be used for speech synthesis. 
+ the status of the prompt with the [Get a custom prompt](#getcustomprompt) method. + For longer prompts, consider using that method to poll the service every few + seconds to determine when the prompt becomes available. No prompt can be used for + speech synthesis if it is in the `processing` or `failed` state. Only prompts that + are in the `available` state can be used for speech synthesis. When it processes a request, the service attempts to align the text and the audio that are provided for the prompt. The text that is passed with a prompt must match the spoken audio as closely as possible. Optimally, the text and audio match @@ -1126,8 +1141,8 @@ def add_custom_prompt(self, customization_id: str, prompt_id: str, is one recommended means of potentially improving the quality of the prompt. This is especially important for shorter prompts such as "good-bye" or "thank you," where less audio data makes it more difficult to match the prosody of the speaker. - **Beta:** Custom prompts are beta functionality that is supported only for use - with US English custom models and voices. + Custom prompts are supported only for use with US English custom models and + voices. **See also:** * [Add a custom prompt](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-create#tbe-create-add-prompt) @@ -1198,7 +1213,7 @@ def add_custom_prompt(self, customization_id: str, prompt_id: str, headers=headers, files=form_data) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_custom_prompt(self, customization_id: str, prompt_id: str, @@ -1209,8 +1224,7 @@ def get_custom_prompt(self, customization_id: str, prompt_id: str, Gets information about a specified custom prompt for a specified custom model. The information includes the prompt ID, prompt text, status, and optional speaker ID for each prompt of the custom model. You must use credentials for the instance of - the service that owns the custom model. - **Beta:** Custom prompts are beta functionality that is supported only for use + the service that owns the custom model. Custom prompts are supported only for use with US English custom models and voices. **See also:** [Listing custom prompts](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-custom-prompts#tbe-custom-prompts-list). @@ -1245,7 +1259,7 @@ def get_custom_prompt(self, customization_id: str, prompt_id: str, **path_param_dict) request = self.prepare_request(method='GET', url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_custom_prompt(self, customization_id: str, prompt_id: str, @@ -1258,9 +1272,8 @@ def delete_custom_prompt(self, customization_id: str, prompt_id: str, service that owns the custom model from which the prompt is to be deleted. **Caution:** Deleting a custom prompt elicits a 400 response code from synthesis requests that attempt to use the prompt. Make sure that you do not attempt to use - a deleted prompt in a production application. - **Beta:** Custom prompts are beta functionality that is supported only for use - with US English custom models and voices. + a deleted prompt in a production application. Custom prompts are supported only + for use with US English custom models and voices. **See also:** [Deleting a custom prompt](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-custom-prompts#tbe-custom-prompts-delete). 
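To make the prompt lifecycle above concrete — a minimal sketch of adding a prompt and then polling [Get a custom prompt](#getcustomprompt) until it leaves the `processing` state; the prompt ID, prompt text, and WAV file are placeholders, and `text_to_speech` and `customization_id` are assumed to be set up as in the earlier sketch:

import time
from ibm_watson.text_to_speech_v1 import PromptMetadata

# Placeholders: a prompt ID of your choosing and a short WAV recording
# whose spoken audio matches the prompt text as closely as possible.
prompt_id = 'goodbye'
metadata = PromptMetadata(prompt_text='Good-bye and thank you.')
with open('goodbye.wav', 'rb') as audio_file:
    text_to_speech.add_custom_prompt(customization_id, prompt_id, metadata, audio_file)

# Poll until processing finishes; only `available` prompts can be used for synthesis.
while True:
    prompt = text_to_speech.get_custom_prompt(customization_id, prompt_id).get_result()
    if prompt['status'] != 'processing':
        break
    time.sleep(5)
print(prompt['status'])  # 'available' on success, 'failed' otherwise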
@@ -1296,7 +1309,7 @@ def delete_custom_prompt(self, customization_id: str, prompt_id: str, url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -1310,10 +1323,8 @@ def list_speaker_models(self, **kwargs) -> DetailedResponse: Lists information about all speaker models that are defined for a service instance. The information includes the speaker ID and speaker name of each defined speaker. You must use credentials for the instance of a service to list its - speakers. - **Beta:** Speaker models and the custom prompts with which they are used are beta - functionality that is supported only for use with US English custom models and - voices. + speakers. Speaker models and the custom prompts with which they are used are + supported only for use with US English custom models and voices. **See also:** [Listing speaker models](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-speaker-models#tbe-speaker-models-list). @@ -1335,7 +1346,7 @@ def list_speaker_models(self, **kwargs) -> DetailedResponse: url = '/v1/speakers' request = self.prepare_request(method='GET', url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response def create_speaker_model(self, speaker_name: str, audio: BinaryIO, @@ -1375,10 +1386,8 @@ def create_speaker_model(self, speaker_name: str, audio: BinaryIO, returns, the audio is fully processed and the speaker enrollment is complete. The service returns a speaker ID with the request. A speaker ID is a globally unique identifier (GUID) that you use to identify the speaker in subsequent requests to - the service. - **Beta:** Speaker models and the custom prompts with which they are used are beta - functionality that is supported only for use with US English custom models and - voices. + the service. Speaker models and the custom prompts with which they are used are + supported only for use with US English custom models and voices. **See also:** * [Create a speaker model](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-create#tbe-create-speaker-model) @@ -1432,7 +1441,7 @@ def create_speaker_model(self, speaker_name: str, audio: BinaryIO, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_speaker_model(self, speaker_id: str, **kwargs) -> DetailedResponse: @@ -1444,9 +1453,8 @@ def get_speaker_model(self, speaker_id: str, **kwargs) -> DetailedResponse: the customization IDs of the custom models. For each custom model, it lists information about each prompt that is defined for that custom model by the speaker. You must use credentials for the instance of the service that owns a - speaker model to list its prompts. - **Beta:** Speaker models and the custom prompts with which they are used are beta - functionality that is supported only for use with US English custom models and + speaker model to list its prompts. Speaker models and the custom prompts with + which they are used are supported only for use with US English custom models and voices. **See also:** [Listing the custom prompts for a speaker model](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-speaker-models#tbe-speaker-models-list-prompts).
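The speaker-model flow is similar but synchronous — per the docstring above, enrollment is complete when create_speaker_model returns, so no polling is needed. A minimal sketch with a placeholder WAV sample, reusing the `text_to_speech` client from the earlier sketch:

# Placeholder audio: a WAV sample of the speaker's voice.
with open('speaker_sample.wav', 'rb') as audio_file:
    speaker = text_to_speech.create_speaker_model('local_speaker', audio_file).get_result()
speaker_id = speaker['speaker_id']  # GUID used in subsequent speaker requests

# List all speakers defined for the service instance.
print(text_to_speech.list_speaker_models().get_result())

# For one speaker, list its prompts grouped by custom model.
print(text_to_speech.get_speaker_model(speaker_id).get_result())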
@@ -1477,7 +1485,7 @@ def get_speaker_model(self, speaker_id: str, **kwargs) -> DetailedResponse: url = '/v1/speakers/{speaker_id}'.format(**path_param_dict) request = self.prepare_request(method='GET', url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_speaker_model(self, speaker_id: str, @@ -1492,10 +1500,9 @@ def delete_speaker_model(self, speaker_id: str, speaker's deletion. The prosodic data that defines the quality of a prompt is established when the prompt is created. A prompt is static and remains unaffected by deletion of its associated speaker. However, the prompt cannot be resubmitted - or updated with its original speaker once that speaker is deleted. - **Beta:** Speaker models and the custom prompts with which they are used are beta - functionality that is supported only for use with US English custom models and - voices. + or updated with its original speaker once that speaker is deleted. Speaker models + and the custom prompts with which they are used are supported only for use with US + English custom models and voices. **See also:** [Deleting a speaker model](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-speaker-models#tbe-speaker-models-delete). @@ -1526,7 +1533,7 @@ def delete_speaker_model(self, speaker_id: str, url=url, headers=headers) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -1577,7 +1584,7 @@ def delete_user_data(self, customer_id: str, **kwargs) -> DetailedResponse: headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response @@ -1589,7 +1596,10 @@ class GetVoiceEnums: class Voice(str, Enum): """ The voice for which information is to be returned. For more information about - specifying a voice, see **Important voice updates** in the method description. + specifying a voice, see **Important voice updates for IBM Cloud** in the method + description. + **IBM Cloud:** The Arabic, Chinese, Dutch, Australian English, and Korean + languages and voices are supported only for IBM Cloud. """ AR_AR_OMARVOICE = 'ar-AR_OmarVoice' AR_MS_OMARVOICE = 'ar-MS_OmarVoice' @@ -1634,6 +1644,7 @@ class Voice(str, Enum): KO_KR_SIWOOVOICE = 'ko-KR_SiWooVoice' KO_KR_YOUNGMIVOICE = 'ko-KR_YoungmiVoice' KO_KR_YUNAVOICE = 'ko-KR_YunaVoice' + NL_BE_ADELEVOICE = 'nl-BE_AdeleVoice' NL_NL_EMMAVOICE = 'nl-NL_EmmaVoice' NL_NL_LIAMVOICE = 'nl-NL_LiamVoice' PT_BR_ISABELAVOICE = 'pt-BR_IsabelaVoice' @@ -1672,7 +1683,11 @@ class Accept(str, Enum): class Voice(str, Enum): """ The voice to use for synthesis. For more information about specifying a voice, see - **Important voice updates** in the method description. + **Important voice updates for IBM Cloud** in the method description. + **IBM Cloud:** The Arabic, Chinese, Dutch, Australian English, and Korean + languages and voices are supported only for IBM Cloud. + **See also:** [Using languages and + voices](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-voices).
""" AR_AR_OMARVOICE = 'ar-AR_OmarVoice' AR_MS_OMARVOICE = 'ar-MS_OmarVoice' @@ -1717,6 +1732,7 @@ class Voice(str, Enum): KO_KR_SIWOOVOICE = 'ko-KR_SiWooVoice' KO_KR_YOUNGMIVOICE = 'ko-KR_YoungmiVoice' KO_KR_YUNAVOICE = 'ko-KR_YunaVoice' + NL_BE_ADELEVOICE = 'nl-BE_AdeleVoice' NL_NL_EMMAVOICE = 'nl-NL_EmmaVoice' NL_NL_LIAMVOICE = 'nl-NL_LiamVoice' PT_BR_ISABELAVOICE = 'pt-BR_IsabelaVoice' @@ -1736,7 +1752,9 @@ class Voice(str, Enum): A voice that specifies the language in which the pronunciation is to be returned. All voices for the same language (for example, `en-US`) return the same translation. For more information about specifying a voice, see **Important voice - updates** in the method description. + updates for IBM Cloud** in the method description. + **IBM Cloud:** The Arabic, Chinese, Dutch, Australian English, and Korean + languages and voices are supported only for IBM Cloud. """ AR_AR_OMARVOICE = 'ar-AR_OmarVoice' AR_MS_OMARVOICE = 'ar-MS_OmarVoice' @@ -1781,6 +1799,7 @@ class Voice(str, Enum): KO_KR_SIWOOVOICE = 'ko-KR_SiWooVoice' KO_KR_YOUNGMIVOICE = 'ko-KR_YoungmiVoice' KO_KR_YUNAVOICE = 'ko-KR_YunaVoice' + NL_BE_ADELEVOICE = 'nl-BE_AdeleVoice' NL_NL_EMMAVOICE = 'nl-NL_EmmaVoice' NL_NL_LIAMVOICE = 'nl-NL_LiamVoice' PT_BR_ISABELAVOICE = 'pt-BR_IsabelaVoice' @@ -1823,6 +1842,7 @@ class Language(str, Enum): IT_IT = 'it-IT' JA_JP = 'ja-JP' KO_KR = 'ko-KR' + NL_BE = 'nl-BE' NL_NL = 'nl-NL' PT_BR = 'pt-BR' ZH_CN = 'zh-CN' @@ -1838,8 +1858,8 @@ class CustomModel(): Information about an existing custom model. :attr str customization_id: The customization ID (GUID) of the custom model. The - **Create a custom model** method returns only this field. It does not not return - the other fields of this object. + [Create a custom model](#createcustommodel) method returns only this field. It + does not not return the other fields of this object. :attr str name: (optional) The name of the custom model. :attr str language: (optional) The language identifier of the custom model (for example, `en-US`). @@ -1858,11 +1878,11 @@ class CustomModel(): words and their translations from the custom model. The words are listed in alphabetical order, with uppercase letters listed before lowercase letters. The array is empty if no words are defined for the custom model. This field is - returned only by the **Get a custom model** method. + returned only by the [Get a custom model](#getcustommodel) method. :attr List[Prompt] prompts: (optional) An array of `Prompt` objects that provides information about the prompts that are defined for the specified custom model. The array is empty if no prompts are defined for the custom model. This - field is returned only by the **Get a custom model** method. + field is returned only by the [Get a custom model](#getcustommodel) method. """ def __init__(self, @@ -1880,8 +1900,8 @@ def __init__(self, Initialize a CustomModel object. :param str customization_id: The customization ID (GUID) of the custom - model. The **Create a custom model** method returns only this field. It - does not not return the other fields of this object. + model. The [Create a custom model](#createcustommodel) method returns only + this field. It does not not return the other fields of this object. :param str name: (optional) The name of the custom model. :param str language: (optional) The language identifier of the custom model (for example, `en-US`). @@ -1900,12 +1920,13 @@ def __init__(self, the words and their translations from the custom model. 
The words are listed in alphabetical order, with uppercase letters listed before lowercase letters. The array is empty if no words are defined for the - custom model. This field is returned only by the **Get a custom model** - method. + custom model. This field is returned only by the [Get a custom + model](#getcustommodel) method. :param List[Prompt] prompts: (optional) An array of `Prompt` objects that provides information about the prompts that are defined for the specified custom model. The array is empty if no prompts are defined for the custom - model. This field is returned only by the **Get a custom model** method. + model. This field is returned only by the [Get a custom + model](#getcustommodel) method. """ self.customization_id = customization_id self.name = name @@ -3038,8 +3059,9 @@ class Voice(): :attr SupportedFeatures supported_features: Additional service features that are supported with the voice. :attr CustomModel customization: (optional) Returns information about a - specified custom model. This field is returned only by the **Get a voice** - method and only when you specify the customization ID of a custom model. + specified custom model. This field is returned only by the [Get a + voice](#getvoice) method and only when you specify the customization ID of a + custom model. """ def __init__(self, @@ -3068,8 +3090,9 @@ def __init__(self, :param SupportedFeatures supported_features: Additional service features that are supported with the voice. :param CustomModel customization: (optional) Returns information about a - specified custom model. This field is returned only by the **Get a voice** - method and only when you specify the customization ID of a custom model. + specified custom model. This field is returned only by the [Get a + voice](#getvoice) method and only when you specify the customization ID of + a custom model. """ self.url = url self.gender = gender @@ -3361,32 +3384,32 @@ class PartOfSpeechEnum(str, Enum): class Words(): """ - For the **Add custom words** method, one or more words that are to be added or updated - for the custom model and the translation for each specified word. - For the **List custom words** method, the words and their translations from the custom - model. - - :attr List[Word] words: The **Add custom words** method accepts an array of - `Word` objects. Each object provides a word that is to be added or updated for - the custom model and the word's translation. - The **List custom words** method returns an array of `Word` objects. Each object - shows a word and its translation from the custom model. The words are listed in - alphabetical order, with uppercase letters listed before lowercase letters. The - array is empty if the custom model contains no words. + For the [Add custom words](#addwords) method, one or more words that are to be added + or updated for the custom model and the translation for each specified word. + For the [List custom words](#listwords) method, the words and their translations from + the custom model. + + :attr List[Word] words: The [Add custom words](#addwords) method accepts an + array of `Word` objects. Each object provides a word that is to be added or + updated for the custom model and the word's translation. + The [List custom words](#listwords) method returns an array of `Word` objects. + Each object shows a word and its translation from the custom model. The words + are listed in alphabetical order, with uppercase letters listed before lowercase + letters. 
The array is empty if the custom model contains no words. """ def __init__(self, words: List['Word']) -> None: """ Initialize a Words object. - :param List[Word] words: The **Add custom words** method accepts an array - of `Word` objects. Each object provides a word that is to be added or - updated for the custom model and the word's translation. - The **List custom words** method returns an array of `Word` objects. Each - object shows a word and its translation from the custom model. The words - are listed in alphabetical order, with uppercase letters listed before - lowercase letters. The array is empty if the custom model contains no - words. + :param List[Word] words: The [Add custom words](#addwords) method accepts + an array of `Word` objects. Each object provides a word that is to be added + or updated for the custom model and the word's translation. + The [List custom words](#listwords) method returns an array of `Word` + objects. Each object shows a word and its translation from the custom + model. The words are listed in alphabetical order, with uppercase letters + listed before lowercase letters. The array is empty if the custom model + contains no words. """ self.words = words diff --git a/ibm_watson/tone_analyzer_v3.py b/ibm_watson/tone_analyzer_v3.py index c1f45c4a..61985fc5 100644 --- a/ibm_watson/tone_analyzer_v3.py +++ b/ibm_watson/tone_analyzer_v3.py @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -# IBM OpenAPI SDK Code Generator Version: 99-SNAPSHOT-a45d89ef-20201209-192237 +# IBM OpenAPI SDK Code Generator Version: 3.38.0-07189efd-20210827-205025 """ The IBM Watson™ Tone Analyzer service uses linguistic analysis to detect emotional and language tones in written text. The service can analyze tone at both the document and @@ -25,6 +25,9 @@ **Note:** Request logging is disabled for the Tone Analyzer service. Regardless of whether you set the `X-Watson-Learning-Opt-Out` request header, the service does not log or retain data from requests and responses. + +API Version: 3.5.3 +See: https://cloud.ibm.com/docs/tone-analyzer """ from enum import Enum @@ -184,7 +187,7 @@ def tone(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def tone_chat(self, @@ -258,7 +261,7 @@ def tone_chat(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response diff --git a/ibm_watson/version.py b/ibm_watson/version.py index f9bc7375..1d4672ff 100644 --- a/ibm_watson/version.py +++ b/ibm_watson/version.py @@ -1 +1 @@ -__version__ = '5.2.3' +__version__ = '5.3.0' diff --git a/ibm_watson/visual_recognition_v3.py b/ibm_watson/visual_recognition_v3.py index cf371461..431a379a 100644 --- a/ibm_watson/visual_recognition_v3.py +++ b/ibm_watson/visual_recognition_v3.py @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -# IBM OpenAPI SDK Code Generator Version: 99-SNAPSHOT-a45d89ef-20201209-192237 +# IBM OpenAPI SDK Code Generator Version: 3.38.0-07189efd-20210827-205025 """ IBM Watson™ Visual Recognition is discontinued. Existing instances are supported until 1 December 2021, but as of 7 January 2021, you can't create instances. Any instance @@ -23,6 +23,9 @@ The IBM Watson Visual Recognition service uses deep learning algorithms to identify scenes and objects in images that you upload to the service. 
You can create and train a custom classifier to identify subjects that suit your needs. + +API Version: 3.0 +See: https://cloud.ibm.com/docs/visual-recognition """ from datetime import datetime @@ -182,7 +185,7 @@ def classify(self, params=params, files=form_data) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -236,6 +239,7 @@ def create_classifier(self, :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse with `dict` result representing a `Classifier` object """ + if name is None: raise ValueError('name must be provided') if not positive_examples: @@ -279,7 +283,7 @@ def create_classifier(self, params=params, files=form_data) - response = self.send(request) + response = self.send(request, **kwargs) return response def list_classifiers(self, @@ -314,7 +318,7 @@ def list_classifiers(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_classifier(self, classifier_id: str, **kwargs) -> DetailedResponse: @@ -352,7 +356,7 @@ def get_classifier(self, classifier_id: str, **kwargs) -> DetailedResponse: headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def update_classifier(self, @@ -406,6 +410,7 @@ def update_classifier(self, :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse with `dict` result representing a `Classifier` object """ + if classifier_id is None: raise ValueError('classifier_id must be provided') headers = {} @@ -449,7 +454,7 @@ def update_classifier(self, params=params, files=form_data) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_classifier(self, classifier_id: str, @@ -486,7 +491,7 @@ def delete_classifier(self, classifier_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -531,7 +536,7 @@ def get_core_ml_model(self, classifier_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -576,7 +581,7 @@ def delete_user_data(self, customer_id: str, **kwargs) -> DetailedResponse: headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response diff --git a/ibm_watson/visual_recognition_v4.py b/ibm_watson/visual_recognition_v4.py index f1dd4200..0c62cee0 100644 --- a/ibm_watson/visual_recognition_v4.py +++ b/ibm_watson/visual_recognition_v4.py @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -# IBM OpenAPI SDK Code Generator Version: 99-SNAPSHOT-a45d89ef-20201209-192237 +# IBM OpenAPI SDK Code Generator Version: 3.38.0-07189efd-20210827-205025 """ IBM Watson™ Visual Recognition is discontinued. Existing instances are supported until 1 December 2021, but as of 7 January 2021, you can't create instances. Any instance @@ -22,6 +22,9 @@ {: deprecated} Provide images to the IBM Watson Visual Recognition service for analysis. The service detects objects based on a set of images with training data. 
+ +API Version: 4.0 +See: https://cloud.ibm.com/docs/visual-recognition?topic=visual-recognition-object-detection-overview """ from datetime import date @@ -164,7 +167,7 @@ def analyze(self, params=params, files=form_data) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -227,7 +230,7 @@ def create_collection(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def list_collections(self, **kwargs) -> DetailedResponse: @@ -259,7 +262,7 @@ def list_collections(self, **kwargs) -> DetailedResponse: headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_collection(self, collection_id: str, **kwargs) -> DetailedResponse: @@ -297,7 +300,7 @@ def get_collection(self, collection_id: str, **kwargs) -> DetailedResponse: headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def update_collection(self, @@ -361,7 +364,7 @@ def update_collection(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_collection(self, collection_id: str, @@ -400,7 +403,7 @@ def delete_collection(self, collection_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_model_file(self, collection_id: str, feature: str, @@ -455,7 +458,7 @@ def get_model_file(self, collection_id: str, feature: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -540,7 +543,7 @@ def add_images(self, params=params, files=form_data) - response = self.send(request) + response = self.send(request, **kwargs) return response def list_images(self, collection_id: str, **kwargs) -> DetailedResponse: @@ -578,7 +581,7 @@ def list_images(self, collection_id: str, **kwargs) -> DetailedResponse: headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_image_details(self, collection_id: str, image_id: str, @@ -621,7 +624,7 @@ def get_image_details(self, collection_id: str, image_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_image(self, collection_id: str, image_id: str, @@ -664,7 +667,7 @@ def delete_image(self, collection_id: str, image_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_jpeg_image(self, @@ -715,7 +718,7 @@ def get_jpeg_image(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -759,7 +762,7 @@ def list_object_metadata(self, collection_id: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def update_object_metadata(self, collection_id: str, object: str, @@ -814,7 +817,7 @@ def update_object_metadata(self, collection_id: str, object: str, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_object_metadata(self, collection_id: str, object: str, @@ -857,7 +860,7 @@ def get_object_metadata(self, collection_id: 
str, object: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def delete_object(self, collection_id: str, object: str, @@ -901,7 +904,7 @@ def delete_object(self, collection_id: str, object: str, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -945,7 +948,7 @@ def train(self, collection_id: str, **kwargs) -> DetailedResponse: headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response def add_image_training_data(self, @@ -1008,7 +1011,7 @@ def add_image_training_data(self, params=params, data=data) - response = self.send(request) + response = self.send(request, **kwargs) return response def get_training_usage(self, @@ -1056,7 +1059,7 @@ def get_training_usage(self, headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response ######################### @@ -1101,7 +1104,7 @@ def delete_user_data(self, customer_id: str, **kwargs) -> DetailedResponse: headers=headers, params=params) - response = self.send(request) + response = self.send(request, **kwargs) return response diff --git a/ibm_watson/websocket/recognize_listener.py b/ibm_watson/websocket/recognize_listener.py index 21679760..9847e919 100644 --- a/ibm_watson/websocket/recognize_listener.py +++ b/ibm_watson/websocket/recognize_listener.py @@ -198,15 +198,24 @@ def on_data(self, ws, message, message_type, fin): # set of transcriptions and send them to the appropriate callbacks. results = json_object.get('results') if results: - b_final = (results[0].get('final') is True) - alternatives = results[0].get('alternatives') - if alternatives: - hypothesis = alternatives[0].get('transcript') - transcripts = self.extract_transcripts(alternatives) - if b_final: - self.callback.on_transcription(transcripts) - if hypothesis: - self.callback.on_hypothesis(hypothesis) + if (self.options.get('interim_results') is True): + b_final = (results[0].get('final') is True) + alternatives = results[0].get('alternatives') + if alternatives: + hypothesis = alternatives[0].get('transcript') + transcripts = self.extract_transcripts(alternatives) + if b_final: + self.callback.on_transcription(transcripts) + if hypothesis: + self.callback.on_hypothesis(hypothesis) + else: + final_transcript = [] + for result in results: + transcript = self.extract_transcripts( + result.get('alternatives')) + final_transcript.append(transcript) + + self.callback.on_transcription(final_transcript) # Always call the on_data callback if 'results' or 'speaker_labels' are present self.callback.on_data(json_object) diff --git a/resources/speech_with_pause.wav b/resources/speech_with_pause.wav new file mode 100644 index 00000000..783426cb Binary files /dev/null and b/resources/speech_with_pause.wav differ diff --git a/setup.py b/setup.py index 2c060053..09a62b2a 100644 --- a/setup.py +++ b/setup.py @@ -16,7 +16,7 @@ from setuptools import setup from os import path -__version__ = '5.2.3' +__version__ = '5.3.0' # read contents of README file this_directory = path.abspath(path.dirname(__file__)) diff --git a/test/integration/test_discovery_v1.py b/test/integration/test_discovery_v1.py index 6d842bd0..32428e00 100644 --- a/test/integration/test_discovery_v1.py +++ b/test/integration/test_discovery_v1.py @@ -134,8 +134,7 @@ def test_queries(self): query_results = self.discovery.query( 
self.environment_id, self.collection_id, - filter='extracted_metadata.sha1::9181d244*', - return_fields='extracted_metadata.sha1').get_result() + filter='extracted_metadata.sha1::9181d244*').get_result() assert query_results is not None @pytest.mark.skip( diff --git a/test/integration/test_speech_to_text_v1.py b/test/integration/test_speech_to_text_v1.py index 3b58aa85..0efc3130 100644 --- a/test/integration/test_speech_to_text_v1.py +++ b/test/integration/test_speech_to_text_v1.py @@ -100,8 +100,8 @@ def __init__(self): def on_error(self, error): self.error = error - def on_transcription(self, transcript): - self.transcript = transcript + def on_data(self, data): + self.data = data test_callback = MyRecognizeCallback() with open( @@ -114,9 +114,86 @@ def on_transcription(self, transcript): t.start() t.join() assert test_callback.error is None - assert test_callback.transcript is not None - assert test_callback.transcript[0][ - 'transcript'] == 'thunderstorms could produce large hail isolated tornadoes and heavy rain ' + assert test_callback.data is not None + assert test_callback.data['results'][0]['alternatives'][0] + ['transcript'] == 'thunderstorms could produce large hail isolated tornadoes and heavy rain ' + + def test_on_transcription_interim_results_false(self): + + class MyRecognizeCallback(RecognizeCallback): + + def __init__(self): + RecognizeCallback.__init__(self) + self.error = None + self.transcript = None + + def on_error(self, error): + self.error = error + + def on_transcription(self, transcript): + self.transcript = transcript + + test_callback = MyRecognizeCallback() + with open(os.path.join(os.path.dirname(__file__), '../../resources/speech_with_pause.wav'), 'rb') as audio_file: + audio_source = AudioSource(audio_file, False) + self.speech_to_text.recognize_using_websocket(audio_source, "audio/wav", test_callback, model="en-US_Telephony", + interim_results=False, low_latency=False) + assert test_callback.error is None + assert test_callback.transcript is not None + assert test_callback.transcript[0][0]['transcript'] == 'isolated tornadoes ' + assert test_callback.transcript[1][0]['transcript'] == 'and heavy rain ' + + def test_on_transcription_interim_results_true(self): + + class MyRecognizeCallback(RecognizeCallback): + + def __init__(self): + RecognizeCallback.__init__(self) + self.error = None + self.transcript = None + + def on_error(self, error): + self.error = error + + def on_transcription(self, transcript): + self.transcript = transcript + assert transcript[0]['confidence'] is not None + assert transcript[0]['transcript'] is not None + + test_callback = MyRecognizeCallback() + with open(os.path.join(os.path.dirname(__file__), '../../resources/speech_with_pause.wav'), 'rb') as audio_file: + audio_source = AudioSource(audio_file, False) + self.speech_to_text.recognize_using_websocket(audio_source, "audio/wav", test_callback, model="en-US_Telephony", + interim_results=True, low_latency=True) + assert test_callback.error is None + assert test_callback.transcript is not None + assert test_callback.transcript[0]['transcript'] == 'and heavy rain ' + + def test_on_transcription_interim_results_true_low_latency_false(self): + + class MyRecognizeCallback(RecognizeCallback): + + def __init__(self): + RecognizeCallback.__init__(self) + self.error = None + self.transcript = None + + def on_error(self, error): + self.error = error + + def on_transcription(self, transcript): + self.transcript = transcript + assert transcript[0]['confidence'] is not None + assert 
transcript[0]['transcript'] is not None + + test_callback = MyRecognizeCallback() + with open(os.path.join(os.path.dirname(__file__), '../../resources/speech_with_pause.wav'), 'rb') as audio_file: + audio_source = AudioSource(audio_file, False) + self.speech_to_text.recognize_using_websocket(audio_source, "audio/wav", test_callback, model="en-US_Telephony", + interim_results=True, low_latency=False) + assert test_callback.error is None + assert test_callback.transcript is not None + assert test_callback.transcript[0]['transcript'] == 'and heavy rain ' def test_custom_grammars(self): customization_id = None diff --git a/test/unit/test_assistant_v1.py b/test/unit/test_assistant_v1.py index 37db94d3..c2314b63 100644 --- a/test/unit/test_assistant_v1.py +++ b/test/unit/test_assistant_v1.py @@ -53,6 +53,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -65,7 +67,7 @@ def test_message_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/message') - mock_response = '{"input": {"text": "text", "spelling_suggestions": true, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"mapKey": {"anyKey": "anyValue"}}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "text": ["text"], "generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"text": "text", "spelling_suggestions": true, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": 
{"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}]}}], "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}' + mock_response = '{"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"mapKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "text": ["text"], "generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, 
"specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}]}}], "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}' responses.add(responses.POST, url, body=mock_response, @@ -75,9 +77,9 @@ def test_message_all_params(self): # Construct a dict representation of a MessageInput model message_input_model = {} message_input_model['text'] = 'testString' - message_input_model['spelling_suggestions'] = True - message_input_model['spelling_auto_correct'] = True - message_input_model['foo'] = { 'foo': 'bar' } + message_input_model['spelling_suggestions'] = False + message_input_model['spelling_auto_correct'] = False + message_input_model['foo'] = 'testString' # Construct a dict representation of a RuntimeIntent model runtime_intent_model = {} @@ -149,7 +151,7 @@ def test_message_all_params(self): context_model['conversation_id'] = 'testString' context_model['system'] = {} context_model['metadata'] = message_context_metadata_model - context_model['foo'] = { 'foo': 'bar' } + context_model['foo'] = 'testString' # Construct a dict representation of a DialogNodeVisitedDetails model dialog_node_visited_details_model = {} @@ -200,18 +202,18 @@ def test_message_all_params(self): output_data_model['log_messages'] = [log_message_model] output_data_model['text'] = ['testString'] output_data_model['generic'] = [runtime_response_generic_model] - output_data_model['foo'] = { 'foo': 'bar' } + output_data_model['foo'] = 'testString' # Set up parameter values workspace_id = 'testString' input = message_input_model intents = [runtime_intent_model] entities = [runtime_entity_model] - alternate_intents = True + alternate_intents = False context = context_model output = output_data_model user_id = 'testString' - nodes_visited_details = True + nodes_visited_details = False # Invoke method response = _service.message( @@ -239,7 +241,7 @@ def test_message_all_params(self): assert req_body['input'] == message_input_model assert req_body['intents'] == [runtime_intent_model] assert req_body['entities'] == [runtime_entity_model] - assert req_body['alternate_intents'] == True + assert req_body['alternate_intents'] == False assert req_body['context'] == context_model assert req_body['output'] == output_data_model assert req_body['user_id'] == 'testString' @@ -252,7 +254,7 @@ def test_message_required_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/message') - mock_response = '{"input": {"text": "text", "spelling_suggestions": true, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, 
"relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"mapKey": {"anyKey": "anyValue"}}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "text": ["text"], "generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"text": "text", "spelling_suggestions": true, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}]}}], "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}' + mock_response = '{"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, 
"specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"mapKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "text": ["text"], "generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}]}}], "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}' responses.add(responses.POST, url, body=mock_response, @@ -280,7 +282,7 @@ def test_message_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/message') - mock_response = '{"input": {"text": "text", "spelling_suggestions": true, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], 
"alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"mapKey": {"anyKey": "anyValue"}}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "text": ["text"], "generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"text": "text", "spelling_suggestions": true, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}]}}], "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}' + mock_response = '{"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"mapKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": 
[{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "text": ["text"], "generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}]}}], "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}' responses.add(responses.POST, url, body=mock_response, @@ -320,6 +322,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -332,7 +336,7 @@ def test_bulk_classify_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/bulk_classify') - mock_response = '{"output": [{"input": {"text": "text"}, "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "intents": [{"intent": "intent", "confidence": 10}]}]}' + mock_response = '{"output": [{"input": {"text": "text"}, "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "intents": [{"intent": "intent", "confidence": 10}]}]}' responses.add(responses.POST, url, body=mock_response, @@ -369,7 +373,7 @@ def test_bulk_classify_required_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/bulk_classify') - mock_response = '{"output": [{"input": {"text": "text"}, "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 
13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "intents": [{"intent": "intent", "confidence": 10}]}]}' + mock_response = '{"output": [{"input": {"text": "text"}, "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "intents": [{"intent": "intent", "confidence": 10}]}]}' responses.add(responses.POST, url, body=mock_response, @@ -397,7 +401,7 @@ def test_bulk_classify_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/bulk_classify') - mock_response = '{"output": [{"input": {"text": "text"}, "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "intents": [{"intent": "intent", "confidence": 10}]}]}' + mock_response = '{"output": [{"input": {"text": "text"}, "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "intents": 
[{"intent": "intent", "confidence": 10}]}]}' responses.add(responses.POST, url, body=mock_response, @@ -437,6 +441,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -449,7 +455,7 @@ def test_list_workspaces_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces') - mock_response = '{"workspaces": [{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}, "modifiers": {"overwrite": false}}, "context": {"integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}}, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": true, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "learning_opt_out": true, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"mapKey": {"anyKey": "anyValue"}}, "spelling_suggestions": true, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}}, "status": "Non Existent", "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": 
"2019-01-01T12:00:00.000Z"}]}]}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + mock_response = '{"workspaces": [{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"mapKey": "anyValue"}}}, "metadata": {"mapKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"mapKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"mapKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}}, "status": "Non Existent", "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"mapKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}]}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' responses.add(responses.GET, url, body=mock_response, @@ -458,10 +464,10 @@ def test_list_workspaces_all_params(self): # Set up parameter values page_limit = 38 - include_count = True + include_count = False sort = 'name' cursor = 'testString' - include_audit = True + include_audit = False # Invoke method response = 
_service.list_workspaces( @@ -493,7 +499,7 @@ def test_list_workspaces_required_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces') - mock_response = '{"workspaces": [{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}, "modifiers": {"overwrite": false}}, "context": {"integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}}, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": true, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "learning_opt_out": true, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"mapKey": {"anyKey": "anyValue"}}, "spelling_suggestions": true, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}}, "status": "Non Existent", "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}]}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + mock_response = '{"workspaces": [{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": 
"parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"mapKey": "anyValue"}}}, "metadata": {"mapKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"mapKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"mapKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}}, "status": "Non Existent", "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"mapKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}]}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' responses.add(responses.GET, url, body=mock_response, @@ -516,7 +522,7 @@ def test_list_workspaces_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces') - mock_response = '{"workspaces": [{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}, "modifiers": {"overwrite": 
false}}, "context": {"integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}}, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": true, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "learning_opt_out": true, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"mapKey": {"anyKey": "anyValue"}}, "spelling_suggestions": true, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}}, "status": "Non Existent", "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}]}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + mock_response = '{"workspaces": [{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"mapKey": "anyValue"}}}, "metadata": {"mapKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": 
"credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"mapKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"mapKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}}, "status": "Non Existent", "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"mapKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}]}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' responses.add(responses.GET, url, body=mock_response, @@ -542,6 +548,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -554,7 +562,7 @@ def test_create_workspace_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces') - mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}, "modifiers": {"overwrite": false}}, "context": {"integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}}, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": true, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "learning_opt_out": true, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"mapKey": {"anyKey": "anyValue"}}, "spelling_suggestions": true, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}}, "status": "Non Existent", "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}]}' + mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": 
"parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"mapKey": "anyValue"}}}, "metadata": {"mapKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"mapKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"mapKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}}, "status": "Non Existent", "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"mapKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}]}' responses.add(responses.POST, url, body=mock_response, @@ -593,12 +601,12 @@ def test_create_workspace_all_params(self): dialog_node_output_model['generic'] = [dialog_node_output_generic_model] dialog_node_output_model['integrations'] = {} dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model - dialog_node_output_model['foo'] = { 'foo': 'bar' } + dialog_node_output_model['foo'] = 'testString' # Construct a dict representation of a DialogNodeContext model dialog_node_context_model = {} dialog_node_context_model['integrations'] = {} - dialog_node_context_model['foo'] = { 'foo': 'bar' } + dialog_node_context_model['foo'] = 'testString' # Construct a dict representation of a DialogNodeNextStep model dialog_node_next_step_model = {} @@ -634,7 +642,7 @@ def test_create_workspace_all_params(self): dialog_node_model['digress_out'] = 'allow_returning' dialog_node_model['digress_out_slots'] = 'not_allowed' 
         dialog_node_model['user_label'] = 'testString'
-        dialog_node_model['disambiguation_opt_out'] = True
+        dialog_node_model['disambiguation_opt_out'] = False

         # Construct a dict representation of a Counterexample model
         counterexample_model = {}
@@ -648,7 +656,7 @@ def test_create_workspace_all_params(self):
         workspace_system_settings_disambiguation_model = {}
         workspace_system_settings_disambiguation_model['prompt'] = 'testString'
         workspace_system_settings_disambiguation_model['none_of_the_above_prompt'] = 'testString'
-        workspace_system_settings_disambiguation_model['enabled'] = True
+        workspace_system_settings_disambiguation_model['enabled'] = False
         workspace_system_settings_disambiguation_model['sensitivity'] = 'auto'
         workspace_system_settings_disambiguation_model['randomize'] = True
         workspace_system_settings_disambiguation_model['max_suggestions'] = 1
@@ -656,19 +664,19 @@ def test_create_workspace_all_params(self):
         # Construct a dict representation of a WorkspaceSystemSettingsSystemEntities model
         workspace_system_settings_system_entities_model = {}
-        workspace_system_settings_system_entities_model['enabled'] = True
+        workspace_system_settings_system_entities_model['enabled'] = False

         # Construct a dict representation of a WorkspaceSystemSettingsOffTopic model
         workspace_system_settings_off_topic_model = {}
-        workspace_system_settings_off_topic_model['enabled'] = True
+        workspace_system_settings_off_topic_model['enabled'] = False

         # Construct a dict representation of a WorkspaceSystemSettings model
         workspace_system_settings_model = {}
         workspace_system_settings_model['tooling'] = workspace_system_settings_tooling_model
         workspace_system_settings_model['disambiguation'] = workspace_system_settings_disambiguation_model
         workspace_system_settings_model['human_agent_assist'] = {}
-        workspace_system_settings_model['spelling_suggestions'] = True
-        workspace_system_settings_model['spelling_auto_correct'] = True
+        workspace_system_settings_model['spelling_suggestions'] = False
+        workspace_system_settings_model['spelling_auto_correct'] = False
         workspace_system_settings_model['system_entities'] = workspace_system_settings_system_entities_model
         workspace_system_settings_model['off_topic'] = workspace_system_settings_off_topic_model
@@ -722,12 +730,12 @@ def test_create_workspace_all_params(self):
         dialog_nodes = [dialog_node_model]
         counterexamples = [counterexample_model]
         metadata = {}
-        learning_opt_out = True
+        learning_opt_out = False
         system_settings = workspace_system_settings_model
         webhooks = [webhook_model]
         intents = [create_intent_model]
         entities = [create_entity_model]
-        include_audit = True
+        include_audit = False

         # Invoke method
         response = _service.create_workspace(
@@ -761,7 +769,7 @@ def test_create_workspace_all_params(self):
         assert req_body['dialog_nodes'] == [dialog_node_model]
         assert req_body['counterexamples'] == [counterexample_model]
         assert req_body['metadata'] == {}
-        assert req_body['learning_opt_out'] == True
+        assert req_body['learning_opt_out'] == False
         assert req_body['system_settings'] == workspace_system_settings_model
         assert req_body['webhooks'] == [webhook_model]
         assert req_body['intents'] == [create_intent_model]
@@ -775,7 +783,7 @@ def test_create_workspace_required_params(self):
         """
         # Set up mock
         url = self.preprocess_url(_base_url + '/v1/workspaces')
-        mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions",
"parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}, "modifiers": {"overwrite": false}}, "context": {"integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}}, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": true, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "learning_opt_out": true, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"mapKey": {"anyKey": "anyValue"}}, "spelling_suggestions": true, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}}, "status": "Non Existent", "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}]}' + mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"mapKey": "anyValue"}}}, "metadata": {"mapKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", 
"variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"mapKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"mapKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}}, "status": "Non Existent", "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"mapKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}]}' responses.add(responses.POST, url, body=mock_response, @@ -798,7 +806,7 @@ def test_create_workspace_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces') - mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}, "modifiers": {"overwrite": false}}, "context": {"integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}}, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": true, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": 
[{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "learning_opt_out": true, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"mapKey": {"anyKey": "anyValue"}}, "spelling_suggestions": true, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}}, "status": "Non Existent", "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}]}' + mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"mapKey": "anyValue"}}}, "metadata": {"mapKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"mapKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"mapKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, 
"system_entities": {"enabled": false}, "off_topic": {"enabled": false}}, "status": "Non Existent", "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"mapKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}]}' responses.add(responses.POST, url, body=mock_response, @@ -824,6 +832,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -836,7 +846,7 @@ def test_get_workspace_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString') - mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}, "modifiers": {"overwrite": false}}, "context": {"integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}}, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": true, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "learning_opt_out": true, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"mapKey": {"anyKey": "anyValue"}}, "spelling_suggestions": true, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": 
false}}, "status": "Non Existent", "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}]}' + mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"mapKey": "anyValue"}}}, "metadata": {"mapKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"mapKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"mapKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}}, "status": "Non Existent", "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"mapKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": 
{"mapKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}]}' responses.add(responses.GET, url, body=mock_response, @@ -845,8 +855,8 @@ def test_get_workspace_all_params(self): # Set up parameter values workspace_id = 'testString' - export = True - include_audit = True + export = False + include_audit = False sort = 'stable' # Invoke method @@ -876,7 +886,7 @@ def test_get_workspace_required_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString') - mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}, "modifiers": {"overwrite": false}}, "context": {"integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}}, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": true, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "learning_opt_out": true, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"mapKey": {"anyKey": "anyValue"}}, "spelling_suggestions": true, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}}, "status": "Non Existent", "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}]}' 
+ mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"mapKey": "anyValue"}}}, "metadata": {"mapKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"mapKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"mapKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}}, "status": "Non Existent", "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"mapKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}]}' responses.add(responses.GET, url, body=mock_response, @@ -904,7 +914,7 @@ def test_get_workspace_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString') - mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": 
{"mapKey": {"mapKey": {"anyKey": "anyValue"}}}, "modifiers": {"overwrite": false}}, "context": {"integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}}, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": true, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "learning_opt_out": true, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"mapKey": {"anyKey": "anyValue"}}, "spelling_suggestions": true, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}}, "status": "Non Existent", "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}]}' + mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"mapKey": "anyValue"}}}, "metadata": {"mapKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": 
"not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"mapKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"mapKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}}, "status": "Non Existent", "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"mapKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}]}' responses.add(responses.GET, url, body=mock_response, @@ -934,6 +944,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -946,7 +958,7 @@ def test_update_workspace_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString') - mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}, "modifiers": {"overwrite": false}}, "context": {"integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}}, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": true, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "learning_opt_out": true, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"mapKey": {"anyKey": "anyValue"}}, "spelling_suggestions": true, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}}, "status": "Non Existent", "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}]}' + mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", 
"parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"mapKey": "anyValue"}}}, "metadata": {"mapKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"mapKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"mapKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}}, "status": "Non Existent", "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"mapKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}]}' responses.add(responses.POST, url, body=mock_response, @@ -985,12 +997,12 @@ def test_update_workspace_all_params(self): dialog_node_output_model['generic'] = [dialog_node_output_generic_model] dialog_node_output_model['integrations'] = {} dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model - dialog_node_output_model['foo'] = { 'foo': 'bar' } + dialog_node_output_model['foo'] = 'testString' # Construct a dict representation of a DialogNodeContext model dialog_node_context_model = {} dialog_node_context_model['integrations'] = {} - dialog_node_context_model['foo'] = { 'foo': 'bar' } + dialog_node_context_model['foo'] = 'testString' # Construct a dict representation of a DialogNodeNextStep model dialog_node_next_step_model = {} @@ -1026,7 +1038,7 @@ def test_update_workspace_all_params(self): dialog_node_model['digress_out'] = 'allow_returning' dialog_node_model['digress_out_slots'] = 
'not_allowed' dialog_node_model['user_label'] = 'testString' - dialog_node_model['disambiguation_opt_out'] = True + dialog_node_model['disambiguation_opt_out'] = False # Construct a dict representation of a Counterexample model counterexample_model = {} @@ -1040,7 +1052,7 @@ def test_update_workspace_all_params(self): workspace_system_settings_disambiguation_model = {} workspace_system_settings_disambiguation_model['prompt'] = 'testString' workspace_system_settings_disambiguation_model['none_of_the_above_prompt'] = 'testString' - workspace_system_settings_disambiguation_model['enabled'] = True + workspace_system_settings_disambiguation_model['enabled'] = False workspace_system_settings_disambiguation_model['sensitivity'] = 'auto' workspace_system_settings_disambiguation_model['randomize'] = True workspace_system_settings_disambiguation_model['max_suggestions'] = 1 @@ -1048,19 +1060,19 @@ def test_update_workspace_all_params(self): # Construct a dict representation of a WorkspaceSystemSettingsSystemEntities model workspace_system_settings_system_entities_model = {} - workspace_system_settings_system_entities_model['enabled'] = True + workspace_system_settings_system_entities_model['enabled'] = False # Construct a dict representation of a WorkspaceSystemSettingsOffTopic model workspace_system_settings_off_topic_model = {} - workspace_system_settings_off_topic_model['enabled'] = True + workspace_system_settings_off_topic_model['enabled'] = False # Construct a dict representation of a WorkspaceSystemSettings model workspace_system_settings_model = {} workspace_system_settings_model['tooling'] = workspace_system_settings_tooling_model workspace_system_settings_model['disambiguation'] = workspace_system_settings_disambiguation_model workspace_system_settings_model['human_agent_assist'] = {} - workspace_system_settings_model['spelling_suggestions'] = True - workspace_system_settings_model['spelling_auto_correct'] = True + workspace_system_settings_model['spelling_suggestions'] = False + workspace_system_settings_model['spelling_auto_correct'] = False workspace_system_settings_model['system_entities'] = workspace_system_settings_system_entities_model workspace_system_settings_model['off_topic'] = workspace_system_settings_off_topic_model @@ -1115,13 +1127,13 @@ def test_update_workspace_all_params(self): dialog_nodes = [dialog_node_model] counterexamples = [counterexample_model] metadata = {} - learning_opt_out = True + learning_opt_out = False system_settings = workspace_system_settings_model webhooks = [webhook_model] intents = [create_intent_model] entities = [create_entity_model] - append = True - include_audit = True + append = False + include_audit = False # Invoke method response = _service.update_workspace( @@ -1158,7 +1170,7 @@ def test_update_workspace_all_params(self): assert req_body['dialog_nodes'] == [dialog_node_model] assert req_body['counterexamples'] == [counterexample_model] assert req_body['metadata'] == {} - assert req_body['learning_opt_out'] == True + assert req_body['learning_opt_out'] == False assert req_body['system_settings'] == workspace_system_settings_model assert req_body['webhooks'] == [webhook_model] assert req_body['intents'] == [create_intent_model] @@ -1172,7 +1184,7 @@ def test_update_workspace_required_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString') - mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": 
"dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}, "modifiers": {"overwrite": false}}, "context": {"integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}}, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": true, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "learning_opt_out": true, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"mapKey": {"anyKey": "anyValue"}}, "spelling_suggestions": true, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}}, "status": "Non Existent", "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}]}' + mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"mapKey": "anyValue"}}}, "metadata": {"mapKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": 
"condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"mapKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"mapKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}}, "status": "Non Existent", "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"mapKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}]}' responses.add(responses.POST, url, body=mock_response, @@ -1200,7 +1212,7 @@ def test_update_workspace_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString') - mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}, "modifiers": {"overwrite": false}}, "context": {"integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}}, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": true, "disabled": true, "created": 
"2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "learning_opt_out": true, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"mapKey": {"anyKey": "anyValue"}}, "spelling_suggestions": true, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}}, "status": "Non Existent", "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}]}' + mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"mapKey": "anyValue"}}}, "metadata": {"mapKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"mapKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"mapKey": 
"anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}}, "status": "Non Existent", "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"mapKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}]}' responses.add(responses.POST, url, body=mock_response, @@ -1230,6 +1242,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1304,6 +1318,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1325,12 +1341,12 @@ def test_list_intents_all_params(self): # Set up parameter values workspace_id = 'testString' - export = True + export = False page_limit = 38 - include_count = True + include_count = False sort = 'intent' cursor = 'testString' - include_audit = True + include_audit = False # Invoke method response = _service.list_intents( @@ -1423,6 +1439,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1457,7 +1475,7 @@ def test_create_intent_all_params(self): intent = 'testString' description = 'testString' examples = [example_model] - include_audit = True + include_audit = False # Invoke method response = _service.create_intent( @@ -1583,6 +1601,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1605,8 +1625,8 @@ def test_get_intent_all_params(self): # Set up parameter values workspace_id = 'testString' intent = 'testString' - export = True - include_audit = True + export = False + include_audit = False # Invoke method response = _service.get_intent( @@ -1696,6 +1716,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1731,8 +1753,8 @@ def test_update_intent_all_params(self): new_intent = 'testString' new_description = 'testString' new_examples = [example_model] - append = True - include_audit = True + append = False + include_audit = False # Invoke method response = _service.update_intent( @@ -1864,6 +1886,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1942,6 +1966,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1965,10 +1991,10 @@ def test_list_examples_all_params(self): workspace_id = 'testString' intent = 'testString' page_limit = 38 - include_count = True + include_count = False sort = 'text' cursor = 'testString' - include_audit = True + include_audit = False # Invoke method response = _service.list_examples( @@ -2064,6 +2090,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2093,7 +2121,7 @@ def test_create_example_all_params(self): intent = 'testString' text = 'testString' mentions = [mention_model] - include_audit = True + include_audit = False # Invoke method response = _service.create_example( @@ -2208,6 +2236,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2231,7 +2261,7 @@ def test_get_example_all_params(self): workspace_id = 'testString' intent = 'testString' text = 'testString' - include_audit = True + include_audit = False # Invoke method response = _service.get_example( @@ -2324,6 +2354,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2354,7 +2386,7 @@ def test_update_example_all_params(self): text = 'testString' new_text = 'testString' new_mentions = [mention_model] - include_audit = True + include_audit = False # Invoke method response = _service.update_example( @@ -2473,6 +2505,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2555,6 +2589,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2577,10 +2613,10 @@ def test_list_counterexamples_all_params(self): # Set up parameter values workspace_id = 'testString' page_limit = 38 - include_count = True + include_count = False sort = 'text' cursor = 'testString' - include_audit = True + include_audit = False # Invoke method response = _service.list_counterexamples( @@ -2671,6 +2707,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2693,7 +2731,7 @@ def test_create_counterexample_all_params(self): # Set up parameter values workspace_id = 'testString' text = 'testString' - include_audit = True + include_audit = False # Invoke method response = _service.create_counterexample( @@ -2787,6 +2825,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2809,7 +2849,7 @@ def test_get_counterexample_all_params(self): # Set up parameter values workspace_id = 'testString' text = 'testString' - include_audit = True + include_audit = False # Invoke method response = _service.get_counterexample( @@ -2897,6 +2937,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2920,7 +2962,7 @@ def test_update_counterexample_all_params(self): workspace_id = 'testString' text = 'testString' new_text = 'testString' - include_audit = True + include_audit = False # Invoke method response = _service.update_counterexample( @@ -3018,6 +3060,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -3096,6 +3140,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -3108,7 +3154,7 @@ def test_list_entities_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/entities') - mock_response = '{"entities": [{"entity": "entity", "description": "description", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + mock_response = '{"entities": [{"entity": "entity", "description": "description", "metadata": {"mapKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' responses.add(responses.GET, url, body=mock_response, @@ -3117,12 +3163,12 @@ def test_list_entities_all_params(self): # Set up parameter values workspace_id = 'testString' - export = True + export = False page_limit = 38 - include_count = True + include_count = False sort = 'entity' cursor = 'testString' - include_audit = True + include_audit = False # Invoke method response = _service.list_entities( @@ -3157,7 +3203,7 @@ def test_list_entities_required_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/entities') - mock_response = '{"entities": [{"entity": "entity", "description": "description", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + mock_response = '{"entities": [{"entity": "entity", "description": "description", "metadata": {"mapKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' responses.add(responses.GET, url, body=mock_response, @@ -3185,7 +3231,7 @@ def test_list_entities_value_error(self): """ # Set up mock url = 
self.preprocess_url(_base_url + '/v1/workspaces/testString/entities') - mock_response = '{"entities": [{"entity": "entity", "description": "description", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + mock_response = '{"entities": [{"entity": "entity", "description": "description", "metadata": {"mapKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' responses.add(responses.GET, url, body=mock_response, @@ -3215,6 +3261,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -3227,7 +3275,7 @@ def test_create_entity_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/entities') - mock_response = '{"entity": "entity", "description": "description", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' + mock_response = '{"entity": "entity", "description": "description", "metadata": {"mapKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' responses.add(responses.POST, url, body=mock_response, @@ -3249,7 +3297,7 @@ def test_create_entity_all_params(self): metadata = {} fuzzy_match = True values = [create_value_model] - include_audit = True + include_audit = False # Invoke method response = _service.create_entity( @@ -3286,7 +3334,7 @@ def test_create_entity_required_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/entities') - mock_response = '{"entity": "entity", "description": "description", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], 
"created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' + mock_response = '{"entity": "entity", "description": "description", "metadata": {"mapKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' responses.add(responses.POST, url, body=mock_response, @@ -3339,7 +3387,7 @@ def test_create_entity_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/entities') - mock_response = '{"entity": "entity", "description": "description", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' + mock_response = '{"entity": "entity", "description": "description", "metadata": {"mapKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' responses.add(responses.POST, url, body=mock_response, @@ -3383,6 +3431,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -3395,7 +3445,7 @@ def test_get_entity_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/entities/testString') - mock_response = '{"entity": "entity", "description": "description", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' + mock_response = '{"entity": "entity", "description": "description", "metadata": {"mapKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' responses.add(responses.GET, url, body=mock_response, @@ -3405,8 +3455,8 @@ def test_get_entity_all_params(self): # Set up parameter values workspace_id = 'testString' entity = 'testString' - export = True - include_audit = True + export = False + include_audit = False # Invoke method response = _service.get_entity( @@ -3434,7 +3484,7 @@ def test_get_entity_required_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/entities/testString') - mock_response = '{"entity": "entity", "description": "description", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' + mock_response = '{"entity": "entity", "description": "description", "metadata": {"mapKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' responses.add(responses.GET, url, body=mock_response, @@ -3464,7 +3514,7 @@ def test_get_entity_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/entities/testString') - mock_response = '{"entity": "entity", "description": "description", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' + mock_response = '{"entity": "entity", "description": "description", "metadata": {"mapKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": 
"anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' responses.add(responses.GET, url, body=mock_response, @@ -3496,6 +3546,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -3508,7 +3560,7 @@ def test_update_entity_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/entities/testString') - mock_response = '{"entity": "entity", "description": "description", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' + mock_response = '{"entity": "entity", "description": "description", "metadata": {"mapKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' responses.add(responses.POST, url, body=mock_response, @@ -3531,8 +3583,8 @@ def test_update_entity_all_params(self): new_metadata = {} new_fuzzy_match = True new_values = [create_value_model] - append = True - include_audit = True + append = False + include_audit = False # Invoke method response = _service.update_entity( @@ -3572,7 +3624,7 @@ def test_update_entity_required_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/entities/testString') - mock_response = '{"entity": "entity", "description": "description", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' + mock_response = '{"entity": "entity", "description": "description", "metadata": {"mapKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' responses.add(responses.POST, url, body=mock_response, @@ -3627,7 +3679,7 @@ def test_update_entity_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/entities/testString') - mock_response = '{"entity": "entity", "description": "description", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "type": "synonyms", "synonyms": ["synonym"], 
"patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' + mock_response = '{"entity": "entity", "description": "description", "metadata": {"mapKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"mapKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}' responses.add(responses.POST, url, body=mock_response, @@ -3672,6 +3724,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -3750,6 +3804,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -3772,8 +3828,8 @@ def test_list_mentions_all_params(self): # Set up parameter values workspace_id = 'testString' entity = 'testString' - export = True - include_audit = True + export = False + include_audit = False # Invoke method response = _service.list_mentions( @@ -3873,6 +3929,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -3885,7 +3943,7 @@ def test_list_values_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/entities/testString/values') - mock_response = '{"values": [{"value": "value", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + mock_response = '{"values": [{"value": "value", "metadata": {"mapKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' responses.add(responses.GET, url, body=mock_response, @@ -3895,12 +3953,12 @@ def test_list_values_all_params(self): # Set up parameter values workspace_id = 'testString' entity = 'testString' - export = True + export = False page_limit = 38 - include_count = True + include_count = False sort = 'value' cursor = 'testString' - include_audit = True + include_audit = False # Invoke method response = _service.list_values( @@ -3936,7 +3994,7 @@ def test_list_values_required_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/entities/testString/values') - mock_response = 
'{"values": [{"value": "value", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + mock_response = '{"values": [{"value": "value", "metadata": {"mapKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' responses.add(responses.GET, url, body=mock_response, @@ -3966,7 +4024,7 @@ def test_list_values_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/entities/testString/values') - mock_response = '{"values": [{"value": "value", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + mock_response = '{"values": [{"value": "value", "metadata": {"mapKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' responses.add(responses.GET, url, body=mock_response, @@ -3998,6 +4056,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -4010,7 +4070,7 @@ def test_create_value_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/entities/testString/values') - mock_response = '{"value": "value", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + mock_response = '{"value": "value", "metadata": {"mapKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' responses.add(responses.POST, url, body=mock_response, @@ -4025,7 +4085,7 @@ def test_create_value_all_params(self): type = 'synonyms' synonyms = ['testString'] patterns = ['testString'] - include_audit = True + include_audit = False # Invoke method response = _service.create_value( @@ -4063,7 +4123,7 @@ def test_create_value_required_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/entities/testString/values') - mock_response = '{"value": "value", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + mock_response = '{"value": "value", "metadata": {"mapKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' responses.add(responses.POST, url, body=mock_response, @@ -4110,7 +4170,7 @@ def test_create_value_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/entities/testString/values') - mock_response = '{"value": "value", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + mock_response = '{"value": "value", "metadata": {"mapKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' responses.add(responses.POST, url, body=mock_response, @@ -4148,6 +4208,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -4160,7 +4222,7 @@ def test_get_value_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/entities/testString/values/testString') - mock_response = '{"value": "value", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + mock_response = '{"value": "value", "metadata": {"mapKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' responses.add(responses.GET, url, body=mock_response, @@ -4171,8 +4233,8 @@ def test_get_value_all_params(self): workspace_id = 'testString' entity = 'testString' value = 'testString' - export = True - include_audit = True + export = False + include_audit = False # Invoke method response = _service.get_value( @@ -4201,7 +4263,7 @@ def test_get_value_required_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/entities/testString/values/testString') - mock_response = '{"value": "value", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + mock_response = '{"value": "value", "metadata": {"mapKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' responses.add(responses.GET, url, body=mock_response, @@ -4233,7 +4295,7 @@ def test_get_value_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/entities/testString/values/testString') - mock_response = '{"value": "value", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + mock_response = '{"value": "value", "metadata": {"mapKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' responses.add(responses.GET, url, body=mock_response, @@ -4267,6 +4329,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -4279,7 +4343,7 @@ def test_update_value_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/entities/testString/values/testString') - mock_response = '{"value": "value", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + mock_response = '{"value": "value", "metadata": {"mapKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' responses.add(responses.POST, url, body=mock_response, @@ -4295,8 +4359,8 @@ def test_update_value_all_params(self): new_type = 'synonyms' new_synonyms = ['testString'] new_patterns = ['testString'] - append = True - include_audit = True + append = False + include_audit = False # Invoke method response = _service.update_value( @@ -4337,7 +4401,7 @@ def test_update_value_required_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/entities/testString/values/testString') - mock_response = '{"value": "value", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + mock_response = '{"value": "value", "metadata": {"mapKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' responses.add(responses.POST, url, body=mock_response, @@ -4386,7 +4450,7 @@ def test_update_value_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/entities/testString/values/testString') - mock_response = '{"value": "value", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + mock_response = '{"value": "value", "metadata": {"mapKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' responses.add(responses.POST, url, body=mock_response, @@ -4425,6 +4489,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -4507,6 +4573,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -4531,10 +4599,10 @@ def test_list_synonyms_all_params(self): entity = 'testString' value = 'testString' page_limit = 38 - include_count = True + include_count = False sort = 'synonym' cursor = 'testString' - include_audit = True + include_audit = False # Invoke method response = _service.list_synonyms( @@ -4635,6 +4703,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -4659,7 +4729,7 @@ def test_create_synonym_all_params(self): entity = 'testString' value = 'testString' synonym = 'testString' - include_audit = True + include_audit = False # Invoke method response = _service.create_synonym( @@ -4763,6 +4833,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -4787,7 +4859,7 @@ def test_get_synonym_all_params(self): entity = 'testString' value = 'testString' synonym = 'testString' - include_audit = True + include_audit = False # Invoke method response = _service.get_synonym( @@ -4885,6 +4957,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -4910,7 +4984,7 @@ def test_update_synonym_all_params(self): value = 'testString' synonym = 'testString' new_synonym = 'testString' - include_audit = True + include_audit = False # Invoke method response = _service.update_synonym( @@ -5018,6 +5092,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -5104,6 +5180,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -5116,7 +5194,7 @@ def test_list_dialog_nodes_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/dialog_nodes') - mock_response = '{"dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}, "modifiers": {"overwrite": false}}, "context": {"integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}}, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": true, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + mock_response = '{"dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"mapKey": "anyValue"}}}, "metadata": {"mapKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' responses.add(responses.GET, url, body=mock_response, @@ -5126,10 +5204,10 @@ def test_list_dialog_nodes_all_params(self): # Set up parameter values workspace_id = 'testString' page_limit = 38 - include_count = True + include_count = False sort = 'dialog_node' cursor = 'testString' - include_audit = True + include_audit = False # Invoke method response = _service.list_dialog_nodes( @@ -5162,7 +5240,7 @@ def test_list_dialog_nodes_required_params(self): """ 
# Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/dialog_nodes') - mock_response = '{"dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}, "modifiers": {"overwrite": false}}, "context": {"integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}}, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": true, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + mock_response = '{"dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"mapKey": "anyValue"}}}, "metadata": {"mapKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' responses.add(responses.GET, url, body=mock_response, @@ -5190,7 +5268,7 @@ def test_list_dialog_nodes_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/dialog_nodes') - mock_response = '{"dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}, "modifiers": {"overwrite": false}}, "context": {"integrations": {"mapKey": {"mapKey": 
{"anyKey": "anyValue"}}}}, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": true, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' + mock_response = '{"dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"mapKey": "anyValue"}}}, "metadata": {"mapKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}' responses.add(responses.GET, url, body=mock_response, @@ -5220,6 +5298,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -5232,7 +5312,7 @@ def test_create_dialog_node_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/dialog_nodes') - mock_response = '{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}, "modifiers": {"overwrite": false}}, "context": {"integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}}, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": true, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + mock_response = '{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"mapKey": "anyValue"}}}, "metadata": {"mapKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' responses.add(responses.POST, url, body=mock_response, @@ -5271,12 +5351,12 @@ def test_create_dialog_node_all_params(self): dialog_node_output_model['generic'] = [dialog_node_output_generic_model] dialog_node_output_model['integrations'] = {} dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model - dialog_node_output_model['foo'] = { 'foo': 'bar' } + dialog_node_output_model['foo'] = 'testString' # Construct a dict representation of a DialogNodeContext model dialog_node_context_model = {} dialog_node_context_model['integrations'] = {} - dialog_node_context_model['foo'] = { 'foo': 'bar' } + dialog_node_context_model['foo'] = 'testString' # Construct a dict representation of a DialogNodeNextStep model dialog_node_next_step_model = {} @@ -5312,8 +5392,8 @@ def 
test_create_dialog_node_all_params(self): digress_out = 'allow_returning' digress_out_slots = 'not_allowed' user_label = 'testString' - disambiguation_opt_out = True - include_audit = True + disambiguation_opt_out = False + include_audit = False # Invoke method response = _service.create_dialog_node( @@ -5368,7 +5448,7 @@ def test_create_dialog_node_all_params(self): assert req_body['digress_out'] == 'allow_returning' assert req_body['digress_out_slots'] == 'not_allowed' assert req_body['user_label'] == 'testString' - assert req_body['disambiguation_opt_out'] == True + assert req_body['disambiguation_opt_out'] == False @responses.activate @@ -5378,7 +5458,7 @@ def test_create_dialog_node_required_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/dialog_nodes') - mock_response = '{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}, "modifiers": {"overwrite": false}}, "context": {"integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}}, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": true, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + mock_response = '{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"mapKey": "anyValue"}}}, "metadata": {"mapKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' responses.add(responses.POST, url, body=mock_response, @@ -5417,12 +5497,12 @@ def test_create_dialog_node_required_params(self): dialog_node_output_model['generic'] = [dialog_node_output_generic_model] dialog_node_output_model['integrations'] = {} dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model - dialog_node_output_model['foo'] = { 'foo': 'bar' } + 
dialog_node_output_model['foo'] = 'testString' # Construct a dict representation of a DialogNodeContext model dialog_node_context_model = {} dialog_node_context_model['integrations'] = {} - dialog_node_context_model['foo'] = { 'foo': 'bar' } + dialog_node_context_model['foo'] = 'testString' # Construct a dict representation of a DialogNodeNextStep model dialog_node_next_step_model = {} @@ -5458,7 +5538,7 @@ def test_create_dialog_node_required_params(self): digress_out = 'allow_returning' digress_out_slots = 'not_allowed' user_label = 'testString' - disambiguation_opt_out = True + disambiguation_opt_out = False # Invoke method response = _service.create_dialog_node( @@ -5508,7 +5588,7 @@ def test_create_dialog_node_required_params(self): assert req_body['digress_out'] == 'allow_returning' assert req_body['digress_out_slots'] == 'not_allowed' assert req_body['user_label'] == 'testString' - assert req_body['disambiguation_opt_out'] == True + assert req_body['disambiguation_opt_out'] == False @responses.activate @@ -5518,7 +5598,7 @@ def test_create_dialog_node_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/dialog_nodes') - mock_response = '{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}, "modifiers": {"overwrite": false}}, "context": {"integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}}, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": true, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + mock_response = '{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"mapKey": "anyValue"}}}, "metadata": {"mapKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' 
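
A second pattern in the regenerated fixtures is visible in the mock_response strings and model dicts above: dynamic maps are flattened from {"mapKey": {"anyKey": "anyValue"}} to {"mapKey": "anyValue"}, and additional properties such as dialog_node_output_model['foo'] change from a nested dict to a plain 'testString'. Both suggest the regenerated models now treat map and additional-property values as arbitrary JSON values rather than assuming nested objects. A minimal illustration of the two shapes, reusing the fixtures' own placeholder values:

import json

# Shape assumed by the previous generated tests: every map value was itself a map.
old = json.loads('{"metadata": {"mapKey": {"anyKey": "anyValue"}}}')
# Shape used by the regenerated tests: a map value is any plain JSON value.
new = json.loads('{"metadata": {"mapKey": "anyValue"}}')

assert isinstance(old['metadata']['mapKey'], dict)
assert isinstance(new['metadata']['mapKey'], str)
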
responses.add(responses.POST, url, body=mock_response, @@ -5557,12 +5637,12 @@ def test_create_dialog_node_value_error(self): dialog_node_output_model['generic'] = [dialog_node_output_generic_model] dialog_node_output_model['integrations'] = {} dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model - dialog_node_output_model['foo'] = { 'foo': 'bar' } + dialog_node_output_model['foo'] = 'testString' # Construct a dict representation of a DialogNodeContext model dialog_node_context_model = {} dialog_node_context_model['integrations'] = {} - dialog_node_context_model['foo'] = { 'foo': 'bar' } + dialog_node_context_model['foo'] = 'testString' # Construct a dict representation of a DialogNodeNextStep model dialog_node_next_step_model = {} @@ -5598,7 +5678,7 @@ def test_create_dialog_node_value_error(self): digress_out = 'allow_returning' digress_out_slots = 'not_allowed' user_label = 'testString' - disambiguation_opt_out = True + disambiguation_opt_out = False # Pass in all but one required param and check for a ValueError req_param_dict = { @@ -5621,6 +5701,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -5633,7 +5715,7 @@ def test_get_dialog_node_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/dialog_nodes/testString') - mock_response = '{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}, "modifiers": {"overwrite": false}}, "context": {"integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}}, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": true, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + mock_response = '{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"mapKey": "anyValue"}}}, "metadata": {"mapKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": 
[{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' responses.add(responses.GET, url, body=mock_response, @@ -5643,7 +5725,7 @@ def test_get_dialog_node_all_params(self): # Set up parameter values workspace_id = 'testString' dialog_node = 'testString' - include_audit = True + include_audit = False # Invoke method response = _service.get_dialog_node( @@ -5669,7 +5751,7 @@ def test_get_dialog_node_required_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/dialog_nodes/testString') - mock_response = '{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}, "modifiers": {"overwrite": false}}, "context": {"integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}}, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": true, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + mock_response = '{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"mapKey": "anyValue"}}}, "metadata": {"mapKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' responses.add(responses.GET, url, body=mock_response, @@ -5699,7 +5781,7 @@ def test_get_dialog_node_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/dialog_nodes/testString') - mock_response = '{"dialog_node": "dialog_node", "description": "description", "conditions": 
"conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}, "modifiers": {"overwrite": false}}, "context": {"integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}}, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": true, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + mock_response = '{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"mapKey": "anyValue"}}}, "metadata": {"mapKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' responses.add(responses.GET, url, body=mock_response, @@ -5731,6 +5813,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -5743,7 +5827,7 @@ def test_update_dialog_node_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/dialog_nodes/testString') - mock_response = '{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}, "modifiers": {"overwrite": false}}, "context": {"integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}}, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": true, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + mock_response = '{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"mapKey": "anyValue"}}}, "metadata": {"mapKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' responses.add(responses.POST, url, body=mock_response, @@ -5782,12 +5866,12 @@ def test_update_dialog_node_all_params(self): dialog_node_output_model['generic'] = [dialog_node_output_generic_model] dialog_node_output_model['integrations'] = {} dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model - dialog_node_output_model['foo'] = { 'foo': 'bar' } + dialog_node_output_model['foo'] = 'testString' # Construct a dict representation of a DialogNodeContext model dialog_node_context_model = {} dialog_node_context_model['integrations'] = {} - dialog_node_context_model['foo'] = { 'foo': 'bar' } + dialog_node_context_model['foo'] = 'testString' # Construct a dict representation of a DialogNodeNextStep model dialog_node_next_step_model = {} @@ -5824,8 +5908,8 @@ def 
test_update_dialog_node_all_params(self): new_digress_out = 'allow_returning' new_digress_out_slots = 'not_allowed' new_user_label = 'testString' - new_disambiguation_opt_out = True - include_audit = True + new_disambiguation_opt_out = False + include_audit = False # Invoke method response = _service.update_dialog_node( @@ -5881,7 +5965,7 @@ def test_update_dialog_node_all_params(self): assert req_body['digress_out'] == 'allow_returning' assert req_body['digress_out_slots'] == 'not_allowed' assert req_body['user_label'] == 'testString' - assert req_body['disambiguation_opt_out'] == True + assert req_body['disambiguation_opt_out'] == False @responses.activate @@ -5891,7 +5975,7 @@ def test_update_dialog_node_required_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/dialog_nodes/testString') - mock_response = '{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}, "modifiers": {"overwrite": false}}, "context": {"integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}}, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": true, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + mock_response = '{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"mapKey": "anyValue"}}}, "metadata": {"mapKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' responses.add(responses.POST, url, body=mock_response, @@ -5930,12 +6014,12 @@ def test_update_dialog_node_required_params(self): dialog_node_output_model['generic'] = [dialog_node_output_generic_model] dialog_node_output_model['integrations'] = {} dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model - dialog_node_output_model['foo'] = { 
'foo': 'bar' } + dialog_node_output_model['foo'] = 'testString' # Construct a dict representation of a DialogNodeContext model dialog_node_context_model = {} dialog_node_context_model['integrations'] = {} - dialog_node_context_model['foo'] = { 'foo': 'bar' } + dialog_node_context_model['foo'] = 'testString' # Construct a dict representation of a DialogNodeNextStep model dialog_node_next_step_model = {} @@ -5972,7 +6056,7 @@ def test_update_dialog_node_required_params(self): new_digress_out = 'allow_returning' new_digress_out_slots = 'not_allowed' new_user_label = 'testString' - new_disambiguation_opt_out = True + new_disambiguation_opt_out = False # Invoke method response = _service.update_dialog_node( @@ -6023,7 +6107,7 @@ def test_update_dialog_node_required_params(self): assert req_body['digress_out'] == 'allow_returning' assert req_body['digress_out_slots'] == 'not_allowed' assert req_body['user_label'] == 'testString' - assert req_body['disambiguation_opt_out'] == True + assert req_body['disambiguation_opt_out'] == False @responses.activate @@ -6033,7 +6117,7 @@ def test_update_dialog_node_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/dialog_nodes/testString') - mock_response = '{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}, "modifiers": {"overwrite": false}}, "context": {"integrations": {"mapKey": {"mapKey": {"anyKey": "anyValue"}}}}, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": true, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}' + mock_response = '{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "channel_transfer", "message_to_user": "message_to_user", "transfer_info": {"target": {"chat": {"url": "url"}}}, "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"mapKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"mapKey": "anyValue"}}}, "metadata": {"mapKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": 
"2019-01-01T12:00:00.000Z"}' responses.add(responses.POST, url, body=mock_response, @@ -6072,12 +6156,12 @@ def test_update_dialog_node_value_error(self): dialog_node_output_model['generic'] = [dialog_node_output_generic_model] dialog_node_output_model['integrations'] = {} dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model - dialog_node_output_model['foo'] = { 'foo': 'bar' } + dialog_node_output_model['foo'] = 'testString' # Construct a dict representation of a DialogNodeContext model dialog_node_context_model = {} dialog_node_context_model['integrations'] = {} - dialog_node_context_model['foo'] = { 'foo': 'bar' } + dialog_node_context_model['foo'] = 'testString' # Construct a dict representation of a DialogNodeNextStep model dialog_node_next_step_model = {} @@ -6114,7 +6198,7 @@ def test_update_dialog_node_value_error(self): new_digress_out = 'allow_returning' new_digress_out_slots = 'not_allowed' new_user_label = 'testString' - new_disambiguation_opt_out = True + new_disambiguation_opt_out = False # Pass in all but one required param and check for a ValueError req_param_dict = { @@ -6137,6 +6221,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -6215,6 +6301,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -6227,7 +6315,7 @@ def test_list_logs_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/logs') - mock_response = '{"logs": [{"request": {"input": {"text": "text", "spelling_suggestions": true, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"mapKey": {"anyKey": "anyValue"}}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], 
"log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "text": ["text"], "generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"text": "text", "spelling_suggestions": true, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}]}}], "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "response": {"input": {"text": "text", "spelling_suggestions": true, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"mapKey": {"anyKey": "anyValue"}}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "text": ["text"], "generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"text": "text", 
"spelling_suggestions": true, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}]}}], "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "log_id": "log_id", "request_timestamp": "request_timestamp", "response_timestamp": "response_timestamp", "workspace_id": "workspace_id", "language": "language"}], "pagination": {"next_url": "next_url", "matched": 7, "next_cursor": "next_cursor"}}' + mock_response = '{"logs": [{"request": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"mapKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "text": ["text"], "generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": 
"suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}]}}], "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "response": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"mapKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "text": ["text"], "generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", 
"granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}]}}], "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "log_id": "log_id", "request_timestamp": "request_timestamp", "response_timestamp": "response_timestamp", "workspace_id": "workspace_id", "language": "language"}], "pagination": {"next_url": "next_url", "matched": 7, "next_cursor": "next_cursor"}}' responses.add(responses.GET, url, body=mock_response, @@ -6270,7 +6358,7 @@ def test_list_logs_required_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/logs') - mock_response = '{"logs": [{"request": {"input": {"text": "text", "spelling_suggestions": true, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"mapKey": {"anyKey": "anyValue"}}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "text": ["text"], "generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"text": "text", "spelling_suggestions": true, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", 
"location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}]}}], "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "response": {"input": {"text": "text", "spelling_suggestions": true, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"mapKey": {"anyKey": "anyValue"}}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "text": ["text"], "generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"text": "text", "spelling_suggestions": true, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, 
"specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}]}}], "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "log_id": "log_id", "request_timestamp": "request_timestamp", "response_timestamp": "response_timestamp", "workspace_id": "workspace_id", "language": "language"}], "pagination": {"next_url": "next_url", "matched": 7, "next_cursor": "next_cursor"}}' + mock_response = '{"logs": [{"request": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"mapKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "text": ["text"], "generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, 
"specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}]}}], "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "response": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"mapKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "text": ["text"], "generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}]}}], "channels": [{"channel": "chat"}]}]}, "actions": 
[{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "log_id": "log_id", "request_timestamp": "request_timestamp", "response_timestamp": "response_timestamp", "workspace_id": "workspace_id", "language": "language"}], "pagination": {"next_url": "next_url", "matched": 7, "next_cursor": "next_cursor"}}' responses.add(responses.GET, url, body=mock_response, @@ -6298,7 +6386,7 @@ def test_list_logs_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/workspaces/testString/logs') - mock_response = '{"logs": [{"request": {"input": {"text": "text", "spelling_suggestions": true, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"mapKey": {"anyKey": "anyValue"}}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "text": ["text"], "generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"text": "text", "spelling_suggestions": true, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": 
[{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}]}}], "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "response": {"input": {"text": "text", "spelling_suggestions": true, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"mapKey": {"anyKey": "anyValue"}}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "text": ["text"], "generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"text": "text", "spelling_suggestions": true, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}]}}], "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "log_id": "log_id", "request_timestamp": 
"request_timestamp", "response_timestamp": "response_timestamp", "workspace_id": "workspace_id", "language": "language"}], "pagination": {"next_url": "next_url", "matched": 7, "next_cursor": "next_cursor"}}' + mock_response = '{"logs": [{"request": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"mapKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "text": ["text"], "generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}]}}], "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "response": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": 
"original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"mapKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "text": ["text"], "generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}]}}], "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "log_id": "log_id", "request_timestamp": "request_timestamp", "response_timestamp": "response_timestamp", "workspace_id": "workspace_id", "language": "language"}], "pagination": {"next_url": "next_url", "matched": 7, "next_cursor": "next_cursor"}}' responses.add(responses.GET, url, body=mock_response, @@ -6328,6 +6416,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -6340,7 +6430,7 @@ def test_list_all_logs_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/logs') - mock_response = '{"logs": [{"request": {"input": {"text": "text", "spelling_suggestions": true, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"mapKey": {"anyKey": "anyValue"}}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "text": ["text"], "generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"text": "text", "spelling_suggestions": true, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}]}}], "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": 
"credentials"}], "user_id": "user_id"}, "response": {"input": {"text": "text", "spelling_suggestions": true, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"mapKey": {"anyKey": "anyValue"}}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "text": ["text"], "generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"text": "text", "spelling_suggestions": true, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}]}}], "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "log_id": "log_id", "request_timestamp": "request_timestamp", "response_timestamp": "response_timestamp", "workspace_id": "workspace_id", "language": "language"}], "pagination": {"next_url": "next_url", "matched": 7, "next_cursor": "next_cursor"}}' + mock_response = '{"logs": [{"request": {"input": {"text": 
"text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"mapKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "text": ["text"], "generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}]}}], "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "response": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": 
{"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"mapKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "text": ["text"], "generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}]}}], "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "log_id": "log_id", "request_timestamp": "request_timestamp", "response_timestamp": "response_timestamp", "workspace_id": "workspace_id", "language": "language"}], "pagination": {"next_url": "next_url", "matched": 7, "next_cursor": "next_cursor"}}' responses.add(responses.GET, url, body=mock_response, @@ -6381,7 +6471,7 @@ def test_list_all_logs_required_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/logs') - mock_response = '{"logs": [{"request": {"input": {"text": "text", "spelling_suggestions": true, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": 
{"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"mapKey": {"anyKey": "anyValue"}}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "text": ["text"], "generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"text": "text", "spelling_suggestions": true, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}]}}], "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "response": {"input": {"text": "text", "spelling_suggestions": true, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, 
"relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"mapKey": {"anyKey": "anyValue"}}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "text": ["text"], "generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"text": "text", "spelling_suggestions": true, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}]}}], "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "log_id": "log_id", "request_timestamp": "request_timestamp", "response_timestamp": "response_timestamp", "workspace_id": "workspace_id", "language": "language"}], "pagination": {"next_url": "next_url", "matched": 7, "next_cursor": "next_cursor"}}' + mock_response = '{"logs": [{"request": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, 
"specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"mapKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "text": ["text"], "generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}]}}], "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "response": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 
10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"mapKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "text": ["text"], "generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}]}}], "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "log_id": "log_id", "request_timestamp": "request_timestamp", "response_timestamp": "response_timestamp", "workspace_id": "workspace_id", "language": "language"}], "pagination": {"next_url": "next_url", "matched": 7, "next_cursor": "next_cursor"}}' responses.add(responses.GET, url, body=mock_response, @@ -6413,7 +6503,7 @@ def test_list_all_logs_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/logs') - mock_response = '{"logs": [{"request": {"input": {"text": "text", "spelling_suggestions": true, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 
15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"mapKey": {"anyKey": "anyValue"}}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "text": ["text"], "generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"text": "text", "spelling_suggestions": true, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}]}}], "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "response": {"input": {"text": "text", "spelling_suggestions": true, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"mapKey": {"anyKey": "anyValue"}}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": 
["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "text": ["text"], "generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"text": "text", "spelling_suggestions": true, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}]}}], "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "log_id": "log_id", "request_timestamp": "request_timestamp", "response_timestamp": "response_timestamp", "workspace_id": "workspace_id", "language": "language"}], "pagination": {"next_url": "next_url", "matched": 7, "next_cursor": "next_cursor"}}' + mock_response = '{"logs": [{"request": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"mapKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", 
"conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "text": ["text"], "generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}]}}], "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "response": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"mapKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "text": ["text"], "generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"text": "text", "spelling_suggestions": false, 
"spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}]}}], "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "log_id": "log_id", "request_timestamp": "request_timestamp", "response_timestamp": "response_timestamp", "workspace_id": "workspace_id", "language": "language"}], "pagination": {"next_url": "next_url", "matched": 7, "next_cursor": "next_cursor"}}' responses.add(responses.GET, url, body=mock_response, @@ -6453,6 +6543,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -6522,7 +6614,7 @@ def test_delete_user_data_value_error(self): # Start of Model Tests ############################################################################## # region -class TestAgentAvailabilityMessage(): +class TestModel_AgentAvailabilityMessage(): """ Test Class for AgentAvailabilityMessage """ @@ -6551,7 +6643,7 @@ def test_agent_availability_message_serialization(self): agent_availability_message_model_json2 = agent_availability_message_model.to_dict() assert agent_availability_message_model_json2 == agent_availability_message_model_json -class TestBulkClassifyOutput(): +class TestModel_BulkClassifyOutput(): """ Test Class for BulkClassifyOutput """ @@ -6641,7 +6733,7 @@ def test_bulk_classify_output_serialization(self): bulk_classify_output_model_json2 = bulk_classify_output_model.to_dict() assert bulk_classify_output_model_json2 == bulk_classify_output_model_json -class TestBulkClassifyResponse(): +class TestModel_BulkClassifyResponse(): """ Test Class for BulkClassifyResponse """ @@ -6734,7 +6826,7 @@ def test_bulk_classify_response_serialization(self): bulk_classify_response_model_json2 = bulk_classify_response_model.to_dict() assert bulk_classify_response_model_json2 == bulk_classify_response_model_json -class TestBulkClassifyUtterance(): +class TestModel_BulkClassifyUtterance(): """ Test Class for BulkClassifyUtterance """ @@ -6763,7 +6855,7 @@ def test_bulk_classify_utterance_serialization(self): bulk_classify_utterance_model_json2 = bulk_classify_utterance_model.to_dict() assert bulk_classify_utterance_model_json2 == 
bulk_classify_utterance_model_json -class TestCaptureGroup(): +class TestModel_CaptureGroup(): """ Test Class for CaptureGroup """ @@ -6793,7 +6885,7 @@ def test_capture_group_serialization(self): capture_group_model_json2 = capture_group_model.to_dict() assert capture_group_model_json2 == capture_group_model_json -class TestChannelTransferInfo(): +class TestModel_ChannelTransferInfo(): """ Test Class for ChannelTransferInfo """ @@ -6830,7 +6922,7 @@ def test_channel_transfer_info_serialization(self): channel_transfer_info_model_json2 = channel_transfer_info_model.to_dict() assert channel_transfer_info_model_json2 == channel_transfer_info_model_json -class TestChannelTransferTarget(): +class TestModel_ChannelTransferTarget(): """ Test Class for ChannelTransferTarget """ @@ -6864,7 +6956,7 @@ def test_channel_transfer_target_serialization(self): channel_transfer_target_model_json2 = channel_transfer_target_model.to_dict() assert channel_transfer_target_model_json2 == channel_transfer_target_model_json -class TestChannelTransferTargetChat(): +class TestModel_ChannelTransferTargetChat(): """ Test Class for ChannelTransferTargetChat """ @@ -6893,7 +6985,7 @@ def test_channel_transfer_target_chat_serialization(self): channel_transfer_target_chat_model_json2 = channel_transfer_target_chat_model.to_dict() assert channel_transfer_target_chat_model_json2 == channel_transfer_target_chat_model_json -class TestContext(): +class TestModel_Context(): """ Test Class for Context """ @@ -6914,7 +7006,7 @@ def test_context_serialization(self): context_model_json['conversation_id'] = 'testString' context_model_json['system'] = {} context_model_json['metadata'] = message_context_metadata_model - context_model_json['foo'] = { 'foo': 'bar' } + context_model_json['foo'] = 'testString' # Construct a model instance of Context by calling from_dict on the json representation context_model = Context.from_dict(context_model_json) @@ -6931,7 +7023,17 @@ def test_context_serialization(self): context_model_json2 = context_model.to_dict() assert context_model_json2 == context_model_json -class TestCounterexample(): + # Test get_properties and set_properties methods. 
+ context_model.set_properties({}) + actual_dict = context_model.get_properties() + assert actual_dict == {} + + expected_dict = {'foo': 'testString'} + context_model.set_properties(expected_dict) + actual_dict = context_model.get_properties() + assert actual_dict == expected_dict + +class TestModel_Counterexample(): """ Test Class for Counterexample """ @@ -6944,8 +7046,8 @@ def test_counterexample_serialization(self): # Construct a json representation of a Counterexample model counterexample_model_json = {} counterexample_model_json['text'] = 'testString' - counterexample_model_json['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - counterexample_model_json['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + counterexample_model_json['created'] = "2019-01-01T12:00:00Z" + counterexample_model_json['updated'] = "2019-01-01T12:00:00Z" # Construct a model instance of Counterexample by calling from_dict on the json representation counterexample_model = Counterexample.from_dict(counterexample_model_json) @@ -6962,7 +7064,7 @@ def test_counterexample_serialization(self): counterexample_model_json2 = counterexample_model.to_dict() assert counterexample_model_json2 == counterexample_model_json -class TestCounterexampleCollection(): +class TestModel_CounterexampleCollection(): """ Test Class for CounterexampleCollection """ @@ -6976,8 +7078,8 @@ def test_counterexample_collection_serialization(self): counterexample_model = {} # Counterexample counterexample_model['text'] = 'testString' - counterexample_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - counterexample_model['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + counterexample_model['created'] = "2019-01-01T12:00:00Z" + counterexample_model['updated'] = "2019-01-01T12:00:00Z" pagination_model = {} # Pagination pagination_model['refresh_url'] = 'testString' @@ -7007,7 +7109,7 @@ def test_counterexample_collection_serialization(self): counterexample_collection_model_json2 = counterexample_collection_model.to_dict() assert counterexample_collection_model_json2 == counterexample_collection_model_json -class TestCreateEntity(): +class TestModel_CreateEntity(): """ Test Class for CreateEntity """ @@ -7025,8 +7127,8 @@ def test_create_entity_serialization(self): create_value_model['type'] = 'synonyms' create_value_model['synonyms'] = ['testString'] create_value_model['patterns'] = ['testString'] - create_value_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - create_value_model['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + create_value_model['created'] = "2019-01-01T12:00:00Z" + create_value_model['updated'] = "2019-01-01T12:00:00Z" # Construct a json representation of a CreateEntity model create_entity_model_json = {} @@ -7034,8 +7136,8 @@ def test_create_entity_serialization(self): create_entity_model_json['description'] = 'testString' create_entity_model_json['metadata'] = {} create_entity_model_json['fuzzy_match'] = True - create_entity_model_json['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - create_entity_model_json['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + create_entity_model_json['created'] = "2019-01-01T12:00:00Z" + create_entity_model_json['updated'] = "2019-01-01T12:00:00Z" create_entity_model_json['values'] = [create_value_model] # Construct a model instance 
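The new `get_properties`/`set_properties` assertions (here for `Context`, and further down for `DialogNodeContext`, `DialogNodeOutput`, and `MessageInput`) exercise models that accept arbitrary additional properties alongside their declared attributes. A simplified sketch of that dynamic-properties pattern — the class body is an assumption for illustration, not the SDK's implementation:

```python
# Rough sketch of a model with declared attributes plus arbitrary extras.
class DynamicModel:
    _declared = {'conversation_id', 'system', 'metadata'}

    def set_properties(self, props: dict) -> None:
        # Replace every non-declared property with the given mapping.
        for name in list(vars(self)):
            if name not in self._declared:
                delattr(self, name)
        for name, value in props.items():
            setattr(self, name, value)

    def get_properties(self) -> dict:
        # Return only the arbitrary (non-declared) properties.
        return {k: v for k, v in vars(self).items() if k not in self._declared}

m = DynamicModel()
m.set_properties({})
assert m.get_properties() == {}
m.set_properties({'foo': 'testString'})
assert m.get_properties() == {'foo': 'testString'}
```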
of CreateEntity by calling from_dict on the json representation @@ -7053,7 +7155,7 @@ def test_create_entity_serialization(self): create_entity_model_json2 = create_entity_model.to_dict() assert create_entity_model_json2 == create_entity_model_json -class TestCreateIntent(): +class TestModel_CreateIntent(): """ Test Class for CreateIntent """ @@ -7072,15 +7174,15 @@ def test_create_intent_serialization(self): example_model = {} # Example example_model['text'] = 'testString' example_model['mentions'] = [mention_model] - example_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - example_model['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + example_model['created'] = "2019-01-01T12:00:00Z" + example_model['updated'] = "2019-01-01T12:00:00Z" # Construct a json representation of a CreateIntent model create_intent_model_json = {} create_intent_model_json['intent'] = 'testString' create_intent_model_json['description'] = 'testString' - create_intent_model_json['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - create_intent_model_json['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + create_intent_model_json['created'] = "2019-01-01T12:00:00Z" + create_intent_model_json['updated'] = "2019-01-01T12:00:00Z" create_intent_model_json['examples'] = [example_model] # Construct a model instance of CreateIntent by calling from_dict on the json representation @@ -7098,7 +7200,7 @@ def test_create_intent_serialization(self): create_intent_model_json2 = create_intent_model.to_dict() assert create_intent_model_json2 == create_intent_model_json -class TestCreateValue(): +class TestModel_CreateValue(): """ Test Class for CreateValue """ @@ -7115,8 +7217,8 @@ def test_create_value_serialization(self): create_value_model_json['type'] = 'synonyms' create_value_model_json['synonyms'] = ['testString'] create_value_model_json['patterns'] = ['testString'] - create_value_model_json['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - create_value_model_json['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + create_value_model_json['created'] = "2019-01-01T12:00:00Z" + create_value_model_json['updated'] = "2019-01-01T12:00:00Z" # Construct a model instance of CreateValue by calling from_dict on the json representation create_value_model = CreateValue.from_dict(create_value_model_json) @@ -7133,7 +7235,7 @@ def test_create_value_serialization(self): create_value_model_json2 = create_value_model.to_dict() assert create_value_model_json2 == create_value_model_json -class TestDialogNode(): +class TestModel_DialogNode(): """ Test Class for DialogNode """ @@ -7170,11 +7272,11 @@ def test_dialog_node_serialization(self): dialog_node_output_model['generic'] = [dialog_node_output_generic_model] dialog_node_output_model['integrations'] = {} dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model - dialog_node_output_model['foo'] = { 'foo': 'bar' } + dialog_node_output_model['foo'] = 'testString' dialog_node_context_model = {} # DialogNodeContext dialog_node_context_model['integrations'] = {} - dialog_node_context_model['foo'] = { 'foo': 'bar' } + dialog_node_context_model['foo'] = 'testString' dialog_node_next_step_model = {} # DialogNodeNextStep dialog_node_next_step_model['behavior'] = 'get_user_input' @@ -7208,10 +7310,10 @@ def test_dialog_node_serialization(self): dialog_node_model_json['digress_out'] = 
'allow_returning' dialog_node_model_json['digress_out_slots'] = 'not_allowed' dialog_node_model_json['user_label'] = 'testString' - dialog_node_model_json['disambiguation_opt_out'] = True + dialog_node_model_json['disambiguation_opt_out'] = False dialog_node_model_json['disabled'] = True - dialog_node_model_json['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - dialog_node_model_json['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + dialog_node_model_json['created'] = "2019-01-01T12:00:00Z" + dialog_node_model_json['updated'] = "2019-01-01T12:00:00Z" # Construct a model instance of DialogNode by calling from_dict on the json representation dialog_node_model = DialogNode.from_dict(dialog_node_model_json) @@ -7228,7 +7330,7 @@ def test_dialog_node_serialization(self): dialog_node_model_json2 = dialog_node_model.to_dict() assert dialog_node_model_json2 == dialog_node_model_json -class TestDialogNodeAction(): +class TestModel_DialogNodeAction(): """ Test Class for DialogNodeAction """ @@ -7261,7 +7363,7 @@ def test_dialog_node_action_serialization(self): dialog_node_action_model_json2 = dialog_node_action_model.to_dict() assert dialog_node_action_model_json2 == dialog_node_action_model_json -class TestDialogNodeCollection(): +class TestModel_DialogNodeCollection(): """ Test Class for DialogNodeCollection """ @@ -7298,11 +7400,11 @@ def test_dialog_node_collection_serialization(self): dialog_node_output_model['generic'] = [dialog_node_output_generic_model] dialog_node_output_model['integrations'] = {} dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model - dialog_node_output_model['foo'] = { 'foo': 'bar' } + dialog_node_output_model['foo'] = 'testString' dialog_node_context_model = {} # DialogNodeContext dialog_node_context_model['integrations'] = {} - dialog_node_context_model['foo'] = { 'foo': 'bar' } + dialog_node_context_model['foo'] = 'testString' dialog_node_next_step_model = {} # DialogNodeNextStep dialog_node_next_step_model['behavior'] = 'get_user_input' @@ -7335,10 +7437,10 @@ def test_dialog_node_collection_serialization(self): dialog_node_model['digress_out'] = 'allow_returning' dialog_node_model['digress_out_slots'] = 'not_allowed' dialog_node_model['user_label'] = 'testString' - dialog_node_model['disambiguation_opt_out'] = True + dialog_node_model['disambiguation_opt_out'] = False dialog_node_model['disabled'] = True - dialog_node_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - dialog_node_model['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + dialog_node_model['created'] = "2019-01-01T12:00:00Z" + dialog_node_model['updated'] = "2019-01-01T12:00:00Z" pagination_model = {} # Pagination pagination_model['refresh_url'] = 'testString' @@ -7368,7 +7470,7 @@ def test_dialog_node_collection_serialization(self): dialog_node_collection_model_json2 = dialog_node_collection_model.to_dict() assert dialog_node_collection_model_json2 == dialog_node_collection_model_json -class TestDialogNodeContext(): +class TestModel_DialogNodeContext(): """ Test Class for DialogNodeContext """ @@ -7381,7 +7483,7 @@ def test_dialog_node_context_serialization(self): # Construct a json representation of a DialogNodeContext model dialog_node_context_model_json = {} dialog_node_context_model_json['integrations'] = {} - dialog_node_context_model_json['foo'] = { 'foo': 'bar' } + dialog_node_context_model_json['foo'] = 'testString' # Construct a model instance 
of DialogNodeContext by calling from_dict on the json representation dialog_node_context_model = DialogNodeContext.from_dict(dialog_node_context_model_json) @@ -7398,7 +7500,17 @@ def test_dialog_node_context_serialization(self): dialog_node_context_model_json2 = dialog_node_context_model.to_dict() assert dialog_node_context_model_json2 == dialog_node_context_model_json -class TestDialogNodeNextStep(): + # Test get_properties and set_properties methods. + dialog_node_context_model.set_properties({}) + actual_dict = dialog_node_context_model.get_properties() + assert actual_dict == {} + + expected_dict = {'foo': 'testString'} + dialog_node_context_model.set_properties(expected_dict) + actual_dict = dialog_node_context_model.get_properties() + assert actual_dict == expected_dict + +class TestModel_DialogNodeNextStep(): """ Test Class for DialogNodeNextStep """ @@ -7429,7 +7541,7 @@ def test_dialog_node_next_step_serialization(self): dialog_node_next_step_model_json2 = dialog_node_next_step_model.to_dict() assert dialog_node_next_step_model_json2 == dialog_node_next_step_model_json -class TestDialogNodeOutput(): +class TestModel_DialogNodeOutput(): """ Test Class for DialogNodeOutput """ @@ -7467,7 +7579,7 @@ def test_dialog_node_output_serialization(self): dialog_node_output_model_json['generic'] = [dialog_node_output_generic_model] dialog_node_output_model_json['integrations'] = {} dialog_node_output_model_json['modifiers'] = dialog_node_output_modifiers_model - dialog_node_output_model_json['foo'] = { 'foo': 'bar' } + dialog_node_output_model_json['foo'] = 'testString' # Construct a model instance of DialogNodeOutput by calling from_dict on the json representation dialog_node_output_model = DialogNodeOutput.from_dict(dialog_node_output_model_json) @@ -7484,7 +7596,17 @@ def test_dialog_node_output_serialization(self): dialog_node_output_model_json2 = dialog_node_output_model.to_dict() assert dialog_node_output_model_json2 == dialog_node_output_model_json -class TestDialogNodeOutputConnectToAgentTransferInfo(): + # Test get_properties and set_properties methods. 
+ dialog_node_output_model.set_properties({}) + actual_dict = dialog_node_output_model.get_properties() + assert actual_dict == {} + + expected_dict = {'foo': 'testString'} + dialog_node_output_model.set_properties(expected_dict) + actual_dict = dialog_node_output_model.get_properties() + assert actual_dict == expected_dict + +class TestModel_DialogNodeOutputConnectToAgentTransferInfo(): """ Test Class for DialogNodeOutputConnectToAgentTransferInfo """ @@ -7513,7 +7635,7 @@ def test_dialog_node_output_connect_to_agent_transfer_info_serialization(self): dialog_node_output_connect_to_agent_transfer_info_model_json2 = dialog_node_output_connect_to_agent_transfer_info_model.to_dict() assert dialog_node_output_connect_to_agent_transfer_info_model_json2 == dialog_node_output_connect_to_agent_transfer_info_model_json -class TestDialogNodeOutputModifiers(): +class TestModel_DialogNodeOutputModifiers(): """ Test Class for DialogNodeOutputModifiers """ @@ -7542,7 +7664,7 @@ def test_dialog_node_output_modifiers_serialization(self): dialog_node_output_modifiers_model_json2 = dialog_node_output_modifiers_model.to_dict() assert dialog_node_output_modifiers_model_json2 == dialog_node_output_modifiers_model_json -class TestDialogNodeOutputOptionsElement(): +class TestModel_DialogNodeOutputOptionsElement(): """ Test Class for DialogNodeOutputOptionsElement """ @@ -7556,11 +7678,11 @@ def test_dialog_node_output_options_element_serialization(self): message_input_model = {} # MessageInput message_input_model['text'] = 'testString' - message_input_model['spelling_suggestions'] = True - message_input_model['spelling_auto_correct'] = True + message_input_model['spelling_suggestions'] = False + message_input_model['spelling_auto_correct'] = False message_input_model['suggested_text'] = 'testString' message_input_model['original_text'] = 'testString' - message_input_model['foo'] = { 'foo': 'bar' } + message_input_model['foo'] = 'testString' runtime_intent_model = {} # RuntimeIntent runtime_intent_model['intent'] = 'testString' @@ -7641,7 +7763,7 @@ def test_dialog_node_output_options_element_serialization(self): dialog_node_output_options_element_model_json2 = dialog_node_output_options_element_model.to_dict() assert dialog_node_output_options_element_model_json2 == dialog_node_output_options_element_model_json -class TestDialogNodeOutputOptionsElementValue(): +class TestModel_DialogNodeOutputOptionsElementValue(): """ Test Class for DialogNodeOutputOptionsElementValue """ @@ -7655,11 +7777,11 @@ def test_dialog_node_output_options_element_value_serialization(self): message_input_model = {} # MessageInput message_input_model['text'] = 'testString' - message_input_model['spelling_suggestions'] = True - message_input_model['spelling_auto_correct'] = True + message_input_model['spelling_suggestions'] = False + message_input_model['spelling_auto_correct'] = False message_input_model['suggested_text'] = 'testString' message_input_model['original_text'] = 'testString' - message_input_model['foo'] = { 'foo': 'bar' } + message_input_model['foo'] = 'testString' runtime_intent_model = {} # RuntimeIntent runtime_intent_model['intent'] = 'testString' @@ -7736,7 +7858,7 @@ def test_dialog_node_output_options_element_value_serialization(self): dialog_node_output_options_element_value_model_json2 = dialog_node_output_options_element_value_model.to_dict() assert dialog_node_output_options_element_value_model_json2 == dialog_node_output_options_element_value_model_json -class TestDialogNodeOutputTextValuesElement(): +class 
TestModel_DialogNodeOutputTextValuesElement(): """ Test Class for DialogNodeOutputTextValuesElement """ @@ -7765,7 +7887,7 @@ def test_dialog_node_output_text_values_element_serialization(self): dialog_node_output_text_values_element_model_json2 = dialog_node_output_text_values_element_model.to_dict() assert dialog_node_output_text_values_element_model_json2 == dialog_node_output_text_values_element_model_json -class TestDialogNodeVisitedDetails(): +class TestModel_DialogNodeVisitedDetails(): """ Test Class for DialogNodeVisitedDetails """ @@ -7796,7 +7918,7 @@ def test_dialog_node_visited_details_serialization(self): dialog_node_visited_details_model_json2 = dialog_node_visited_details_model.to_dict() assert dialog_node_visited_details_model_json2 == dialog_node_visited_details_model_json -class TestDialogSuggestion(): +class TestModel_DialogSuggestion(): """ Test Class for DialogSuggestion """ @@ -7810,11 +7932,11 @@ def test_dialog_suggestion_serialization(self): message_input_model = {} # MessageInput message_input_model['text'] = 'testString' - message_input_model['spelling_suggestions'] = True - message_input_model['spelling_auto_correct'] = True + message_input_model['spelling_suggestions'] = False + message_input_model['spelling_auto_correct'] = False message_input_model['suggested_text'] = 'testString' message_input_model['original_text'] = 'testString' - message_input_model['foo'] = { 'foo': 'bar' } + message_input_model['foo'] = 'testString' runtime_intent_model = {} # RuntimeIntent runtime_intent_model['intent'] = 'testString' @@ -7897,7 +8019,7 @@ def test_dialog_suggestion_serialization(self): dialog_suggestion_model_json2 = dialog_suggestion_model.to_dict() assert dialog_suggestion_model_json2 == dialog_suggestion_model_json -class TestDialogSuggestionValue(): +class TestModel_DialogSuggestionValue(): """ Test Class for DialogSuggestionValue """ @@ -7911,11 +8033,11 @@ def test_dialog_suggestion_value_serialization(self): message_input_model = {} # MessageInput message_input_model['text'] = 'testString' - message_input_model['spelling_suggestions'] = True - message_input_model['spelling_auto_correct'] = True + message_input_model['spelling_suggestions'] = False + message_input_model['spelling_auto_correct'] = False message_input_model['suggested_text'] = 'testString' message_input_model['original_text'] = 'testString' - message_input_model['foo'] = { 'foo': 'bar' } + message_input_model['foo'] = 'testString' runtime_intent_model = {} # RuntimeIntent runtime_intent_model['intent'] = 'testString' @@ -7992,7 +8114,7 @@ def test_dialog_suggestion_value_serialization(self): dialog_suggestion_value_model_json2 = dialog_suggestion_value_model.to_dict() assert dialog_suggestion_value_model_json2 == dialog_suggestion_value_model_json -class TestEntity(): +class TestModel_Entity(): """ Test Class for Entity """ @@ -8010,8 +8132,8 @@ def test_entity_serialization(self): value_model['type'] = 'synonyms' value_model['synonyms'] = ['testString'] value_model['patterns'] = ['testString'] - value_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - value_model['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + value_model['created'] = "2019-01-01T12:00:00Z" + value_model['updated'] = "2019-01-01T12:00:00Z" # Construct a json representation of a Entity model entity_model_json = {} @@ -8019,8 +8141,8 @@ def test_entity_serialization(self): entity_model_json['description'] = 'testString' entity_model_json['metadata'] = {} 
entity_model_json['fuzzy_match'] = True - entity_model_json['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - entity_model_json['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + entity_model_json['created'] = "2019-01-01T12:00:00Z" + entity_model_json['updated'] = "2019-01-01T12:00:00Z" entity_model_json['values'] = [value_model] # Construct a model instance of Entity by calling from_dict on the json representation @@ -8038,7 +8160,7 @@ def test_entity_serialization(self): entity_model_json2 = entity_model.to_dict() assert entity_model_json2 == entity_model_json -class TestEntityCollection(): +class TestModel_EntityCollection(): """ Test Class for EntityCollection """ @@ -8056,16 +8178,16 @@ def test_entity_collection_serialization(self): value_model['type'] = 'synonyms' value_model['synonyms'] = ['testString'] value_model['patterns'] = ['testString'] - value_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - value_model['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + value_model['created'] = "2019-01-01T12:00:00Z" + value_model['updated'] = "2019-01-01T12:00:00Z" entity_model = {} # Entity entity_model['entity'] = 'testString' entity_model['description'] = 'testString' entity_model['metadata'] = {} entity_model['fuzzy_match'] = True - entity_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - entity_model['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + entity_model['created'] = "2019-01-01T12:00:00Z" + entity_model['updated'] = "2019-01-01T12:00:00Z" entity_model['values'] = [value_model] pagination_model = {} # Pagination @@ -8096,7 +8218,7 @@ def test_entity_collection_serialization(self): entity_collection_model_json2 = entity_collection_model.to_dict() assert entity_collection_model_json2 == entity_collection_model_json -class TestEntityMention(): +class TestModel_EntityMention(): """ Test Class for EntityMention """ @@ -8127,7 +8249,7 @@ def test_entity_mention_serialization(self): entity_mention_model_json2 = entity_mention_model.to_dict() assert entity_mention_model_json2 == entity_mention_model_json -class TestEntityMentionCollection(): +class TestModel_EntityMentionCollection(): """ Test Class for EntityMentionCollection """ @@ -8172,7 +8294,7 @@ def test_entity_mention_collection_serialization(self): entity_mention_collection_model_json2 = entity_mention_collection_model.to_dict() assert entity_mention_collection_model_json2 == entity_mention_collection_model_json -class TestExample(): +class TestModel_Example(): """ Test Class for Example """ @@ -8192,8 +8314,8 @@ def test_example_serialization(self): example_model_json = {} example_model_json['text'] = 'testString' example_model_json['mentions'] = [mention_model] - example_model_json['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - example_model_json['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + example_model_json['created'] = "2019-01-01T12:00:00Z" + example_model_json['updated'] = "2019-01-01T12:00:00Z" # Construct a model instance of Example by calling from_dict on the json representation example_model = Example.from_dict(example_model_json) @@ -8210,7 +8332,7 @@ def test_example_serialization(self): example_model_json2 = example_model.to_dict() assert example_model_json2 == example_model_json -class TestExampleCollection(): +class 
TestModel_ExampleCollection(): """ Test Class for ExampleCollection """ @@ -8229,8 +8351,8 @@ def test_example_collection_serialization(self): example_model = {} # Example example_model['text'] = 'testString' example_model['mentions'] = [mention_model] - example_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - example_model['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + example_model['created'] = "2019-01-01T12:00:00Z" + example_model['updated'] = "2019-01-01T12:00:00Z" pagination_model = {} # Pagination pagination_model['refresh_url'] = 'testString' @@ -8260,7 +8382,7 @@ def test_example_collection_serialization(self): example_collection_model_json2 = example_collection_model.to_dict() assert example_collection_model_json2 == example_collection_model_json -class TestIntent(): +class TestModel_Intent(): """ Test Class for Intent """ @@ -8279,15 +8401,15 @@ def test_intent_serialization(self): example_model = {} # Example example_model['text'] = 'testString' example_model['mentions'] = [mention_model] - example_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - example_model['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + example_model['created'] = "2019-01-01T12:00:00Z" + example_model['updated'] = "2019-01-01T12:00:00Z" # Construct a json representation of a Intent model intent_model_json = {} intent_model_json['intent'] = 'testString' intent_model_json['description'] = 'testString' - intent_model_json['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - intent_model_json['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + intent_model_json['created'] = "2019-01-01T12:00:00Z" + intent_model_json['updated'] = "2019-01-01T12:00:00Z" intent_model_json['examples'] = [example_model] # Construct a model instance of Intent by calling from_dict on the json representation @@ -8305,7 +8427,7 @@ def test_intent_serialization(self): intent_model_json2 = intent_model.to_dict() assert intent_model_json2 == intent_model_json -class TestIntentCollection(): +class TestModel_IntentCollection(): """ Test Class for IntentCollection """ @@ -8324,14 +8446,14 @@ def test_intent_collection_serialization(self): example_model = {} # Example example_model['text'] = 'testString' example_model['mentions'] = [mention_model] - example_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - example_model['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + example_model['created'] = "2019-01-01T12:00:00Z" + example_model['updated'] = "2019-01-01T12:00:00Z" intent_model = {} # Intent intent_model['intent'] = 'testString' intent_model['description'] = 'testString' - intent_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - intent_model['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + intent_model['created'] = "2019-01-01T12:00:00Z" + intent_model['updated'] = "2019-01-01T12:00:00Z" intent_model['examples'] = [example_model] pagination_model = {} # Pagination @@ -8362,7 +8484,7 @@ def test_intent_collection_serialization(self): intent_collection_model_json2 = intent_collection_model.to_dict() assert intent_collection_model_json2 == intent_collection_model_json -class TestLog(): +class TestModel_Log(): """ Test Class for Log """ @@ -8376,11 +8498,11 @@ def test_log_serialization(self): 
message_input_model = {} # MessageInput message_input_model['text'] = 'testString' - message_input_model['spelling_suggestions'] = True - message_input_model['spelling_auto_correct'] = True + message_input_model['spelling_suggestions'] = False + message_input_model['spelling_auto_correct'] = False message_input_model['suggested_text'] = 'testString' message_input_model['original_text'] = 'testString' - message_input_model['foo'] = { 'foo': 'bar' } + message_input_model['foo'] = 'testString' runtime_intent_model = {} # RuntimeIntent runtime_intent_model['intent'] = 'testString' @@ -8444,7 +8566,7 @@ def test_log_serialization(self): context_model['conversation_id'] = 'testString' context_model['system'] = {} context_model['metadata'] = message_context_metadata_model - context_model['foo'] = { 'foo': 'bar' } + context_model['foo'] = 'testString' dialog_node_visited_details_model = {} # DialogNodeVisitedDetails dialog_node_visited_details_model['dialog_node'] = 'testString' @@ -8487,7 +8609,7 @@ def test_log_serialization(self): output_data_model['log_messages'] = [log_message_model] output_data_model['text'] = ['testString'] output_data_model['generic'] = [runtime_response_generic_model] - output_data_model['foo'] = { 'foo': 'bar' } + output_data_model['foo'] = 'testString' dialog_node_action_model = {} # DialogNodeAction dialog_node_action_model['name'] = 'testString' @@ -8500,7 +8622,7 @@ def test_log_serialization(self): message_request_model['input'] = message_input_model message_request_model['intents'] = [runtime_intent_model] message_request_model['entities'] = [runtime_entity_model] - message_request_model['alternate_intents'] = True + message_request_model['alternate_intents'] = False message_request_model['context'] = context_model message_request_model['output'] = output_data_model message_request_model['actions'] = [dialog_node_action_model] @@ -8510,7 +8632,7 @@ def test_log_serialization(self): message_response_model['input'] = message_input_model message_response_model['intents'] = [runtime_intent_model] message_response_model['entities'] = [runtime_entity_model] - message_response_model['alternate_intents'] = True + message_response_model['alternate_intents'] = False message_response_model['context'] = context_model message_response_model['output'] = output_data_model message_response_model['actions'] = [dialog_node_action_model] @@ -8541,7 +8663,7 @@ def test_log_serialization(self): log_model_json2 = log_model.to_dict() assert log_model_json2 == log_model_json -class TestLogCollection(): +class TestModel_LogCollection(): """ Test Class for LogCollection """ @@ -8555,11 +8677,11 @@ def test_log_collection_serialization(self): message_input_model = {} # MessageInput message_input_model['text'] = 'testString' - message_input_model['spelling_suggestions'] = True - message_input_model['spelling_auto_correct'] = True + message_input_model['spelling_suggestions'] = False + message_input_model['spelling_auto_correct'] = False message_input_model['suggested_text'] = 'testString' message_input_model['original_text'] = 'testString' - message_input_model['foo'] = { 'foo': 'bar' } + message_input_model['foo'] = 'testString' runtime_intent_model = {} # RuntimeIntent runtime_intent_model['intent'] = 'testString' @@ -8623,7 +8745,7 @@ def test_log_collection_serialization(self): context_model['conversation_id'] = 'testString' context_model['system'] = {} context_model['metadata'] = message_context_metadata_model - context_model['foo'] = { 'foo': 'bar' } + context_model['foo'] = 
'testString' dialog_node_visited_details_model = {} # DialogNodeVisitedDetails dialog_node_visited_details_model['dialog_node'] = 'testString' @@ -8666,7 +8788,7 @@ def test_log_collection_serialization(self): output_data_model['log_messages'] = [log_message_model] output_data_model['text'] = ['testString'] output_data_model['generic'] = [runtime_response_generic_model] - output_data_model['foo'] = { 'foo': 'bar' } + output_data_model['foo'] = 'testString' dialog_node_action_model = {} # DialogNodeAction dialog_node_action_model['name'] = 'testString' @@ -8679,7 +8801,7 @@ def test_log_collection_serialization(self): message_request_model['input'] = message_input_model message_request_model['intents'] = [runtime_intent_model] message_request_model['entities'] = [runtime_entity_model] - message_request_model['alternate_intents'] = True + message_request_model['alternate_intents'] = False message_request_model['context'] = context_model message_request_model['output'] = output_data_model message_request_model['actions'] = [dialog_node_action_model] @@ -8689,7 +8811,7 @@ def test_log_collection_serialization(self): message_response_model['input'] = message_input_model message_response_model['intents'] = [runtime_intent_model] message_response_model['entities'] = [runtime_entity_model] - message_response_model['alternate_intents'] = True + message_response_model['alternate_intents'] = False message_response_model['context'] = context_model message_response_model['output'] = output_data_model message_response_model['actions'] = [dialog_node_action_model] @@ -8729,7 +8851,7 @@ def test_log_collection_serialization(self): log_collection_model_json2 = log_collection_model.to_dict() assert log_collection_model_json2 == log_collection_model_json -class TestLogMessage(): +class TestModel_LogMessage(): """ Test Class for LogMessage """ @@ -8767,7 +8889,7 @@ def test_log_message_serialization(self): log_message_model_json2 = log_message_model.to_dict() assert log_message_model_json2 == log_message_model_json -class TestLogMessageSource(): +class TestModel_LogMessageSource(): """ Test Class for LogMessageSource """ @@ -8797,7 +8919,7 @@ def test_log_message_source_serialization(self): log_message_source_model_json2 = log_message_source_model.to_dict() assert log_message_source_model_json2 == log_message_source_model_json -class TestLogPagination(): +class TestModel_LogPagination(): """ Test Class for LogPagination """ @@ -8828,7 +8950,7 @@ def test_log_pagination_serialization(self): log_pagination_model_json2 = log_pagination_model.to_dict() assert log_pagination_model_json2 == log_pagination_model_json -class TestMention(): +class TestModel_Mention(): """ Test Class for Mention """ @@ -8858,7 +8980,7 @@ def test_mention_serialization(self): mention_model_json2 = mention_model.to_dict() assert mention_model_json2 == mention_model_json -class TestMessageContextMetadata(): +class TestModel_MessageContextMetadata(): """ Test Class for MessageContextMetadata """ @@ -8888,7 +9010,7 @@ def test_message_context_metadata_serialization(self): message_context_metadata_model_json2 = message_context_metadata_model.to_dict() assert message_context_metadata_model_json2 == message_context_metadata_model_json -class TestMessageInput(): +class TestModel_MessageInput(): """ Test Class for MessageInput """ @@ -8901,11 +9023,11 @@ def test_message_input_serialization(self): # Construct a json representation of a MessageInput model message_input_model_json = {} message_input_model_json['text'] = 'testString' - 
message_input_model_json['spelling_suggestions'] = True - message_input_model_json['spelling_auto_correct'] = True + message_input_model_json['spelling_suggestions'] = False + message_input_model_json['spelling_auto_correct'] = False message_input_model_json['suggested_text'] = 'testString' message_input_model_json['original_text'] = 'testString' - message_input_model_json['foo'] = { 'foo': 'bar' } + message_input_model_json['foo'] = 'testString' # Construct a model instance of MessageInput by calling from_dict on the json representation message_input_model = MessageInput.from_dict(message_input_model_json) @@ -8922,7 +9044,17 @@ def test_message_input_serialization(self): message_input_model_json2 = message_input_model.to_dict() assert message_input_model_json2 == message_input_model_json -class TestMessageRequest(): + # Test get_properties and set_properties methods. + message_input_model.set_properties({}) + actual_dict = message_input_model.get_properties() + assert actual_dict == {} + + expected_dict = {'foo': 'testString'} + message_input_model.set_properties(expected_dict) + actual_dict = message_input_model.get_properties() + assert actual_dict == expected_dict + +class TestModel_MessageRequest(): """ Test Class for MessageRequest """ @@ -8936,11 +9068,11 @@ def test_message_request_serialization(self): message_input_model = {} # MessageInput message_input_model['text'] = 'testString' - message_input_model['spelling_suggestions'] = True - message_input_model['spelling_auto_correct'] = True + message_input_model['spelling_suggestions'] = False + message_input_model['spelling_auto_correct'] = False message_input_model['suggested_text'] = 'testString' message_input_model['original_text'] = 'testString' - message_input_model['foo'] = { 'foo': 'bar' } + message_input_model['foo'] = 'testString' runtime_intent_model = {} # RuntimeIntent runtime_intent_model['intent'] = 'testString' @@ -9004,7 +9136,7 @@ def test_message_request_serialization(self): context_model['conversation_id'] = 'testString' context_model['system'] = {} context_model['metadata'] = message_context_metadata_model - context_model['foo'] = { 'foo': 'bar' } + context_model['foo'] = 'testString' dialog_node_visited_details_model = {} # DialogNodeVisitedDetails dialog_node_visited_details_model['dialog_node'] = 'testString' @@ -9047,7 +9179,7 @@ def test_message_request_serialization(self): output_data_model['log_messages'] = [log_message_model] output_data_model['text'] = ['testString'] output_data_model['generic'] = [runtime_response_generic_model] - output_data_model['foo'] = { 'foo': 'bar' } + output_data_model['foo'] = 'testString' dialog_node_action_model = {} # DialogNodeAction dialog_node_action_model['name'] = 'testString' @@ -9061,7 +9193,7 @@ def test_message_request_serialization(self): message_request_model_json['input'] = message_input_model message_request_model_json['intents'] = [runtime_intent_model] message_request_model_json['entities'] = [runtime_entity_model] - message_request_model_json['alternate_intents'] = True + message_request_model_json['alternate_intents'] = False message_request_model_json['context'] = context_model message_request_model_json['output'] = output_data_model message_request_model_json['actions'] = [dialog_node_action_model] @@ -9082,7 +9214,7 @@ def test_message_request_serialization(self): message_request_model_json2 = message_request_model.to_dict() assert message_request_model_json2 == message_request_model_json -class TestMessageResponse(): +class TestModel_MessageResponse(): 
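# NOTE: the new get_properties()/set_properties() assertions above exercise
# dynamic models (MessageInput, OutputData, DialogNodeContext, ...) that carry
# arbitrary additional properties; 'foo' is only a placeholder key. Below is a
# minimal, hypothetical sketch of that pattern, not the generated SDK code:
# declared attributes stay fixed while undeclared keys flow through
# get_properties()/set_properties().
class DynamicModelSketch:
    """Declared attribute 'text' plus arbitrary extra properties."""
    _declared = {'text'}

    def __init__(self, text=None, **kwargs):
        self.text = text
        for name, value in kwargs.items():
            setattr(self, name, value)  # undeclared keys become attributes

    def get_properties(self):
        # Return only the attributes that are not declared on the model.
        return {k: v for k, v in vars(self).items() if k not in self._declared}

    def set_properties(self, props):
        # Replace the current extra attributes with the given mapping.
        for k in list(vars(self)):
            if k not in self._declared:
                delattr(self, k)
        for name, value in props.items():
            setattr(self, name, value)

model = DynamicModelSketch(text='testString', foo='testString')
assert model.get_properties() == {'foo': 'testString'}
model.set_properties({})
assert model.get_properties() == {}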
""" Test Class for MessageResponse """ @@ -9096,11 +9228,11 @@ def test_message_response_serialization(self): message_input_model = {} # MessageInput message_input_model['text'] = 'testString' - message_input_model['spelling_suggestions'] = True - message_input_model['spelling_auto_correct'] = True + message_input_model['spelling_suggestions'] = False + message_input_model['spelling_auto_correct'] = False message_input_model['suggested_text'] = 'testString' message_input_model['original_text'] = 'testString' - message_input_model['foo'] = { 'foo': 'bar' } + message_input_model['foo'] = 'testString' runtime_intent_model = {} # RuntimeIntent runtime_intent_model['intent'] = 'testString' @@ -9164,7 +9296,7 @@ def test_message_response_serialization(self): context_model['conversation_id'] = 'testString' context_model['system'] = {} context_model['metadata'] = message_context_metadata_model - context_model['foo'] = { 'foo': 'bar' } + context_model['foo'] = 'testString' dialog_node_visited_details_model = {} # DialogNodeVisitedDetails dialog_node_visited_details_model['dialog_node'] = 'testString' @@ -9207,7 +9339,7 @@ def test_message_response_serialization(self): output_data_model['log_messages'] = [log_message_model] output_data_model['text'] = ['testString'] output_data_model['generic'] = [runtime_response_generic_model] - output_data_model['foo'] = { 'foo': 'bar' } + output_data_model['foo'] = 'testString' dialog_node_action_model = {} # DialogNodeAction dialog_node_action_model['name'] = 'testString' @@ -9221,7 +9353,7 @@ def test_message_response_serialization(self): message_response_model_json['input'] = message_input_model message_response_model_json['intents'] = [runtime_intent_model] message_response_model_json['entities'] = [runtime_entity_model] - message_response_model_json['alternate_intents'] = True + message_response_model_json['alternate_intents'] = False message_response_model_json['context'] = context_model message_response_model_json['output'] = output_data_model message_response_model_json['actions'] = [dialog_node_action_model] @@ -9242,7 +9374,7 @@ def test_message_response_serialization(self): message_response_model_json2 = message_response_model.to_dict() assert message_response_model_json2 == message_response_model_json -class TestOutputData(): +class TestModel_OutputData(): """ Test Class for OutputData """ @@ -9271,11 +9403,11 @@ def test_output_data_serialization(self): message_input_model = {} # MessageInput message_input_model['text'] = 'testString' - message_input_model['spelling_suggestions'] = True - message_input_model['spelling_auto_correct'] = True + message_input_model['spelling_suggestions'] = False + message_input_model['spelling_auto_correct'] = False message_input_model['suggested_text'] = 'testString' message_input_model['original_text'] = 'testString' - message_input_model['foo'] = { 'foo': 'bar' } + message_input_model['foo'] = 'testString' runtime_intent_model = {} # RuntimeIntent runtime_intent_model['intent'] = 'testString' @@ -9358,7 +9490,7 @@ def test_output_data_serialization(self): output_data_model_json['log_messages'] = [log_message_model] output_data_model_json['text'] = ['testString'] output_data_model_json['generic'] = [runtime_response_generic_model] - output_data_model_json['foo'] = { 'foo': 'bar' } + output_data_model_json['foo'] = 'testString' # Construct a model instance of OutputData by calling from_dict on the json representation output_data_model = OutputData.from_dict(output_data_model_json) @@ -9375,7 +9507,17 @@ def 
test_output_data_serialization(self): output_data_model_json2 = output_data_model.to_dict() assert output_data_model_json2 == output_data_model_json -class TestPagination(): + # Test get_properties and set_properties methods. + output_data_model.set_properties({}) + actual_dict = output_data_model.get_properties() + assert actual_dict == {} + + expected_dict = {'foo': 'testString'} + output_data_model.set_properties(expected_dict) + actual_dict = output_data_model.get_properties() + assert actual_dict == expected_dict + +class TestModel_Pagination(): """ Test Class for Pagination """ @@ -9409,7 +9551,7 @@ def test_pagination_serialization(self): pagination_model_json2 = pagination_model.to_dict() assert pagination_model_json2 == pagination_model_json -class TestResponseGenericChannel(): +class TestModel_ResponseGenericChannel(): """ Test Class for ResponseGenericChannel """ @@ -9438,7 +9580,7 @@ def test_response_generic_channel_serialization(self): response_generic_channel_model_json2 = response_generic_channel_model.to_dict() assert response_generic_channel_model_json2 == response_generic_channel_model_json -class TestRuntimeEntity(): +class TestModel_RuntimeEntity(): """ Test Class for RuntimeEntity """ @@ -9516,7 +9658,7 @@ def test_runtime_entity_serialization(self): runtime_entity_model_json2 = runtime_entity_model.to_dict() assert runtime_entity_model_json2 == runtime_entity_model_json -class TestRuntimeEntityAlternative(): +class TestModel_RuntimeEntityAlternative(): """ Test Class for RuntimeEntityAlternative """ @@ -9546,7 +9688,7 @@ def test_runtime_entity_alternative_serialization(self): runtime_entity_alternative_model_json2 = runtime_entity_alternative_model.to_dict() assert runtime_entity_alternative_model_json2 == runtime_entity_alternative_model_json -class TestRuntimeEntityInterpretation(): +class TestModel_RuntimeEntityInterpretation(): """ Test Class for RuntimeEntityInterpretation """ @@ -9600,7 +9742,7 @@ def test_runtime_entity_interpretation_serialization(self): runtime_entity_interpretation_model_json2 = runtime_entity_interpretation_model.to_dict() assert runtime_entity_interpretation_model_json2 == runtime_entity_interpretation_model_json -class TestRuntimeEntityRole(): +class TestModel_RuntimeEntityRole(): """ Test Class for RuntimeEntityRole """ @@ -9629,7 +9771,7 @@ def test_runtime_entity_role_serialization(self): runtime_entity_role_model_json2 = runtime_entity_role_model.to_dict() assert runtime_entity_role_model_json2 == runtime_entity_role_model_json -class TestRuntimeIntent(): +class TestModel_RuntimeIntent(): """ Test Class for RuntimeIntent """ @@ -9659,7 +9801,7 @@ def test_runtime_intent_serialization(self): runtime_intent_model_json2 = runtime_intent_model.to_dict() assert runtime_intent_model_json2 == runtime_intent_model_json -class TestSynonym(): +class TestModel_Synonym(): """ Test Class for Synonym """ @@ -9672,8 +9814,8 @@ def test_synonym_serialization(self): # Construct a json representation of a Synonym model synonym_model_json = {} synonym_model_json['synonym'] = 'testString' - synonym_model_json['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - synonym_model_json['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + synonym_model_json['created'] = "2019-01-01T12:00:00Z" + synonym_model_json['updated'] = "2019-01-01T12:00:00Z" # Construct a model instance of Synonym by calling from_dict on the json representation synonym_model = Synonym.from_dict(synonym_model_json) @@ -9690,7 
+9832,7 @@ def test_synonym_serialization(self): synonym_model_json2 = synonym_model.to_dict() assert synonym_model_json2 == synonym_model_json -class TestSynonymCollection(): +class TestModel_SynonymCollection(): """ Test Class for SynonymCollection """ @@ -9704,8 +9846,8 @@ def test_synonym_collection_serialization(self): synonym_model = {} # Synonym synonym_model['synonym'] = 'testString' - synonym_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - synonym_model['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + synonym_model['created'] = "2019-01-01T12:00:00Z" + synonym_model['updated'] = "2019-01-01T12:00:00Z" pagination_model = {} # Pagination pagination_model['refresh_url'] = 'testString' @@ -9735,7 +9877,7 @@ def test_synonym_collection_serialization(self): synonym_collection_model_json2 = synonym_collection_model.to_dict() assert synonym_collection_model_json2 == synonym_collection_model_json -class TestValue(): +class TestModel_Value(): """ Test Class for Value """ @@ -9752,8 +9894,8 @@ def test_value_serialization(self): value_model_json['type'] = 'synonyms' value_model_json['synonyms'] = ['testString'] value_model_json['patterns'] = ['testString'] - value_model_json['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - value_model_json['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + value_model_json['created'] = "2019-01-01T12:00:00Z" + value_model_json['updated'] = "2019-01-01T12:00:00Z" # Construct a model instance of Value by calling from_dict on the json representation value_model = Value.from_dict(value_model_json) @@ -9770,7 +9912,7 @@ def test_value_serialization(self): value_model_json2 = value_model.to_dict() assert value_model_json2 == value_model_json -class TestValueCollection(): +class TestModel_ValueCollection(): """ Test Class for ValueCollection """ @@ -9788,8 +9930,8 @@ def test_value_collection_serialization(self): value_model['type'] = 'synonyms' value_model['synonyms'] = ['testString'] value_model['patterns'] = ['testString'] - value_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - value_model['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + value_model['created'] = "2019-01-01T12:00:00Z" + value_model['updated'] = "2019-01-01T12:00:00Z" pagination_model = {} # Pagination pagination_model['refresh_url'] = 'testString' @@ -9819,7 +9961,7 @@ def test_value_collection_serialization(self): value_collection_model_json2 = value_collection_model.to_dict() assert value_collection_model_json2 == value_collection_model_json -class TestWebhook(): +class TestModel_Webhook(): """ Test Class for Webhook """ @@ -9856,7 +9998,7 @@ def test_webhook_serialization(self): webhook_model_json2 = webhook_model.to_dict() assert webhook_model_json2 == webhook_model_json -class TestWebhookHeader(): +class TestModel_WebhookHeader(): """ Test Class for WebhookHeader """ @@ -9886,7 +10028,7 @@ def test_webhook_header_serialization(self): webhook_header_model_json2 = webhook_header_model.to_dict() assert webhook_header_model_json2 == webhook_header_model_json -class TestWorkspace(): +class TestModel_Workspace(): """ Test Class for Workspace """ @@ -9923,11 +10065,11 @@ def test_workspace_serialization(self): dialog_node_output_model['generic'] = [dialog_node_output_generic_model] dialog_node_output_model['integrations'] = {} dialog_node_output_model['modifiers'] = 
dialog_node_output_modifiers_model - dialog_node_output_model['foo'] = { 'foo': 'bar' } + dialog_node_output_model['foo'] = 'testString' dialog_node_context_model = {} # DialogNodeContext dialog_node_context_model['integrations'] = {} - dialog_node_context_model['foo'] = { 'foo': 'bar' } + dialog_node_context_model['foo'] = 'testString' dialog_node_next_step_model = {} # DialogNodeNextStep dialog_node_next_step_model['behavior'] = 'get_user_input' @@ -9960,15 +10102,15 @@ def test_workspace_serialization(self): dialog_node_model['digress_out'] = 'allow_returning' dialog_node_model['digress_out_slots'] = 'not_allowed' dialog_node_model['user_label'] = 'testString' - dialog_node_model['disambiguation_opt_out'] = True + dialog_node_model['disambiguation_opt_out'] = False dialog_node_model['disabled'] = True - dialog_node_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - dialog_node_model['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + dialog_node_model['created'] = "2019-01-01T12:00:00Z" + dialog_node_model['updated'] = "2019-01-01T12:00:00Z" counterexample_model = {} # Counterexample counterexample_model['text'] = 'testString' - counterexample_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - counterexample_model['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + counterexample_model['created'] = "2019-01-01T12:00:00Z" + counterexample_model['updated'] = "2019-01-01T12:00:00Z" workspace_system_settings_tooling_model = {} # WorkspaceSystemSettingsTooling workspace_system_settings_tooling_model['store_generic_responses'] = True @@ -9976,24 +10118,24 @@ def test_workspace_serialization(self): workspace_system_settings_disambiguation_model = {} # WorkspaceSystemSettingsDisambiguation workspace_system_settings_disambiguation_model['prompt'] = 'testString' workspace_system_settings_disambiguation_model['none_of_the_above_prompt'] = 'testString' - workspace_system_settings_disambiguation_model['enabled'] = True + workspace_system_settings_disambiguation_model['enabled'] = False workspace_system_settings_disambiguation_model['sensitivity'] = 'auto' workspace_system_settings_disambiguation_model['randomize'] = True workspace_system_settings_disambiguation_model['max_suggestions'] = 1 workspace_system_settings_disambiguation_model['suggestion_text_policy'] = 'testString' workspace_system_settings_system_entities_model = {} # WorkspaceSystemSettingsSystemEntities - workspace_system_settings_system_entities_model['enabled'] = True + workspace_system_settings_system_entities_model['enabled'] = False workspace_system_settings_off_topic_model = {} # WorkspaceSystemSettingsOffTopic - workspace_system_settings_off_topic_model['enabled'] = True + workspace_system_settings_off_topic_model['enabled'] = False workspace_system_settings_model = {} # WorkspaceSystemSettings workspace_system_settings_model['tooling'] = workspace_system_settings_tooling_model workspace_system_settings_model['disambiguation'] = workspace_system_settings_disambiguation_model workspace_system_settings_model['human_agent_assist'] = {} - workspace_system_settings_model['spelling_suggestions'] = True - workspace_system_settings_model['spelling_auto_correct'] = True + workspace_system_settings_model['spelling_suggestions'] = False + workspace_system_settings_model['spelling_auto_correct'] = False workspace_system_settings_model['system_entities'] = workspace_system_settings_system_entities_model 
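# NOTE: the fixture timestamps in these hunks change from a
# string_to_datetime()/datetime_to_string() round trip to a plain ISO 8601
# literal such as "2019-01-01T12:00:00Z"; the value fed to from_dict() names
# the same UTC instant either way, just written directly. A small
# illustration, assuming the ibm_cloud_sdk_core.utils helpers already used in
# these tests (the exact output formatting of datetime_to_string is an
# assumption):
from ibm_cloud_sdk_core.utils import datetime_to_string, string_to_datetime

old_style = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z"))
new_style = "2019-01-01T12:00:00Z"  # literal, no parse/format round trip
print(old_style)  # expected to render the same instant in UTC ISO 8601
print(new_style)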
workspace_system_settings_model['off_topic'] = workspace_system_settings_off_topic_model @@ -10013,14 +10155,14 @@ def test_workspace_serialization(self): example_model = {} # Example example_model['text'] = 'testString' example_model['mentions'] = [mention_model] - example_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - example_model['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + example_model['created'] = "2019-01-01T12:00:00Z" + example_model['updated'] = "2019-01-01T12:00:00Z" intent_model = {} # Intent intent_model['intent'] = 'testString' intent_model['description'] = 'testString' - intent_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - intent_model['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + intent_model['created'] = "2019-01-01T12:00:00Z" + intent_model['updated'] = "2019-01-01T12:00:00Z" intent_model['examples'] = [example_model] value_model = {} # Value @@ -10029,16 +10171,16 @@ def test_workspace_serialization(self): value_model['type'] = 'synonyms' value_model['synonyms'] = ['testString'] value_model['patterns'] = ['testString'] - value_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - value_model['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + value_model['created'] = "2019-01-01T12:00:00Z" + value_model['updated'] = "2019-01-01T12:00:00Z" entity_model = {} # Entity entity_model['entity'] = 'testString' entity_model['description'] = 'testString' entity_model['metadata'] = {} entity_model['fuzzy_match'] = True - entity_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - entity_model['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + entity_model['created'] = "2019-01-01T12:00:00Z" + entity_model['updated'] = "2019-01-01T12:00:00Z" entity_model['values'] = [value_model] # Construct a json representation of a Workspace model @@ -10049,10 +10191,10 @@ def test_workspace_serialization(self): workspace_model_json['workspace_id'] = 'testString' workspace_model_json['dialog_nodes'] = [dialog_node_model] workspace_model_json['counterexamples'] = [counterexample_model] - workspace_model_json['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - workspace_model_json['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + workspace_model_json['created'] = "2019-01-01T12:00:00Z" + workspace_model_json['updated'] = "2019-01-01T12:00:00Z" workspace_model_json['metadata'] = {} - workspace_model_json['learning_opt_out'] = True + workspace_model_json['learning_opt_out'] = False workspace_model_json['system_settings'] = workspace_system_settings_model workspace_model_json['status'] = 'Non Existent' workspace_model_json['webhooks'] = [webhook_model] @@ -10074,7 +10216,7 @@ def test_workspace_serialization(self): workspace_model_json2 = workspace_model.to_dict() assert workspace_model_json2 == workspace_model_json -class TestWorkspaceCollection(): +class TestModel_WorkspaceCollection(): """ Test Class for WorkspaceCollection """ @@ -10111,11 +10253,11 @@ def test_workspace_collection_serialization(self): dialog_node_output_model['generic'] = [dialog_node_output_generic_model] dialog_node_output_model['integrations'] = {} dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model - dialog_node_output_model['foo'] = { 'foo': 'bar' } + 
dialog_node_output_model['foo'] = 'testString' dialog_node_context_model = {} # DialogNodeContext dialog_node_context_model['integrations'] = {} - dialog_node_context_model['foo'] = { 'foo': 'bar' } + dialog_node_context_model['foo'] = 'testString' dialog_node_next_step_model = {} # DialogNodeNextStep dialog_node_next_step_model['behavior'] = 'get_user_input' @@ -10148,15 +10290,15 @@ def test_workspace_collection_serialization(self): dialog_node_model['digress_out'] = 'allow_returning' dialog_node_model['digress_out_slots'] = 'not_allowed' dialog_node_model['user_label'] = 'testString' - dialog_node_model['disambiguation_opt_out'] = True + dialog_node_model['disambiguation_opt_out'] = False dialog_node_model['disabled'] = True - dialog_node_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - dialog_node_model['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + dialog_node_model['created'] = "2019-01-01T12:00:00Z" + dialog_node_model['updated'] = "2019-01-01T12:00:00Z" counterexample_model = {} # Counterexample counterexample_model['text'] = 'testString' - counterexample_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - counterexample_model['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + counterexample_model['created'] = "2019-01-01T12:00:00Z" + counterexample_model['updated'] = "2019-01-01T12:00:00Z" workspace_system_settings_tooling_model = {} # WorkspaceSystemSettingsTooling workspace_system_settings_tooling_model['store_generic_responses'] = True @@ -10164,24 +10306,24 @@ def test_workspace_collection_serialization(self): workspace_system_settings_disambiguation_model = {} # WorkspaceSystemSettingsDisambiguation workspace_system_settings_disambiguation_model['prompt'] = 'testString' workspace_system_settings_disambiguation_model['none_of_the_above_prompt'] = 'testString' - workspace_system_settings_disambiguation_model['enabled'] = True + workspace_system_settings_disambiguation_model['enabled'] = False workspace_system_settings_disambiguation_model['sensitivity'] = 'auto' workspace_system_settings_disambiguation_model['randomize'] = True workspace_system_settings_disambiguation_model['max_suggestions'] = 1 workspace_system_settings_disambiguation_model['suggestion_text_policy'] = 'testString' workspace_system_settings_system_entities_model = {} # WorkspaceSystemSettingsSystemEntities - workspace_system_settings_system_entities_model['enabled'] = True + workspace_system_settings_system_entities_model['enabled'] = False workspace_system_settings_off_topic_model = {} # WorkspaceSystemSettingsOffTopic - workspace_system_settings_off_topic_model['enabled'] = True + workspace_system_settings_off_topic_model['enabled'] = False workspace_system_settings_model = {} # WorkspaceSystemSettings workspace_system_settings_model['tooling'] = workspace_system_settings_tooling_model workspace_system_settings_model['disambiguation'] = workspace_system_settings_disambiguation_model workspace_system_settings_model['human_agent_assist'] = {} - workspace_system_settings_model['spelling_suggestions'] = True - workspace_system_settings_model['spelling_auto_correct'] = True + workspace_system_settings_model['spelling_suggestions'] = False + workspace_system_settings_model['spelling_auto_correct'] = False workspace_system_settings_model['system_entities'] = workspace_system_settings_system_entities_model workspace_system_settings_model['off_topic'] = 
workspace_system_settings_off_topic_model @@ -10201,14 +10343,14 @@ def test_workspace_collection_serialization(self): example_model = {} # Example example_model['text'] = 'testString' example_model['mentions'] = [mention_model] - example_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - example_model['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + example_model['created'] = "2019-01-01T12:00:00Z" + example_model['updated'] = "2019-01-01T12:00:00Z" intent_model = {} # Intent intent_model['intent'] = 'testString' intent_model['description'] = 'testString' - intent_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - intent_model['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + intent_model['created'] = "2019-01-01T12:00:00Z" + intent_model['updated'] = "2019-01-01T12:00:00Z" intent_model['examples'] = [example_model] value_model = {} # Value @@ -10217,16 +10359,16 @@ def test_workspace_collection_serialization(self): value_model['type'] = 'synonyms' value_model['synonyms'] = ['testString'] value_model['patterns'] = ['testString'] - value_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - value_model['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + value_model['created'] = "2019-01-01T12:00:00Z" + value_model['updated'] = "2019-01-01T12:00:00Z" entity_model = {} # Entity entity_model['entity'] = 'testString' entity_model['description'] = 'testString' entity_model['metadata'] = {} entity_model['fuzzy_match'] = True - entity_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - entity_model['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + entity_model['created'] = "2019-01-01T12:00:00Z" + entity_model['updated'] = "2019-01-01T12:00:00Z" entity_model['values'] = [value_model] workspace_model = {} # Workspace @@ -10236,10 +10378,10 @@ def test_workspace_collection_serialization(self): workspace_model['workspace_id'] = 'testString' workspace_model['dialog_nodes'] = [dialog_node_model] workspace_model['counterexamples'] = [counterexample_model] - workspace_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - workspace_model['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + workspace_model['created'] = "2019-01-01T12:00:00Z" + workspace_model['updated'] = "2019-01-01T12:00:00Z" workspace_model['metadata'] = {} - workspace_model['learning_opt_out'] = True + workspace_model['learning_opt_out'] = False workspace_model['system_settings'] = workspace_system_settings_model workspace_model['status'] = 'Non Existent' workspace_model['webhooks'] = [webhook_model] @@ -10274,7 +10416,7 @@ def test_workspace_collection_serialization(self): workspace_collection_model_json2 = workspace_collection_model.to_dict() assert workspace_collection_model_json2 == workspace_collection_model_json -class TestWorkspaceSystemSettings(): +class TestModel_WorkspaceSystemSettings(): """ Test Class for WorkspaceSystemSettings """ @@ -10292,25 +10434,25 @@ def test_workspace_system_settings_serialization(self): workspace_system_settings_disambiguation_model = {} # WorkspaceSystemSettingsDisambiguation workspace_system_settings_disambiguation_model['prompt'] = 'testString' workspace_system_settings_disambiguation_model['none_of_the_above_prompt'] = 'testString' - 
workspace_system_settings_disambiguation_model['enabled'] = True + workspace_system_settings_disambiguation_model['enabled'] = False workspace_system_settings_disambiguation_model['sensitivity'] = 'auto' workspace_system_settings_disambiguation_model['randomize'] = True workspace_system_settings_disambiguation_model['max_suggestions'] = 1 workspace_system_settings_disambiguation_model['suggestion_text_policy'] = 'testString' workspace_system_settings_system_entities_model = {} # WorkspaceSystemSettingsSystemEntities - workspace_system_settings_system_entities_model['enabled'] = True + workspace_system_settings_system_entities_model['enabled'] = False workspace_system_settings_off_topic_model = {} # WorkspaceSystemSettingsOffTopic - workspace_system_settings_off_topic_model['enabled'] = True + workspace_system_settings_off_topic_model['enabled'] = False # Construct a json representation of a WorkspaceSystemSettings model workspace_system_settings_model_json = {} workspace_system_settings_model_json['tooling'] = workspace_system_settings_tooling_model workspace_system_settings_model_json['disambiguation'] = workspace_system_settings_disambiguation_model workspace_system_settings_model_json['human_agent_assist'] = {} - workspace_system_settings_model_json['spelling_suggestions'] = True - workspace_system_settings_model_json['spelling_auto_correct'] = True + workspace_system_settings_model_json['spelling_suggestions'] = False + workspace_system_settings_model_json['spelling_auto_correct'] = False workspace_system_settings_model_json['system_entities'] = workspace_system_settings_system_entities_model workspace_system_settings_model_json['off_topic'] = workspace_system_settings_off_topic_model @@ -10329,7 +10471,7 @@ def test_workspace_system_settings_serialization(self): workspace_system_settings_model_json2 = workspace_system_settings_model.to_dict() assert workspace_system_settings_model_json2 == workspace_system_settings_model_json -class TestWorkspaceSystemSettingsDisambiguation(): +class TestModel_WorkspaceSystemSettingsDisambiguation(): """ Test Class for WorkspaceSystemSettingsDisambiguation """ @@ -10343,7 +10485,7 @@ def test_workspace_system_settings_disambiguation_serialization(self): workspace_system_settings_disambiguation_model_json = {} workspace_system_settings_disambiguation_model_json['prompt'] = 'testString' workspace_system_settings_disambiguation_model_json['none_of_the_above_prompt'] = 'testString' - workspace_system_settings_disambiguation_model_json['enabled'] = True + workspace_system_settings_disambiguation_model_json['enabled'] = False workspace_system_settings_disambiguation_model_json['sensitivity'] = 'auto' workspace_system_settings_disambiguation_model_json['randomize'] = True workspace_system_settings_disambiguation_model_json['max_suggestions'] = 1 @@ -10364,7 +10506,7 @@ def test_workspace_system_settings_disambiguation_serialization(self): workspace_system_settings_disambiguation_model_json2 = workspace_system_settings_disambiguation_model.to_dict() assert workspace_system_settings_disambiguation_model_json2 == workspace_system_settings_disambiguation_model_json -class TestWorkspaceSystemSettingsOffTopic(): +class TestModel_WorkspaceSystemSettingsOffTopic(): """ Test Class for WorkspaceSystemSettingsOffTopic """ @@ -10376,7 +10518,7 @@ def test_workspace_system_settings_off_topic_serialization(self): # Construct a json representation of a WorkspaceSystemSettingsOffTopic model workspace_system_settings_off_topic_model_json = {} - 
workspace_system_settings_off_topic_model_json['enabled'] = True + workspace_system_settings_off_topic_model_json['enabled'] = False # Construct a model instance of WorkspaceSystemSettingsOffTopic by calling from_dict on the json representation workspace_system_settings_off_topic_model = WorkspaceSystemSettingsOffTopic.from_dict(workspace_system_settings_off_topic_model_json) @@ -10393,7 +10535,7 @@ def test_workspace_system_settings_off_topic_serialization(self): workspace_system_settings_off_topic_model_json2 = workspace_system_settings_off_topic_model.to_dict() assert workspace_system_settings_off_topic_model_json2 == workspace_system_settings_off_topic_model_json -class TestWorkspaceSystemSettingsSystemEntities(): +class TestModel_WorkspaceSystemSettingsSystemEntities(): """ Test Class for WorkspaceSystemSettingsSystemEntities """ @@ -10405,7 +10547,7 @@ def test_workspace_system_settings_system_entities_serialization(self): # Construct a json representation of a WorkspaceSystemSettingsSystemEntities model workspace_system_settings_system_entities_model_json = {} - workspace_system_settings_system_entities_model_json['enabled'] = True + workspace_system_settings_system_entities_model_json['enabled'] = False # Construct a model instance of WorkspaceSystemSettingsSystemEntities by calling from_dict on the json representation workspace_system_settings_system_entities_model = WorkspaceSystemSettingsSystemEntities.from_dict(workspace_system_settings_system_entities_model_json) @@ -10422,7 +10564,7 @@ def test_workspace_system_settings_system_entities_serialization(self): workspace_system_settings_system_entities_model_json2 = workspace_system_settings_system_entities_model.to_dict() assert workspace_system_settings_system_entities_model_json2 == workspace_system_settings_system_entities_model_json -class TestWorkspaceSystemSettingsTooling(): +class TestModel_WorkspaceSystemSettingsTooling(): """ Test Class for WorkspaceSystemSettingsTooling """ @@ -10451,7 +10593,7 @@ def test_workspace_system_settings_tooling_serialization(self): workspace_system_settings_tooling_model_json2 = workspace_system_settings_tooling_model.to_dict() assert workspace_system_settings_tooling_model_json2 == workspace_system_settings_tooling_model_json -class TestDialogNodeOutputGenericDialogNodeOutputResponseTypeChannelTransfer(): +class TestModel_DialogNodeOutputGenericDialogNodeOutputResponseTypeChannelTransfer(): """ Test Class for DialogNodeOutputGenericDialogNodeOutputResponseTypeChannelTransfer """ @@ -10497,7 +10639,7 @@ def test_dialog_node_output_generic_dialog_node_output_response_type_channel_tra dialog_node_output_generic_dialog_node_output_response_type_channel_transfer_model_json2 = dialog_node_output_generic_dialog_node_output_response_type_channel_transfer_model.to_dict() assert dialog_node_output_generic_dialog_node_output_response_type_channel_transfer_model_json2 == dialog_node_output_generic_dialog_node_output_response_type_channel_transfer_model_json -class TestDialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent(): +class TestModel_DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent(): """ Test Class for DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent """ @@ -10542,7 +10684,7 @@ def test_dialog_node_output_generic_dialog_node_output_response_type_connect_to_ dialog_node_output_generic_dialog_node_output_response_type_connect_to_agent_model_json2 = dialog_node_output_generic_dialog_node_output_response_type_connect_to_agent_model.to_dict() assert 
dialog_node_output_generic_dialog_node_output_response_type_connect_to_agent_model_json2 == dialog_node_output_generic_dialog_node_output_response_type_connect_to_agent_model_json -class TestDialogNodeOutputGenericDialogNodeOutputResponseTypeImage(): +class TestModel_DialogNodeOutputGenericDialogNodeOutputResponseTypeImage(): """ Test Class for DialogNodeOutputGenericDialogNodeOutputResponseTypeImage """ @@ -10564,6 +10706,7 @@ def test_dialog_node_output_generic_dialog_node_output_response_type_image_seria dialog_node_output_generic_dialog_node_output_response_type_image_model_json['title'] = 'testString' dialog_node_output_generic_dialog_node_output_response_type_image_model_json['description'] = 'testString' dialog_node_output_generic_dialog_node_output_response_type_image_model_json['channels'] = [response_generic_channel_model] + dialog_node_output_generic_dialog_node_output_response_type_image_model_json['alt_text'] = 'testString' # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypeImage by calling from_dict on the json representation dialog_node_output_generic_dialog_node_output_response_type_image_model = DialogNodeOutputGenericDialogNodeOutputResponseTypeImage.from_dict(dialog_node_output_generic_dialog_node_output_response_type_image_model_json) @@ -10580,7 +10723,7 @@ def test_dialog_node_output_generic_dialog_node_output_response_type_image_seria dialog_node_output_generic_dialog_node_output_response_type_image_model_json2 = dialog_node_output_generic_dialog_node_output_response_type_image_model.to_dict() assert dialog_node_output_generic_dialog_node_output_response_type_image_model_json2 == dialog_node_output_generic_dialog_node_output_response_type_image_model_json -class TestDialogNodeOutputGenericDialogNodeOutputResponseTypeOption(): +class TestModel_DialogNodeOutputGenericDialogNodeOutputResponseTypeOption(): """ Test Class for DialogNodeOutputGenericDialogNodeOutputResponseTypeOption """ @@ -10594,11 +10737,11 @@ def test_dialog_node_output_generic_dialog_node_output_response_type_option_seri message_input_model = {} # MessageInput message_input_model['text'] = 'testString' - message_input_model['spelling_suggestions'] = True - message_input_model['spelling_auto_correct'] = True + message_input_model['spelling_suggestions'] = False + message_input_model['spelling_auto_correct'] = False message_input_model['suggested_text'] = 'testString' message_input_model['original_text'] = 'testString' - message_input_model['foo'] = { 'foo': 'bar' } + message_input_model['foo'] = 'testString' runtime_intent_model = {} # RuntimeIntent runtime_intent_model['intent'] = 'testString' @@ -10690,7 +10833,7 @@ def test_dialog_node_output_generic_dialog_node_output_response_type_option_seri dialog_node_output_generic_dialog_node_output_response_type_option_model_json2 = dialog_node_output_generic_dialog_node_output_response_type_option_model.to_dict() assert dialog_node_output_generic_dialog_node_output_response_type_option_model_json2 == dialog_node_output_generic_dialog_node_output_response_type_option_model_json -class TestDialogNodeOutputGenericDialogNodeOutputResponseTypePause(): +class TestModel_DialogNodeOutputGenericDialogNodeOutputResponseTypePause(): """ Test Class for DialogNodeOutputGenericDialogNodeOutputResponseTypePause """ @@ -10727,7 +10870,7 @@ def test_dialog_node_output_generic_dialog_node_output_response_type_pause_seria dialog_node_output_generic_dialog_node_output_response_type_pause_model_json2 = 
dialog_node_output_generic_dialog_node_output_response_type_pause_model.to_dict() assert dialog_node_output_generic_dialog_node_output_response_type_pause_model_json2 == dialog_node_output_generic_dialog_node_output_response_type_pause_model_json -class TestDialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill(): +class TestModel_DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill(): """ Test Class for DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill """ @@ -10748,7 +10891,7 @@ def test_dialog_node_output_generic_dialog_node_output_response_type_search_skil dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_json['query'] = 'testString' dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_json['query_type'] = 'natural_language' dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_json['filter'] = 'testString' - dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_json['discovery_version'] = 'testString' + dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_json['discovery_version'] = '2018-12-03' dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_json['channels'] = [response_generic_channel_model] # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill by calling from_dict on the json representation @@ -10766,7 +10909,7 @@ def test_dialog_node_output_generic_dialog_node_output_response_type_search_skil dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_json2 = dialog_node_output_generic_dialog_node_output_response_type_search_skill_model.to_dict() assert dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_json2 == dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_json -class TestDialogNodeOutputGenericDialogNodeOutputResponseTypeText(): +class TestModel_DialogNodeOutputGenericDialogNodeOutputResponseTypeText(): """ Test Class for DialogNodeOutputGenericDialogNodeOutputResponseTypeText """ @@ -10789,7 +10932,7 @@ def test_dialog_node_output_generic_dialog_node_output_response_type_text_serial dialog_node_output_generic_dialog_node_output_response_type_text_model_json['response_type'] = 'text' dialog_node_output_generic_dialog_node_output_response_type_text_model_json['values'] = [dialog_node_output_text_values_element_model] dialog_node_output_generic_dialog_node_output_response_type_text_model_json['selection_policy'] = 'sequential' - dialog_node_output_generic_dialog_node_output_response_type_text_model_json['delimiter'] = 'testString' + dialog_node_output_generic_dialog_node_output_response_type_text_model_json['delimiter'] = '\n' dialog_node_output_generic_dialog_node_output_response_type_text_model_json['channels'] = [response_generic_channel_model] # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypeText by calling from_dict on the json representation @@ -10807,7 +10950,7 @@ def test_dialog_node_output_generic_dialog_node_output_response_type_text_serial dialog_node_output_generic_dialog_node_output_response_type_text_model_json2 = dialog_node_output_generic_dialog_node_output_response_type_text_model.to_dict() assert dialog_node_output_generic_dialog_node_output_response_type_text_model_json2 == dialog_node_output_generic_dialog_node_output_response_type_text_model_json -class 
TestDialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined(): +class TestModel_DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined(): """ Test Class for DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined """ @@ -10843,7 +10986,7 @@ def test_dialog_node_output_generic_dialog_node_output_response_type_user_define dialog_node_output_generic_dialog_node_output_response_type_user_defined_model_json2 = dialog_node_output_generic_dialog_node_output_response_type_user_defined_model.to_dict() assert dialog_node_output_generic_dialog_node_output_response_type_user_defined_model_json2 == dialog_node_output_generic_dialog_node_output_response_type_user_defined_model_json -class TestRuntimeResponseGenericRuntimeResponseTypeChannelTransfer(): +class TestModel_RuntimeResponseGenericRuntimeResponseTypeChannelTransfer(): """ Test Class for RuntimeResponseGenericRuntimeResponseTypeChannelTransfer """ @@ -10889,7 +11032,7 @@ def test_runtime_response_generic_runtime_response_type_channel_transfer_seriali runtime_response_generic_runtime_response_type_channel_transfer_model_json2 = runtime_response_generic_runtime_response_type_channel_transfer_model.to_dict() assert runtime_response_generic_runtime_response_type_channel_transfer_model_json2 == runtime_response_generic_runtime_response_type_channel_transfer_model_json -class TestRuntimeResponseGenericRuntimeResponseTypeConnectToAgent(): +class TestModel_RuntimeResponseGenericRuntimeResponseTypeConnectToAgent(): """ Test Class for RuntimeResponseGenericRuntimeResponseTypeConnectToAgent """ @@ -10936,7 +11079,7 @@ def test_runtime_response_generic_runtime_response_type_connect_to_agent_seriali runtime_response_generic_runtime_response_type_connect_to_agent_model_json2 = runtime_response_generic_runtime_response_type_connect_to_agent_model.to_dict() assert runtime_response_generic_runtime_response_type_connect_to_agent_model_json2 == runtime_response_generic_runtime_response_type_connect_to_agent_model_json -class TestRuntimeResponseGenericRuntimeResponseTypeImage(): +class TestModel_RuntimeResponseGenericRuntimeResponseTypeImage(): """ Test Class for RuntimeResponseGenericRuntimeResponseTypeImage """ @@ -10958,6 +11101,7 @@ def test_runtime_response_generic_runtime_response_type_image_serialization(self runtime_response_generic_runtime_response_type_image_model_json['title'] = 'testString' runtime_response_generic_runtime_response_type_image_model_json['description'] = 'testString' runtime_response_generic_runtime_response_type_image_model_json['channels'] = [response_generic_channel_model] + runtime_response_generic_runtime_response_type_image_model_json['alt_text'] = 'testString' # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeImage by calling from_dict on the json representation runtime_response_generic_runtime_response_type_image_model = RuntimeResponseGenericRuntimeResponseTypeImage.from_dict(runtime_response_generic_runtime_response_type_image_model_json) @@ -10974,7 +11118,7 @@ def test_runtime_response_generic_runtime_response_type_image_serialization(self runtime_response_generic_runtime_response_type_image_model_json2 = runtime_response_generic_runtime_response_type_image_model.to_dict() assert runtime_response_generic_runtime_response_type_image_model_json2 == runtime_response_generic_runtime_response_type_image_model_json -class TestRuntimeResponseGenericRuntimeResponseTypeOption(): +class TestModel_RuntimeResponseGenericRuntimeResponseTypeOption(): """ Test Class for 
RuntimeResponseGenericRuntimeResponseTypeOption """ @@ -10988,11 +11132,11 @@ def test_runtime_response_generic_runtime_response_type_option_serialization(sel message_input_model = {} # MessageInput message_input_model['text'] = 'testString' - message_input_model['spelling_suggestions'] = True - message_input_model['spelling_auto_correct'] = True + message_input_model['spelling_suggestions'] = False + message_input_model['spelling_auto_correct'] = False message_input_model['suggested_text'] = 'testString' message_input_model['original_text'] = 'testString' - message_input_model['foo'] = { 'foo': 'bar' } + message_input_model['foo'] = 'testString' runtime_intent_model = {} # RuntimeIntent runtime_intent_model['intent'] = 'testString' @@ -11084,7 +11228,7 @@ def test_runtime_response_generic_runtime_response_type_option_serialization(sel runtime_response_generic_runtime_response_type_option_model_json2 = runtime_response_generic_runtime_response_type_option_model.to_dict() assert runtime_response_generic_runtime_response_type_option_model_json2 == runtime_response_generic_runtime_response_type_option_model_json -class TestRuntimeResponseGenericRuntimeResponseTypePause(): +class TestModel_RuntimeResponseGenericRuntimeResponseTypePause(): """ Test Class for RuntimeResponseGenericRuntimeResponseTypePause """ @@ -11121,7 +11265,7 @@ def test_runtime_response_generic_runtime_response_type_pause_serialization(self runtime_response_generic_runtime_response_type_pause_model_json2 = runtime_response_generic_runtime_response_type_pause_model.to_dict() assert runtime_response_generic_runtime_response_type_pause_model_json2 == runtime_response_generic_runtime_response_type_pause_model_json -class TestRuntimeResponseGenericRuntimeResponseTypeSuggestion(): +class TestModel_RuntimeResponseGenericRuntimeResponseTypeSuggestion(): """ Test Class for RuntimeResponseGenericRuntimeResponseTypeSuggestion """ @@ -11135,11 +11279,11 @@ def test_runtime_response_generic_runtime_response_type_suggestion_serialization message_input_model = {} # MessageInput message_input_model['text'] = 'testString' - message_input_model['spelling_suggestions'] = True - message_input_model['spelling_auto_correct'] = True + message_input_model['spelling_suggestions'] = False + message_input_model['spelling_auto_correct'] = False message_input_model['suggested_text'] = 'testString' message_input_model['original_text'] = 'testString' - message_input_model['foo'] = { 'foo': 'bar' } + message_input_model['foo'] = 'testString' runtime_intent_model = {} # RuntimeIntent runtime_intent_model['intent'] = 'testString' @@ -11231,7 +11375,7 @@ def test_runtime_response_generic_runtime_response_type_suggestion_serialization runtime_response_generic_runtime_response_type_suggestion_model_json2 = runtime_response_generic_runtime_response_type_suggestion_model.to_dict() assert runtime_response_generic_runtime_response_type_suggestion_model_json2 == runtime_response_generic_runtime_response_type_suggestion_model_json -class TestRuntimeResponseGenericRuntimeResponseTypeText(): +class TestModel_RuntimeResponseGenericRuntimeResponseTypeText(): """ Test Class for RuntimeResponseGenericRuntimeResponseTypeText """ @@ -11267,7 +11411,7 @@ def test_runtime_response_generic_runtime_response_type_text_serialization(self) runtime_response_generic_runtime_response_type_text_model_json2 = runtime_response_generic_runtime_response_type_text_model.to_dict() assert runtime_response_generic_runtime_response_type_text_model_json2 == 
runtime_response_generic_runtime_response_type_text_model_json -class TestRuntimeResponseGenericRuntimeResponseTypeUserDefined(): +class TestModel_RuntimeResponseGenericRuntimeResponseTypeUserDefined(): """ Test Class for RuntimeResponseGenericRuntimeResponseTypeUserDefined """ diff --git a/test/unit/test_assistant_v2.py b/test/unit/test_assistant_v2.py index 2f5958c6..1ba9a3ee 100644 --- a/test/unit/test_assistant_v2.py +++ b/test/unit/test_assistant_v2.py @@ -51,6 +51,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -121,6 +123,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -199,6 +203,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -211,7 +217,7 @@ def test_message_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v2/assistants/testString/sessions/testString/message') - mock_response = '{"output": {"generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "suggestion_id": "suggestion_id", "options": {"restart": false, "alternate_intents": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": true, "export": true}}}}], "channels": [{"channel": "channel"}]}], "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", 
"granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed"}, "user_defined": {"mapKey": {"anyKey": "anyValue"}}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time"}, "session_id": "session_id"}, "skills": {"mapKey": {"user_defined": {"mapKey": {"anyKey": "anyValue"}}, "system": {"state": "state"}}}}, "user_id": "user_id"}' + mock_response = '{"output": {"generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "suggestion_id": "suggestion_id", "options": {"restart": false, "alternate_intents": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": false, "export": false}}}}], "channels": [{"channel": "channel"}]}], "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, 
"relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed"}, "user_defined": {"mapKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time", "session_start_time": "session_start_time", "state": "state"}, "session_id": "session_id"}, "skills": {"mapKey": {"user_defined": {"mapKey": {"anyKey": "anyValue"}}, "system": {"state": "state"}}}}, "user_id": "user_id"}' responses.add(responses.POST, url, body=mock_response, @@ -285,12 +291,12 @@ def test_message_all_params(self): # Construct a dict representation of a MessageInputOptions model message_input_options_model = {} - message_input_options_model['restart'] = True - message_input_options_model['alternate_intents'] = True + message_input_options_model['restart'] = False + message_input_options_model['alternate_intents'] = False message_input_options_model['spelling'] = message_input_options_spelling_model - message_input_options_model['debug'] = True - message_input_options_model['return_context'] = True - message_input_options_model['export'] = True + message_input_options_model['debug'] = False + message_input_options_model['return_context'] = False + message_input_options_model['export'] = False # Construct a dict representation of a MessageInput model message_input_model = {} @@ -308,6 +314,8 @@ def test_message_all_params(self): message_context_global_system_model['turn_count'] = 38 message_context_global_system_model['locale'] = 'en-us' message_context_global_system_model['reference_time'] = 'testString' + message_context_global_system_model['session_start_time'] = 'testString' + message_context_global_system_model['state'] = 'testString' # Construct a dict representation of a MessageContextGlobal model message_context_global_model = {} @@ -316,7 +324,7 @@ def test_message_all_params(self): # Construct a dict representation of a MessageContextSkillSystem model message_context_skill_system_model = {} message_context_skill_system_model['state'] = 'testString' - message_context_skill_system_model['foo'] = { 'foo': 'bar' } + message_context_skill_system_model['foo'] = 'testString' # Construct a dict representation of a MessageContextSkill model message_context_skill_model = {} @@ -362,7 +370,7 @@ def test_message_required_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v2/assistants/testString/sessions/testString/message') - mock_response = '{"output": {"generic": [{"response_type": "option", "title": "title", 
"description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "suggestion_id": "suggestion_id", "options": {"restart": false, "alternate_intents": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": true, "export": true}}}}], "channels": [{"channel": "channel"}]}], "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed"}, "user_defined": {"mapKey": {"anyKey": "anyValue"}}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time"}, "session_id": "session_id"}, "skills": {"mapKey": {"user_defined": {"mapKey": {"anyKey": "anyValue"}}, "system": {"state": "state"}}}}, "user_id": "user_id"}' + mock_response = '{"output": {"generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"message_type": 
"text", "text": "text", "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "suggestion_id": "suggestion_id", "options": {"restart": false, "alternate_intents": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": false, "export": false}}}}], "channels": [{"channel": "channel"}]}], "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed"}, "user_defined": {"mapKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time", "session_start_time": "session_start_time", "state": "state"}, "session_id": "session_id"}, "skills": {"mapKey": {"user_defined": {"mapKey": {"anyKey": "anyValue"}}, "system": {"state": "state"}}}}, "user_id": "user_id"}' responses.add(responses.POST, url, body=mock_response, @@ -392,7 +400,7 @@ def test_message_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v2/assistants/testString/sessions/testString/message') - mock_response = '{"output": {"generic": [{"response_type": "option", "title": "title", 
"description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "suggestion_id": "suggestion_id", "options": {"restart": false, "alternate_intents": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": true, "export": true}}}}], "channels": [{"channel": "channel"}]}], "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed"}, "user_defined": {"mapKey": {"anyKey": "anyValue"}}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time"}, "session_id": "session_id"}, "skills": {"mapKey": {"user_defined": {"mapKey": {"anyKey": "anyValue"}}, "system": {"state": "state"}}}}, "user_id": "user_id"}' + mock_response = '{"output": {"generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"message_type": 
"text", "text": "text", "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "suggestion_id": "suggestion_id", "options": {"restart": false, "alternate_intents": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": false, "export": false}}}}], "channels": [{"channel": "channel"}]}], "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed"}, "user_defined": {"mapKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time", "session_start_time": "session_start_time", "state": "state"}, "session_id": "session_id"}, "skills": {"mapKey": {"user_defined": {"mapKey": {"anyKey": "anyValue"}}, "system": {"state": "state"}}}}, "user_id": "user_id"}' responses.add(responses.POST, url, body=mock_response, @@ -424,6 +432,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -436,7 +446,7 @@ def test_message_stateless_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v2/assistants/testString/message') - mock_response = '{"output": {"generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "suggestion_id": "suggestion_id", "options": {"restart": false, "alternate_intents": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": true, "export": true}}}}], "channels": [{"channel": "channel"}]}], "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed"}, "user_defined": {"mapKey": {"anyKey": "anyValue"}}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}}, "context": {"global": {"system": {"timezone": "timezone", 
"user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time"}, "session_id": "session_id"}, "skills": {"mapKey": {"user_defined": {"mapKey": {"anyKey": "anyValue"}}, "system": {"state": "state"}}}}, "user_id": "user_id"}' + mock_response = '{"output": {"generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "suggestion_id": "suggestion_id", "options": {"restart": false, "alternate_intents": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": false, "export": false}}}}], "channels": [{"channel": "channel"}]}], "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed"}, "user_defined": {"mapKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time", "session_start_time": "session_start_time", "state": "state"}, "session_id": 
"session_id"}, "skills": {"mapKey": {"user_defined": {"mapKey": {"anyKey": "anyValue"}}, "system": {"state": "state"}}}}, "user_id": "user_id"}' responses.add(responses.POST, url, body=mock_response, @@ -510,10 +520,10 @@ def test_message_stateless_all_params(self): # Construct a dict representation of a MessageInputOptionsStateless model message_input_options_stateless_model = {} - message_input_options_stateless_model['restart'] = True - message_input_options_stateless_model['alternate_intents'] = True + message_input_options_stateless_model['restart'] = False + message_input_options_stateless_model['alternate_intents'] = False message_input_options_stateless_model['spelling'] = message_input_options_spelling_model - message_input_options_stateless_model['debug'] = True + message_input_options_stateless_model['debug'] = False # Construct a dict representation of a MessageInputStateless model message_input_stateless_model = {} @@ -531,6 +541,8 @@ def test_message_stateless_all_params(self): message_context_global_system_model['turn_count'] = 38 message_context_global_system_model['locale'] = 'en-us' message_context_global_system_model['reference_time'] = 'testString' + message_context_global_system_model['session_start_time'] = 'testString' + message_context_global_system_model['state'] = 'testString' # Construct a dict representation of a MessageContextGlobalStateless model message_context_global_stateless_model = {} @@ -540,7 +552,7 @@ def test_message_stateless_all_params(self): # Construct a dict representation of a MessageContextSkillSystem model message_context_skill_system_model = {} message_context_skill_system_model['state'] = 'testString' - message_context_skill_system_model['foo'] = { 'foo': 'bar' } + message_context_skill_system_model['foo'] = 'testString' # Construct a dict representation of a MessageContextSkill model message_context_skill_model = {} @@ -584,7 +596,7 @@ def test_message_stateless_required_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v2/assistants/testString/message') - mock_response = '{"output": {"generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "suggestion_id": "suggestion_id", "options": {"restart": false, "alternate_intents": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": true, "export": true}}}}], "channels": [{"channel": "channel"}]}], 
"intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed"}, "user_defined": {"mapKey": {"anyKey": "anyValue"}}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time"}, "session_id": "session_id"}, "skills": {"mapKey": {"user_defined": {"mapKey": {"anyKey": "anyValue"}}, "system": {"state": "state"}}}}, "user_id": "user_id"}' + mock_response = '{"output": {"generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "suggestion_id": "suggestion_id", "options": {"restart": false, "alternate_intents": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": false, "export": false}}}}], "channels": [{"channel": "channel"}]}], "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, 
"metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed"}, "user_defined": {"mapKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time", "session_start_time": "session_start_time", "state": "state"}, "session_id": "session_id"}, "skills": {"mapKey": {"user_defined": {"mapKey": {"anyKey": "anyValue"}}, "system": {"state": "state"}}}}, "user_id": "user_id"}' responses.add(responses.POST, url, body=mock_response, @@ -612,7 +624,7 @@ def test_message_stateless_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v2/assistants/testString/message') - mock_response = '{"output": {"generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "suggestion_id": "suggestion_id", "options": {"restart": false, "alternate_intents": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": true, "export": true}}}}], "channels": [{"channel": "channel"}]}], "intents": [{"intent": 
"intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed"}, "user_defined": {"mapKey": {"anyKey": "anyValue"}}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time"}, "session_id": "session_id"}, "skills": {"mapKey": {"user_defined": {"mapKey": {"anyKey": "anyValue"}}, "system": {"state": "state"}}}}, "user_id": "user_id"}' + mock_response = '{"output": {"generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "suggestion_id": "suggestion_id", "options": {"restart": false, "alternate_intents": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": false, "export": false}}}}], "channels": [{"channel": "channel"}]}], "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": 
"anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed"}, "user_defined": {"mapKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time", "session_start_time": "session_start_time", "state": "state"}, "session_id": "session_id"}, "skills": {"mapKey": {"user_defined": {"mapKey": {"anyKey": "anyValue"}}, "system": {"state": "state"}}}}, "user_id": "user_id"}' responses.add(responses.POST, url, body=mock_response, @@ -652,6 +664,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -664,7 +678,7 @@ def test_bulk_classify_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v2/skills/testString/workspace/bulk_classify') - mock_response = '{"output": [{"input": {"text": "text"}, "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "intents": [{"intent": "intent", "confidence": 10}]}]}' + mock_response = '{"output": [{"input": {"text": "text"}, "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "intents": [{"intent": "intent", "confidence": 10}]}]}' responses.add(responses.POST, url, body=mock_response, @@ -701,7 +715,7 @@ def test_bulk_classify_required_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v2/skills/testString/workspace/bulk_classify') - mock_response = '{"output": [{"input": {"text": "text"}, "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, 
"specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "intents": [{"intent": "intent", "confidence": 10}]}]}' + mock_response = '{"output": [{"input": {"text": "text"}, "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "intents": [{"intent": "intent", "confidence": 10}]}]}' responses.add(responses.POST, url, body=mock_response, @@ -729,7 +743,7 @@ def test_bulk_classify_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v2/skills/testString/workspace/bulk_classify') - mock_response = '{"output": [{"input": {"text": "text"}, "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "intents": [{"intent": "intent", "confidence": 10}]}]}' + mock_response = '{"output": [{"input": {"text": "text"}, "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": 
"date_from"}}], "intents": [{"intent": "intent", "confidence": 10}]}]}' responses.add(responses.POST, url, body=mock_response, @@ -769,6 +783,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -781,7 +797,7 @@ def test_list_logs_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v2/assistants/testString/logs') - mock_response = '{"logs": [{"log_id": "log_id", "request": {"input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "suggestion_id": "suggestion_id", "options": {"restart": false, "alternate_intents": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": true, "export": true}}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time"}, "session_id": "session_id"}, "skills": {"mapKey": {"user_defined": {"mapKey": {"anyKey": "anyValue"}}, "system": {"state": "state"}}}}, "user_id": "user_id"}, "response": {"output": {"generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "suggestion_id": "suggestion_id", "options": {"restart": 
false, "alternate_intents": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": true, "export": true}}}}], "channels": [{"channel": "channel"}]}], "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed"}, "user_defined": {"mapKey": {"anyKey": "anyValue"}}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time"}, "session_id": "session_id"}, "skills": {"mapKey": {"user_defined": {"mapKey": {"anyKey": "anyValue"}}, "system": {"state": "state"}}}}, "user_id": "user_id"}, "assistant_id": "assistant_id", "session_id": "session_id", "skill_id": "skill_id", "snapshot": "snapshot", "request_timestamp": "request_timestamp", "response_timestamp": "response_timestamp", "language": "language", "customer_id": "customer_id"}], "pagination": {"next_url": "next_url", "matched": 7, "next_cursor": "next_cursor"}}' + mock_response = '{"logs": [{"log_id": "log_id", "request": {"input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": 
"date_from"}}], "suggestion_id": "suggestion_id", "options": {"restart": false, "alternate_intents": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": false, "export": false}}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time", "session_start_time": "session_start_time", "state": "state"}, "session_id": "session_id"}, "skills": {"mapKey": {"user_defined": {"mapKey": {"anyKey": "anyValue"}}, "system": {"state": "state"}}}}, "user_id": "user_id"}, "response": {"output": {"generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "suggestion_id": "suggestion_id", "options": {"restart": false, "alternate_intents": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": false, "export": false}}}}], "channels": [{"channel": "channel"}]}], "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed"}, "user_defined": {"mapKey": 
"anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time", "session_start_time": "session_start_time", "state": "state"}, "session_id": "session_id"}, "skills": {"mapKey": {"user_defined": {"mapKey": {"anyKey": "anyValue"}}, "system": {"state": "state"}}}}, "user_id": "user_id"}, "assistant_id": "assistant_id", "session_id": "session_id", "skill_id": "skill_id", "snapshot": "snapshot", "request_timestamp": "request_timestamp", "response_timestamp": "response_timestamp", "language": "language", "customer_id": "customer_id"}], "pagination": {"next_url": "next_url", "matched": 7, "next_cursor": "next_cursor"}}' responses.add(responses.GET, url, body=mock_response, @@ -824,7 +840,7 @@ def test_list_logs_required_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v2/assistants/testString/logs') - mock_response = '{"logs": [{"log_id": "log_id", "request": {"input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "suggestion_id": "suggestion_id", "options": {"restart": false, "alternate_intents": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": true, "export": true}}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time"}, "session_id": "session_id"}, "skills": {"mapKey": {"user_defined": {"mapKey": {"anyKey": "anyValue"}}, "system": {"state": "state"}}}}, "user_id": "user_id"}, "response": {"output": {"generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, 
"specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "suggestion_id": "suggestion_id", "options": {"restart": false, "alternate_intents": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": true, "export": true}}}}], "channels": [{"channel": "channel"}]}], "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed"}, "user_defined": {"mapKey": {"anyKey": "anyValue"}}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time"}, "session_id": "session_id"}, "skills": {"mapKey": {"user_defined": {"mapKey": {"anyKey": "anyValue"}}, "system": {"state": "state"}}}}, "user_id": "user_id"}, "assistant_id": "assistant_id", "session_id": "session_id", "skill_id": "skill_id", "snapshot": "snapshot", "request_timestamp": "request_timestamp", "response_timestamp": "response_timestamp", "language": "language", "customer_id": "customer_id"}], "pagination": {"next_url": "next_url", "matched": 7, "next_cursor": "next_cursor"}}' + mock_response = '{"logs": [{"log_id": "log_id", "request": {"input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": 
"specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "suggestion_id": "suggestion_id", "options": {"restart": false, "alternate_intents": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": false, "export": false}}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time", "session_start_time": "session_start_time", "state": "state"}, "session_id": "session_id"}, "skills": {"mapKey": {"user_defined": {"mapKey": {"anyKey": "anyValue"}}, "system": {"state": "state"}}}}, "user_id": "user_id"}, "response": {"output": {"generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "suggestion_id": "suggestion_id", "options": {"restart": false, "alternate_intents": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": false, "export": false}}}}], "channels": [{"channel": "channel"}]}], "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": 
"result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed"}, "user_defined": {"mapKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time", "session_start_time": "session_start_time", "state": "state"}, "session_id": "session_id"}, "skills": {"mapKey": {"user_defined": {"mapKey": {"anyKey": "anyValue"}}, "system": {"state": "state"}}}}, "user_id": "user_id"}, "assistant_id": "assistant_id", "session_id": "session_id", "skill_id": "skill_id", "snapshot": "snapshot", "request_timestamp": "request_timestamp", "response_timestamp": "response_timestamp", "language": "language", "customer_id": "customer_id"}], "pagination": {"next_url": "next_url", "matched": 7, "next_cursor": "next_cursor"}}' responses.add(responses.GET, url, body=mock_response, @@ -852,7 +868,7 @@ def test_list_logs_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v2/assistants/testString/logs') - mock_response = '{"logs": [{"log_id": "log_id", "request": {"input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "suggestion_id": "suggestion_id", "options": {"restart": false, "alternate_intents": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": true, "export": true}}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time"}, "session_id": "session_id"}, "skills": {"mapKey": {"user_defined": {"mapKey": {"anyKey": "anyValue"}}, "system": {"state": "state"}}}}, "user_id": "user_id"}, "response": {"output": {"generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": 
"calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "suggestion_id": "suggestion_id", "options": {"restart": false, "alternate_intents": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": true, "export": true}}}}], "channels": [{"channel": "channel"}]}], "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": {"anyKey": "anyValue"}}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": {"anyKey": "anyValue"}}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed"}, "user_defined": {"mapKey": {"anyKey": "anyValue"}}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time"}, "session_id": "session_id"}, "skills": {"mapKey": {"user_defined": {"mapKey": {"anyKey": "anyValue"}}, "system": {"state": "state"}}}}, "user_id": "user_id"}, "assistant_id": "assistant_id", "session_id": "session_id", "skill_id": "skill_id", "snapshot": "snapshot", "request_timestamp": "request_timestamp", "response_timestamp": "response_timestamp", "language": "language", "customer_id": "customer_id"}], "pagination": {"next_url": "next_url", "matched": 7, "next_cursor": "next_cursor"}}' + mock_response = '{"logs": [{"log_id": "log_id", "request": {"input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": 
[{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "suggestion_id": "suggestion_id", "options": {"restart": false, "alternate_intents": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": false, "export": false}}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time", "session_start_time": "session_start_time", "state": "state"}, "session_id": "session_id"}, "skills": {"mapKey": {"user_defined": {"mapKey": {"anyKey": "anyValue"}}, "system": {"state": "state"}}}}, "user_id": "user_id"}, "response": {"output": {"generic": [{"response_type": "option", "title": "title", "description": "description", "preference": "dropdown", "options": [{"label": "label", "value": {"input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "suggestion_id": "suggestion_id", "options": {"restart": false, "alternate_intents": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": false, "export": false}}}}], "channels": [{"channel": "channel"}]}], "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "metadata": {"mapKey": "anyValue"}, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": 
"subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "actions": [{"name": "name", "type": "client", "parameters": {"mapKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed"}, "user_defined": {"mapKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time", "session_start_time": "session_start_time", "state": "state"}, "session_id": "session_id"}, "skills": {"mapKey": {"user_defined": {"mapKey": {"anyKey": "anyValue"}}, "system": {"state": "state"}}}}, "user_id": "user_id"}, "assistant_id": "assistant_id", "session_id": "session_id", "skill_id": "skill_id", "snapshot": "snapshot", "request_timestamp": "request_timestamp", "response_timestamp": "response_timestamp", "language": "language", "customer_id": "customer_id"}], "pagination": {"next_url": "next_url", "matched": 7, "next_cursor": "next_cursor"}}' responses.add(responses.GET, url, body=mock_response, @@ -892,6 +908,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -961,7 +979,7 @@ def test_delete_user_data_value_error(self): # Start of Model Tests ############################################################################## # region -class TestAgentAvailabilityMessage(): +class TestModel_AgentAvailabilityMessage(): """ Test Class for AgentAvailabilityMessage """ @@ -990,7 +1008,7 @@ def test_agent_availability_message_serialization(self): agent_availability_message_model_json2 = agent_availability_message_model.to_dict() assert agent_availability_message_model_json2 == agent_availability_message_model_json -class TestBulkClassifyOutput(): +class TestModel_BulkClassifyOutput(): """ Test Class for BulkClassifyOutput """ @@ -1080,7 +1098,7 @@ def test_bulk_classify_output_serialization(self): bulk_classify_output_model_json2 = bulk_classify_output_model.to_dict() assert bulk_classify_output_model_json2 == bulk_classify_output_model_json -class TestBulkClassifyResponse(): +class TestModel_BulkClassifyResponse(): """ Test Class for BulkClassifyResponse """ @@ -1173,7 +1191,7 @@ def test_bulk_classify_response_serialization(self): bulk_classify_response_model_json2 = bulk_classify_response_model.to_dict() assert bulk_classify_response_model_json2 == bulk_classify_response_model_json -class TestBulkClassifyUtterance(): +class TestModel_BulkClassifyUtterance(): """ Test Class for BulkClassifyUtterance """ @@ -1202,7 +1220,7 @@ def test_bulk_classify_utterance_serialization(self): bulk_classify_utterance_model_json2 = bulk_classify_utterance_model.to_dict() assert bulk_classify_utterance_model_json2 == bulk_classify_utterance_model_json -class TestCaptureGroup(): +class TestModel_CaptureGroup(): """ Test Class for CaptureGroup """ @@ -1232,7 +1250,7 @@ def test_capture_group_serialization(self): capture_group_model_json2 = capture_group_model.to_dict() assert capture_group_model_json2 == capture_group_model_json -class TestChannelTransferInfo(): +class TestModel_ChannelTransferInfo(): """ Test Class for ChannelTransferInfo """ @@ -1269,7 +1287,7 @@ def test_channel_transfer_info_serialization(self): channel_transfer_info_model_json2 = channel_transfer_info_model.to_dict() assert channel_transfer_info_model_json2 == channel_transfer_info_model_json -class TestChannelTransferTarget(): +class TestModel_ChannelTransferTarget(): """ Test Class for ChannelTransferTarget """ @@ -1303,7 +1321,7 @@ def test_channel_transfer_target_serialization(self): channel_transfer_target_model_json2 = channel_transfer_target_model.to_dict() assert channel_transfer_target_model_json2 == channel_transfer_target_model_json -class TestChannelTransferTargetChat(): +class TestModel_ChannelTransferTargetChat(): """ Test Class for ChannelTransferTargetChat """ @@ -1332,7 +1350,7 @@ def test_channel_transfer_target_chat_serialization(self): channel_transfer_target_chat_model_json2 = channel_transfer_target_chat_model.to_dict() assert channel_transfer_target_chat_model_json2 == channel_transfer_target_chat_model_json -class TestDialogLogMessage(): +class TestModel_DialogLogMessage(): """ Test Class for DialogLogMessage """ @@ -1370,7 +1388,7 @@ def test_dialog_log_message_serialization(self): dialog_log_message_model_json2 = dialog_log_message_model.to_dict() assert dialog_log_message_model_json2 == dialog_log_message_model_json -class TestDialogNodeAction(): 
+class TestModel_DialogNodeAction(): """ Test Class for DialogNodeAction """ @@ -1403,7 +1421,7 @@ def test_dialog_node_action_serialization(self): dialog_node_action_model_json2 = dialog_node_action_model.to_dict() assert dialog_node_action_model_json2 == dialog_node_action_model_json -class TestDialogNodeOutputConnectToAgentTransferInfo(): +class TestModel_DialogNodeOutputConnectToAgentTransferInfo(): """ Test Class for DialogNodeOutputConnectToAgentTransferInfo """ @@ -1432,7 +1450,7 @@ def test_dialog_node_output_connect_to_agent_transfer_info_serialization(self): dialog_node_output_connect_to_agent_transfer_info_model_json2 = dialog_node_output_connect_to_agent_transfer_info_model.to_dict() assert dialog_node_output_connect_to_agent_transfer_info_model_json2 == dialog_node_output_connect_to_agent_transfer_info_model_json -class TestDialogNodeOutputOptionsElement(): +class TestModel_DialogNodeOutputOptionsElement(): """ Test Class for DialogNodeOutputOptionsElement """ @@ -1503,12 +1521,12 @@ def test_dialog_node_output_options_element_serialization(self): message_input_options_spelling_model['auto_correct'] = True message_input_options_model = {} # MessageInputOptions - message_input_options_model['restart'] = True - message_input_options_model['alternate_intents'] = True + message_input_options_model['restart'] = False + message_input_options_model['alternate_intents'] = False message_input_options_model['spelling'] = message_input_options_spelling_model - message_input_options_model['debug'] = True - message_input_options_model['return_context'] = True - message_input_options_model['export'] = True + message_input_options_model['debug'] = False + message_input_options_model['return_context'] = False + message_input_options_model['export'] = False message_input_model = {} # MessageInput message_input_model['message_type'] = 'text' @@ -1541,7 +1559,7 @@ def test_dialog_node_output_options_element_serialization(self): dialog_node_output_options_element_model_json2 = dialog_node_output_options_element_model.to_dict() assert dialog_node_output_options_element_model_json2 == dialog_node_output_options_element_model_json -class TestDialogNodeOutputOptionsElementValue(): +class TestModel_DialogNodeOutputOptionsElementValue(): """ Test Class for DialogNodeOutputOptionsElementValue """ @@ -1612,12 +1630,12 @@ def test_dialog_node_output_options_element_value_serialization(self): message_input_options_spelling_model['auto_correct'] = True message_input_options_model = {} # MessageInputOptions - message_input_options_model['restart'] = True - message_input_options_model['alternate_intents'] = True + message_input_options_model['restart'] = False + message_input_options_model['alternate_intents'] = False message_input_options_model['spelling'] = message_input_options_spelling_model - message_input_options_model['debug'] = True - message_input_options_model['return_context'] = True - message_input_options_model['export'] = True + message_input_options_model['debug'] = False + message_input_options_model['return_context'] = False + message_input_options_model['export'] = False message_input_model = {} # MessageInput message_input_model['message_type'] = 'text' @@ -1646,7 +1664,7 @@ def test_dialog_node_output_options_element_value_serialization(self): dialog_node_output_options_element_value_model_json2 = dialog_node_output_options_element_value_model.to_dict() assert dialog_node_output_options_element_value_model_json2 == dialog_node_output_options_element_value_model_json -class 
TestDialogNodesVisited(): +class TestModel_DialogNodesVisited(): """ Test Class for DialogNodesVisited """ @@ -1677,7 +1695,7 @@ def test_dialog_nodes_visited_serialization(self): dialog_nodes_visited_model_json2 = dialog_nodes_visited_model.to_dict() assert dialog_nodes_visited_model_json2 == dialog_nodes_visited_model_json -class TestDialogSuggestion(): +class TestModel_DialogSuggestion(): """ Test Class for DialogSuggestion """ @@ -1748,12 +1766,12 @@ def test_dialog_suggestion_serialization(self): message_input_options_spelling_model['auto_correct'] = True message_input_options_model = {} # MessageInputOptions - message_input_options_model['restart'] = True - message_input_options_model['alternate_intents'] = True + message_input_options_model['restart'] = False + message_input_options_model['alternate_intents'] = False message_input_options_model['spelling'] = message_input_options_spelling_model - message_input_options_model['debug'] = True - message_input_options_model['return_context'] = True - message_input_options_model['export'] = True + message_input_options_model['debug'] = False + message_input_options_model['return_context'] = False + message_input_options_model['export'] = False message_input_model = {} # MessageInput message_input_model['message_type'] = 'text' @@ -1787,7 +1805,7 @@ def test_dialog_suggestion_serialization(self): dialog_suggestion_model_json2 = dialog_suggestion_model.to_dict() assert dialog_suggestion_model_json2 == dialog_suggestion_model_json -class TestDialogSuggestionValue(): +class TestModel_DialogSuggestionValue(): """ Test Class for DialogSuggestionValue """ @@ -1858,12 +1876,12 @@ def test_dialog_suggestion_value_serialization(self): message_input_options_spelling_model['auto_correct'] = True message_input_options_model = {} # MessageInputOptions - message_input_options_model['restart'] = True - message_input_options_model['alternate_intents'] = True + message_input_options_model['restart'] = False + message_input_options_model['alternate_intents'] = False message_input_options_model['spelling'] = message_input_options_spelling_model - message_input_options_model['debug'] = True - message_input_options_model['return_context'] = True - message_input_options_model['export'] = True + message_input_options_model['debug'] = False + message_input_options_model['return_context'] = False + message_input_options_model['export'] = False message_input_model = {} # MessageInput message_input_model['message_type'] = 'text' @@ -1892,7 +1910,7 @@ def test_dialog_suggestion_value_serialization(self): dialog_suggestion_value_model_json2 = dialog_suggestion_value_model.to_dict() assert dialog_suggestion_value_model_json2 == dialog_suggestion_value_model_json -class TestLog(): +class TestModel_Log(): """ Test Class for Log """ @@ -1963,12 +1981,12 @@ def test_log_serialization(self): message_input_options_spelling_model['auto_correct'] = True message_input_options_model = {} # MessageInputOptions - message_input_options_model['restart'] = True - message_input_options_model['alternate_intents'] = True + message_input_options_model['restart'] = False + message_input_options_model['alternate_intents'] = False message_input_options_model['spelling'] = message_input_options_spelling_model - message_input_options_model['debug'] = True - message_input_options_model['return_context'] = True - message_input_options_model['export'] = True + message_input_options_model['debug'] = False + message_input_options_model['return_context'] = False + 
message_input_options_model['export'] = False message_input_model = {} # MessageInput message_input_model['message_type'] = 'text' @@ -1984,6 +2002,8 @@ def test_log_serialization(self): message_context_global_system_model['turn_count'] = 38 message_context_global_system_model['locale'] = 'en-us' message_context_global_system_model['reference_time'] = 'testString' + message_context_global_system_model['session_start_time'] = 'testString' + message_context_global_system_model['state'] = 'testString' message_context_global_model = {} # MessageContextGlobal message_context_global_model['system'] = message_context_global_system_model @@ -1991,7 +2011,7 @@ def test_log_serialization(self): message_context_skill_system_model = {} # MessageContextSkillSystem message_context_skill_system_model['state'] = 'testString' - message_context_skill_system_model['foo'] = { 'foo': 'bar' } + message_context_skill_system_model['foo'] = 'testString' message_context_skill_model = {} # MessageContextSkill message_context_skill_model['user_defined'] = {} @@ -2100,7 +2120,7 @@ def test_log_serialization(self): log_model_json2 = log_model.to_dict() assert log_model_json2 == log_model_json -class TestLogCollection(): +class TestModel_LogCollection(): """ Test Class for LogCollection """ @@ -2171,12 +2191,12 @@ def test_log_collection_serialization(self): message_input_options_spelling_model['auto_correct'] = True message_input_options_model = {} # MessageInputOptions - message_input_options_model['restart'] = True - message_input_options_model['alternate_intents'] = True + message_input_options_model['restart'] = False + message_input_options_model['alternate_intents'] = False message_input_options_model['spelling'] = message_input_options_spelling_model - message_input_options_model['debug'] = True - message_input_options_model['return_context'] = True - message_input_options_model['export'] = True + message_input_options_model['debug'] = False + message_input_options_model['return_context'] = False + message_input_options_model['export'] = False message_input_model = {} # MessageInput message_input_model['message_type'] = 'text' @@ -2192,6 +2212,8 @@ def test_log_collection_serialization(self): message_context_global_system_model['turn_count'] = 38 message_context_global_system_model['locale'] = 'en-us' message_context_global_system_model['reference_time'] = 'testString' + message_context_global_system_model['session_start_time'] = 'testString' + message_context_global_system_model['state'] = 'testString' message_context_global_model = {} # MessageContextGlobal message_context_global_model['system'] = message_context_global_system_model @@ -2199,7 +2221,7 @@ def test_log_collection_serialization(self): message_context_skill_system_model = {} # MessageContextSkillSystem message_context_skill_system_model['state'] = 'testString' - message_context_skill_system_model['foo'] = { 'foo': 'bar' } + message_context_skill_system_model['foo'] = 'testString' message_context_skill_model = {} # MessageContextSkill message_context_skill_model['user_defined'] = {} @@ -2317,7 +2339,7 @@ def test_log_collection_serialization(self): log_collection_model_json2 = log_collection_model.to_dict() assert log_collection_model_json2 == log_collection_model_json -class TestLogPagination(): +class TestModel_LogPagination(): """ Test Class for LogPagination """ @@ -2348,7 +2370,7 @@ def test_log_pagination_serialization(self): log_pagination_model_json2 = log_pagination_model.to_dict() assert log_pagination_model_json2 == 
log_pagination_model_json -class TestMessageContext(): +class TestModel_MessageContext(): """ Test Class for MessageContext """ @@ -2366,6 +2388,8 @@ def test_message_context_serialization(self): message_context_global_system_model['turn_count'] = 38 message_context_global_system_model['locale'] = 'en-us' message_context_global_system_model['reference_time'] = 'testString' + message_context_global_system_model['session_start_time'] = 'testString' + message_context_global_system_model['state'] = 'testString' message_context_global_model = {} # MessageContextGlobal message_context_global_model['system'] = message_context_global_system_model @@ -2373,7 +2397,7 @@ def test_message_context_serialization(self): message_context_skill_system_model = {} # MessageContextSkillSystem message_context_skill_system_model['state'] = 'testString' - message_context_skill_system_model['foo'] = { 'foo': 'bar' } + message_context_skill_system_model['foo'] = 'testString' message_context_skill_model = {} # MessageContextSkill message_context_skill_model['user_defined'] = {} @@ -2399,7 +2423,7 @@ def test_message_context_serialization(self): message_context_model_json2 = message_context_model.to_dict() assert message_context_model_json2 == message_context_model_json -class TestMessageContextGlobal(): +class TestModel_MessageContextGlobal(): """ Test Class for MessageContextGlobal """ @@ -2417,6 +2441,8 @@ def test_message_context_global_serialization(self): message_context_global_system_model['turn_count'] = 38 message_context_global_system_model['locale'] = 'en-us' message_context_global_system_model['reference_time'] = 'testString' + message_context_global_system_model['session_start_time'] = 'testString' + message_context_global_system_model['state'] = 'testString' # Construct a json representation of a MessageContextGlobal model message_context_global_model_json = {} @@ -2438,7 +2464,7 @@ def test_message_context_global_serialization(self): message_context_global_model_json2 = message_context_global_model.to_dict() assert message_context_global_model_json2 == message_context_global_model_json -class TestMessageContextGlobalStateless(): +class TestModel_MessageContextGlobalStateless(): """ Test Class for MessageContextGlobalStateless """ @@ -2456,6 +2482,8 @@ def test_message_context_global_stateless_serialization(self): message_context_global_system_model['turn_count'] = 38 message_context_global_system_model['locale'] = 'en-us' message_context_global_system_model['reference_time'] = 'testString' + message_context_global_system_model['session_start_time'] = 'testString' + message_context_global_system_model['state'] = 'testString' # Construct a json representation of a MessageContextGlobalStateless model message_context_global_stateless_model_json = {} @@ -2477,7 +2505,7 @@ def test_message_context_global_stateless_serialization(self): message_context_global_stateless_model_json2 = message_context_global_stateless_model.to_dict() assert message_context_global_stateless_model_json2 == message_context_global_stateless_model_json -class TestMessageContextGlobalSystem(): +class TestModel_MessageContextGlobalSystem(): """ Test Class for MessageContextGlobalSystem """ @@ -2494,6 +2522,8 @@ def test_message_context_global_system_serialization(self): message_context_global_system_model_json['turn_count'] = 38 message_context_global_system_model_json['locale'] = 'en-us' message_context_global_system_model_json['reference_time'] = 'testString' + message_context_global_system_model_json['session_start_time'] = 
'testString' + message_context_global_system_model_json['state'] = 'testString' # Construct a model instance of MessageContextGlobalSystem by calling from_dict on the json representation message_context_global_system_model = MessageContextGlobalSystem.from_dict(message_context_global_system_model_json) @@ -2510,7 +2540,7 @@ def test_message_context_global_system_serialization(self): message_context_global_system_model_json2 = message_context_global_system_model.to_dict() assert message_context_global_system_model_json2 == message_context_global_system_model_json -class TestMessageContextSkill(): +class TestModel_MessageContextSkill(): """ Test Class for MessageContextSkill """ @@ -2524,7 +2554,7 @@ def test_message_context_skill_serialization(self): message_context_skill_system_model = {} # MessageContextSkillSystem message_context_skill_system_model['state'] = 'testString' - message_context_skill_system_model['foo'] = { 'foo': 'bar' } + message_context_skill_system_model['foo'] = 'testString' # Construct a json representation of a MessageContextSkill model message_context_skill_model_json = {} @@ -2546,7 +2576,7 @@ def test_message_context_skill_serialization(self): message_context_skill_model_json2 = message_context_skill_model.to_dict() assert message_context_skill_model_json2 == message_context_skill_model_json -class TestMessageContextSkillSystem(): +class TestModel_MessageContextSkillSystem(): """ Test Class for MessageContextSkillSystem """ @@ -2559,7 +2589,7 @@ def test_message_context_skill_system_serialization(self): # Construct a json representation of a MessageContextSkillSystem model message_context_skill_system_model_json = {} message_context_skill_system_model_json['state'] = 'testString' - message_context_skill_system_model_json['foo'] = { 'foo': 'bar' } + message_context_skill_system_model_json['foo'] = 'testString' # Construct a model instance of MessageContextSkillSystem by calling from_dict on the json representation message_context_skill_system_model = MessageContextSkillSystem.from_dict(message_context_skill_system_model_json) @@ -2576,7 +2606,17 @@ def test_message_context_skill_system_serialization(self): message_context_skill_system_model_json2 = message_context_skill_system_model.to_dict() assert message_context_skill_system_model_json2 == message_context_skill_system_model_json -class TestMessageContextStateless(): + # Test get_properties and set_properties methods. 
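MessageContextSkillSystem is one of the SDK's dynamic models (SearchResultHighlight, further down, is another), and the new assertions below exercise its get_properties and set_properties accessors. A toy class showing the contract those assertions rely on (illustrative only, not the SDK's generated implementation):

    class DynamicModelSketch:
        """Declared fields plus a bag of arbitrary extra properties."""

        def __init__(self, state=None, **kwargs):
            self.state = state               # declared field
            self._additional = dict(kwargs)  # dynamic properties

        def set_properties(self, props):
            # Replace all dynamic properties in one call.
            self._additional = dict(props)

        def get_properties(self):
            # Return only the dynamic properties, never declared fields.
            return dict(self._additional)

    model = DynamicModelSketch(state='testString', foo='testString')
    assert model.get_properties() == {'foo': 'testString'}
    model.set_properties({})
    assert model.get_properties() == {}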
+ message_context_skill_system_model.set_properties({}) + actual_dict = message_context_skill_system_model.get_properties() + assert actual_dict == {} + + expected_dict = {'foo': 'testString'} + message_context_skill_system_model.set_properties(expected_dict) + actual_dict = message_context_skill_system_model.get_properties() + assert actual_dict == expected_dict + +class TestModel_MessageContextStateless(): """ Test Class for MessageContextStateless """ @@ -2594,6 +2634,8 @@ def test_message_context_stateless_serialization(self): message_context_global_system_model['turn_count'] = 38 message_context_global_system_model['locale'] = 'en-us' message_context_global_system_model['reference_time'] = 'testString' + message_context_global_system_model['session_start_time'] = 'testString' + message_context_global_system_model['state'] = 'testString' message_context_global_stateless_model = {} # MessageContextGlobalStateless message_context_global_stateless_model['system'] = message_context_global_system_model @@ -2601,7 +2643,7 @@ def test_message_context_stateless_serialization(self): message_context_skill_system_model = {} # MessageContextSkillSystem message_context_skill_system_model['state'] = 'testString' - message_context_skill_system_model['foo'] = { 'foo': 'bar' } + message_context_skill_system_model['foo'] = 'testString' message_context_skill_model = {} # MessageContextSkill message_context_skill_model['user_defined'] = {} @@ -2627,7 +2669,7 @@ def test_message_context_stateless_serialization(self): message_context_stateless_model_json2 = message_context_stateless_model.to_dict() assert message_context_stateless_model_json2 == message_context_stateless_model_json -class TestMessageInput(): +class TestModel_MessageInput(): """ Test Class for MessageInput """ @@ -2698,12 +2740,12 @@ def test_message_input_serialization(self): message_input_options_spelling_model['auto_correct'] = True message_input_options_model = {} # MessageInputOptions - message_input_options_model['restart'] = True - message_input_options_model['alternate_intents'] = True + message_input_options_model['restart'] = False + message_input_options_model['alternate_intents'] = False message_input_options_model['spelling'] = message_input_options_spelling_model - message_input_options_model['debug'] = True - message_input_options_model['return_context'] = True - message_input_options_model['export'] = True + message_input_options_model['debug'] = False + message_input_options_model['return_context'] = False + message_input_options_model['export'] = False # Construct a json representation of a MessageInput model message_input_model_json = {} @@ -2729,7 +2771,7 @@ def test_message_input_serialization(self): message_input_model_json2 = message_input_model.to_dict() assert message_input_model_json2 == message_input_model_json -class TestMessageInputOptions(): +class TestModel_MessageInputOptions(): """ Test Class for MessageInputOptions """ @@ -2747,12 +2789,12 @@ def test_message_input_options_serialization(self): # Construct a json representation of a MessageInputOptions model message_input_options_model_json = {} - message_input_options_model_json['restart'] = True - message_input_options_model_json['alternate_intents'] = True + message_input_options_model_json['restart'] = False + message_input_options_model_json['alternate_intents'] = False message_input_options_model_json['spelling'] = message_input_options_spelling_model - message_input_options_model_json['debug'] = True - message_input_options_model_json['return_context'] 
= True - message_input_options_model_json['export'] = True + message_input_options_model_json['debug'] = False + message_input_options_model_json['return_context'] = False + message_input_options_model_json['export'] = False # Construct a model instance of MessageInputOptions by calling from_dict on the json representation message_input_options_model = MessageInputOptions.from_dict(message_input_options_model_json) @@ -2769,7 +2811,7 @@ def test_message_input_options_serialization(self): message_input_options_model_json2 = message_input_options_model.to_dict() assert message_input_options_model_json2 == message_input_options_model_json -class TestMessageInputOptionsSpelling(): +class TestModel_MessageInputOptionsSpelling(): """ Test Class for MessageInputOptionsSpelling """ @@ -2799,7 +2841,7 @@ def test_message_input_options_spelling_serialization(self): message_input_options_spelling_model_json2 = message_input_options_spelling_model.to_dict() assert message_input_options_spelling_model_json2 == message_input_options_spelling_model_json -class TestMessageInputOptionsStateless(): +class TestModel_MessageInputOptionsStateless(): """ Test Class for MessageInputOptionsStateless """ @@ -2817,10 +2859,10 @@ def test_message_input_options_stateless_serialization(self): # Construct a json representation of a MessageInputOptionsStateless model message_input_options_stateless_model_json = {} - message_input_options_stateless_model_json['restart'] = True - message_input_options_stateless_model_json['alternate_intents'] = True + message_input_options_stateless_model_json['restart'] = False + message_input_options_stateless_model_json['alternate_intents'] = False message_input_options_stateless_model_json['spelling'] = message_input_options_spelling_model - message_input_options_stateless_model_json['debug'] = True + message_input_options_stateless_model_json['debug'] = False # Construct a model instance of MessageInputOptionsStateless by calling from_dict on the json representation message_input_options_stateless_model = MessageInputOptionsStateless.from_dict(message_input_options_stateless_model_json) @@ -2837,7 +2879,7 @@ def test_message_input_options_stateless_serialization(self): message_input_options_stateless_model_json2 = message_input_options_stateless_model.to_dict() assert message_input_options_stateless_model_json2 == message_input_options_stateless_model_json -class TestMessageInputStateless(): +class TestModel_MessageInputStateless(): """ Test Class for MessageInputStateless """ @@ -2908,10 +2950,10 @@ def test_message_input_stateless_serialization(self): message_input_options_spelling_model['auto_correct'] = True message_input_options_stateless_model = {} # MessageInputOptionsStateless - message_input_options_stateless_model['restart'] = True - message_input_options_stateless_model['alternate_intents'] = True + message_input_options_stateless_model['restart'] = False + message_input_options_stateless_model['alternate_intents'] = False message_input_options_stateless_model['spelling'] = message_input_options_spelling_model - message_input_options_stateless_model['debug'] = True + message_input_options_stateless_model['debug'] = False # Construct a json representation of a MessageInputStateless model message_input_stateless_model_json = {} @@ -2937,7 +2979,7 @@ def test_message_input_stateless_serialization(self): message_input_stateless_model_json2 = message_input_stateless_model.to_dict() assert message_input_stateless_model_json2 == message_input_stateless_model_json -class 
TestMessageOutput(): +class TestModel_MessageOutput(): """ Test Class for MessageOutput """ @@ -3008,12 +3050,12 @@ def test_message_output_serialization(self): message_input_options_spelling_model['auto_correct'] = True message_input_options_model = {} # MessageInputOptions - message_input_options_model['restart'] = True - message_input_options_model['alternate_intents'] = True + message_input_options_model['restart'] = False + message_input_options_model['alternate_intents'] = False message_input_options_model['spelling'] = message_input_options_spelling_model - message_input_options_model['debug'] = True - message_input_options_model['return_context'] = True - message_input_options_model['export'] = True + message_input_options_model['debug'] = False + message_input_options_model['return_context'] = False + message_input_options_model['export'] = False message_input_model = {} # MessageInput message_input_model['message_type'] = 'text' @@ -3099,7 +3141,7 @@ def test_message_output_serialization(self): message_output_model_json2 = message_output_model.to_dict() assert message_output_model_json2 == message_output_model_json -class TestMessageOutputDebug(): +class TestModel_MessageOutputDebug(): """ Test Class for MessageOutputDebug """ @@ -3148,7 +3190,7 @@ def test_message_output_debug_serialization(self): message_output_debug_model_json2 = message_output_debug_model.to_dict() assert message_output_debug_model_json2 == message_output_debug_model_json -class TestMessageOutputSpelling(): +class TestModel_MessageOutputSpelling(): """ Test Class for MessageOutputSpelling """ @@ -3179,7 +3221,7 @@ def test_message_output_spelling_serialization(self): message_output_spelling_model_json2 = message_output_spelling_model.to_dict() assert message_output_spelling_model_json2 == message_output_spelling_model_json -class TestMessageRequest(): +class TestModel_MessageRequest(): """ Test Class for MessageRequest """ @@ -3250,10 +3292,10 @@ def test_message_request_serialization(self): message_input_options_spelling_model['auto_correct'] = True message_input_options_model = {} # MessageInputOptions - message_input_options_model['restart'] = True - message_input_options_model['alternate_intents'] = True + message_input_options_model['restart'] = False + message_input_options_model['alternate_intents'] = False message_input_options_model['spelling'] = message_input_options_spelling_model - message_input_options_model['debug'] = True + message_input_options_model['debug'] = False message_input_options_model['return_context'] = True message_input_options_model['export'] = True @@ -3271,6 +3313,8 @@ def test_message_request_serialization(self): message_context_global_system_model['turn_count'] = 38 message_context_global_system_model['locale'] = 'en-us' message_context_global_system_model['reference_time'] = 'testString' + message_context_global_system_model['session_start_time'] = 'testString' + message_context_global_system_model['state'] = 'testString' message_context_global_model = {} # MessageContextGlobal message_context_global_model['system'] = message_context_global_system_model @@ -3278,7 +3322,7 @@ def test_message_request_serialization(self): message_context_skill_system_model = {} # MessageContextSkillSystem message_context_skill_system_model['state'] = 'testString' - message_context_skill_system_model['foo'] = { 'foo': 'bar' } + message_context_skill_system_model['foo'] = 'testString' message_context_skill_model = {} # MessageContextSkill message_context_skill_model['user_defined'] = {} @@ 
-3309,7 +3353,7 @@ def test_message_request_serialization(self): message_request_model_json2 = message_request_model.to_dict() assert message_request_model_json2 == message_request_model_json -class TestMessageResponse(): +class TestModel_MessageResponse(): """ Test Class for MessageResponse """ @@ -3380,12 +3424,12 @@ def test_message_response_serialization(self): message_input_options_spelling_model['auto_correct'] = True message_input_options_model = {} # MessageInputOptions - message_input_options_model['restart'] = True - message_input_options_model['alternate_intents'] = True + message_input_options_model['restart'] = False + message_input_options_model['alternate_intents'] = False message_input_options_model['spelling'] = message_input_options_spelling_model - message_input_options_model['debug'] = True - message_input_options_model['return_context'] = True - message_input_options_model['export'] = True + message_input_options_model['debug'] = False + message_input_options_model['return_context'] = False + message_input_options_model['export'] = False message_input_model = {} # MessageInput message_input_model['message_type'] = 'text' @@ -3461,6 +3505,8 @@ def test_message_response_serialization(self): message_context_global_system_model['turn_count'] = 38 message_context_global_system_model['locale'] = 'en-us' message_context_global_system_model['reference_time'] = 'testString' + message_context_global_system_model['session_start_time'] = 'testString' + message_context_global_system_model['state'] = 'testString' message_context_global_model = {} # MessageContextGlobal message_context_global_model['system'] = message_context_global_system_model @@ -3468,7 +3514,7 @@ def test_message_response_serialization(self): message_context_skill_system_model = {} # MessageContextSkillSystem message_context_skill_system_model['state'] = 'testString' - message_context_skill_system_model['foo'] = { 'foo': 'bar' } + message_context_skill_system_model['foo'] = 'testString' message_context_skill_model = {} # MessageContextSkill message_context_skill_model['user_defined'] = {} @@ -3499,7 +3545,7 @@ def test_message_response_serialization(self): message_response_model_json2 = message_response_model.to_dict() assert message_response_model_json2 == message_response_model_json -class TestMessageResponseStateless(): +class TestModel_MessageResponseStateless(): """ Test Class for MessageResponseStateless """ @@ -3570,12 +3616,12 @@ def test_message_response_stateless_serialization(self): message_input_options_spelling_model['auto_correct'] = True message_input_options_model = {} # MessageInputOptions - message_input_options_model['restart'] = True - message_input_options_model['alternate_intents'] = True + message_input_options_model['restart'] = False + message_input_options_model['alternate_intents'] = False message_input_options_model['spelling'] = message_input_options_spelling_model - message_input_options_model['debug'] = True - message_input_options_model['return_context'] = True - message_input_options_model['export'] = True + message_input_options_model['debug'] = False + message_input_options_model['return_context'] = False + message_input_options_model['export'] = False message_input_model = {} # MessageInput message_input_model['message_type'] = 'text' @@ -3651,6 +3697,8 @@ def test_message_response_stateless_serialization(self): message_context_global_system_model['turn_count'] = 38 message_context_global_system_model['locale'] = 'en-us' message_context_global_system_model['reference_time'] = 
'testString' + message_context_global_system_model['session_start_time'] = 'testString' + message_context_global_system_model['state'] = 'testString' message_context_global_stateless_model = {} # MessageContextGlobalStateless message_context_global_stateless_model['system'] = message_context_global_system_model @@ -3658,7 +3706,7 @@ def test_message_response_stateless_serialization(self): message_context_skill_system_model = {} # MessageContextSkillSystem message_context_skill_system_model['state'] = 'testString' - message_context_skill_system_model['foo'] = { 'foo': 'bar' } + message_context_skill_system_model['foo'] = 'testString' message_context_skill_model = {} # MessageContextSkill message_context_skill_model['user_defined'] = {} @@ -3689,7 +3737,7 @@ def test_message_response_stateless_serialization(self): message_response_stateless_model_json2 = message_response_stateless_model.to_dict() assert message_response_stateless_model_json2 == message_response_stateless_model_json -class TestResponseGenericChannel(): +class TestModel_ResponseGenericChannel(): """ Test Class for ResponseGenericChannel """ @@ -3718,7 +3766,7 @@ def test_response_generic_channel_serialization(self): response_generic_channel_model_json2 = response_generic_channel_model.to_dict() assert response_generic_channel_model_json2 == response_generic_channel_model_json -class TestRuntimeEntity(): +class TestModel_RuntimeEntity(): """ Test Class for RuntimeEntity """ @@ -3796,7 +3844,7 @@ def test_runtime_entity_serialization(self): runtime_entity_model_json2 = runtime_entity_model.to_dict() assert runtime_entity_model_json2 == runtime_entity_model_json -class TestRuntimeEntityAlternative(): +class TestModel_RuntimeEntityAlternative(): """ Test Class for RuntimeEntityAlternative """ @@ -3826,7 +3874,7 @@ def test_runtime_entity_alternative_serialization(self): runtime_entity_alternative_model_json2 = runtime_entity_alternative_model.to_dict() assert runtime_entity_alternative_model_json2 == runtime_entity_alternative_model_json -class TestRuntimeEntityInterpretation(): +class TestModel_RuntimeEntityInterpretation(): """ Test Class for RuntimeEntityInterpretation """ @@ -3880,7 +3928,7 @@ def test_runtime_entity_interpretation_serialization(self): runtime_entity_interpretation_model_json2 = runtime_entity_interpretation_model.to_dict() assert runtime_entity_interpretation_model_json2 == runtime_entity_interpretation_model_json -class TestRuntimeEntityRole(): +class TestModel_RuntimeEntityRole(): """ Test Class for RuntimeEntityRole """ @@ -3909,7 +3957,7 @@ def test_runtime_entity_role_serialization(self): runtime_entity_role_model_json2 = runtime_entity_role_model.to_dict() assert runtime_entity_role_model_json2 == runtime_entity_role_model_json -class TestRuntimeIntent(): +class TestModel_RuntimeIntent(): """ Test Class for RuntimeIntent """ @@ -3939,7 +3987,7 @@ def test_runtime_intent_serialization(self): runtime_intent_model_json2 = runtime_intent_model.to_dict() assert runtime_intent_model_json2 == runtime_intent_model_json -class TestSearchResult(): +class TestModel_SearchResult(): """ Test Class for SearchResult """ @@ -3961,6 +4009,10 @@ def test_search_result_serialization(self): search_result_highlight_model['url'] = ['testString'] search_result_highlight_model['foo'] = ['testString'] + search_result_answer_model = {} # SearchResultAnswer + search_result_answer_model['text'] = 'testString' + search_result_answer_model['confidence'] = 0 + # Construct a json representation of a SearchResult model 
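The fixture just above introduces the new SearchResultAnswer sub-model, and the SearchResult json built next threads it in through the new answers field. Assembled from the fixtures visible in this hunk (the diff elides the remaining SearchResult fields, so they are omitted here too), a serialized result looks roughly like:

    search_result = {
        'id': 'testString',
        'title': 'testString',
        'url': 'testString',
        'highlight': {'url': ['testString'], 'foo': ['testString']},
        'answers': [{'text': 'testString', 'confidence': 0}],  # new in this change
    }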
@@ -3961,6 +4009,10 @@ def test_search_result_serialization(self):
         search_result_highlight_model['url'] = ['testString']
         search_result_highlight_model['foo'] = ['testString']
 
+        search_result_answer_model = {} # SearchResultAnswer
+        search_result_answer_model['text'] = 'testString'
+        search_result_answer_model['confidence'] = 0
+
         # Construct a json representation of a SearchResult model
         search_result_model_json = {}
         search_result_model_json['id'] = 'testString'
@@ -3969,6 +4021,7 @@ def test_search_result_serialization(self):
         search_result_model_json['title'] = 'testString'
         search_result_model_json['url'] = 'testString'
         search_result_model_json['highlight'] = search_result_highlight_model
+        search_result_model_json['answers'] = [search_result_answer_model]
 
         # Construct a model instance of SearchResult by calling from_dict on the json representation
         search_result_model = SearchResult.from_dict(search_result_model_json)
@@ -3985,7 +4038,37 @@ def test_search_result_serialization(self):
         search_result_model_json2 = search_result_model.to_dict()
         assert search_result_model_json2 == search_result_model_json
 
-class TestSearchResultHighlight():
+class TestModel_SearchResultAnswer():
+    """
+    Test Class for SearchResultAnswer
+    """
+
+    def test_search_result_answer_serialization(self):
+        """
+        Test serialization/deserialization for SearchResultAnswer
+        """
+
+        # Construct a json representation of a SearchResultAnswer model
+        search_result_answer_model_json = {}
+        search_result_answer_model_json['text'] = 'testString'
+        search_result_answer_model_json['confidence'] = 0
+
+        # Construct a model instance of SearchResultAnswer by calling from_dict on the json representation
+        search_result_answer_model = SearchResultAnswer.from_dict(search_result_answer_model_json)
+        assert search_result_answer_model != False
+
+        # Construct a model instance of SearchResultAnswer by calling from_dict on the json representation
+        search_result_answer_model_dict = SearchResultAnswer.from_dict(search_result_answer_model_json).__dict__
+        search_result_answer_model2 = SearchResultAnswer(**search_result_answer_model_dict)
+
+        # Verify the model instances are equivalent
+        assert search_result_answer_model == search_result_answer_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        search_result_answer_model_json2 = search_result_answer_model.to_dict()
+        assert search_result_answer_model_json2 == search_result_answer_model_json
+
+class TestModel_SearchResultHighlight():
     """
     Test Class for SearchResultHighlight
     """
@@ -4017,7 +4100,17 @@ def test_search_result_highlight_serialization(self):
         search_result_highlight_model_json2 = search_result_highlight_model.to_dict()
         assert search_result_highlight_model_json2 == search_result_highlight_model_json
 
-class TestSearchResultMetadata():
+        # Test get_properties and set_properties methods.
+        search_result_highlight_model.set_properties({})
+        actual_dict = search_result_highlight_model.get_properties()
+        assert actual_dict == {}
+
+        expected_dict = {'foo': ['testString']}
+        search_result_highlight_model.set_properties(expected_dict)
+        actual_dict = search_result_highlight_model.get_properties()
+        assert actual_dict == expected_dict
+
+class TestModel_SearchResultMetadata():
     """
     Test Class for SearchResultMetadata
     """
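Editor's note: the assertions added above exercise the dynamic ("additional properties") support on SearchResultHighlight. A hedged sketch of that API, assuming the class is importable from ibm_watson.assistant_v2 and that set_properties replaces the whole bag of extra properties, which is what the new test implies:

```python
# Sketch of the dynamic-properties API the added test exercises.
from ibm_watson.assistant_v2 import SearchResultHighlight

highlight = SearchResultHighlight.from_dict({'body': ['testString']})

# Arbitrary extra fields ride along next to the typed ones.
highlight.set_properties({'foo': ['testString']})
assert highlight.get_properties() == {'foo': ['testString']}

# set_properties appears to replace the whole bag; an empty dict clears it.
highlight.set_properties({})
assert highlight.get_properties() == {}
```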
@@ -4047,7 +4140,7 @@ def test_search_result_metadata_serialization(self):
         search_result_metadata_model_json2 = search_result_metadata_model.to_dict()
         assert search_result_metadata_model_json2 == search_result_metadata_model_json
 
-class TestSessionResponse():
+class TestModel_SessionResponse():
     """
     Test Class for SessionResponse
     """
@@ -4076,7 +4169,7 @@ def test_session_response_serialization(self):
         session_response_model_json2 = session_response_model.to_dict()
         assert session_response_model_json2 == session_response_model_json
 
-class TestLogMessageSourceAction():
+class TestModel_LogMessageSourceAction():
     """
     Test Class for LogMessageSourceAction
     """
@@ -4106,7 +4199,7 @@ def test_log_message_source_action_serialization(self):
         log_message_source_action_model_json2 = log_message_source_action_model.to_dict()
         assert log_message_source_action_model_json2 == log_message_source_action_model_json
 
-class TestLogMessageSourceDialogNode():
+class TestModel_LogMessageSourceDialogNode():
     """
     Test Class for LogMessageSourceDialogNode
     """
@@ -4136,7 +4229,7 @@ def test_log_message_source_dialog_node_serialization(self):
         log_message_source_dialog_node_model_json2 = log_message_source_dialog_node_model.to_dict()
         assert log_message_source_dialog_node_model_json2 == log_message_source_dialog_node_model_json
 
-class TestLogMessageSourceHandler():
+class TestModel_LogMessageSourceHandler():
     """
     Test Class for LogMessageSourceHandler
     """
@@ -4168,7 +4261,7 @@ def test_log_message_source_handler_serialization(self):
         log_message_source_handler_model_json2 = log_message_source_handler_model.to_dict()
         assert log_message_source_handler_model_json2 == log_message_source_handler_model_json
 
-class TestLogMessageSourceStep():
+class TestModel_LogMessageSourceStep():
     """
     Test Class for LogMessageSourceStep
     """
@@ -4199,7 +4292,7 @@ def test_log_message_source_step_serialization(self):
         log_message_source_step_model_json2 = log_message_source_step_model.to_dict()
         assert log_message_source_step_model_json2 == log_message_source_step_model_json
 
-class TestRuntimeResponseGenericRuntimeResponseTypeChannelTransfer():
+class TestModel_RuntimeResponseGenericRuntimeResponseTypeChannelTransfer():
     """
     Test Class for RuntimeResponseGenericRuntimeResponseTypeChannelTransfer
     """
@@ -4245,7 +4338,7 @@ def test_runtime_response_generic_runtime_response_type_channel_transfer_seriali
         runtime_response_generic_runtime_response_type_channel_transfer_model_json2 = runtime_response_generic_runtime_response_type_channel_transfer_model.to_dict()
         assert runtime_response_generic_runtime_response_type_channel_transfer_model_json2 == runtime_response_generic_runtime_response_type_channel_transfer_model_json
 
-class TestRuntimeResponseGenericRuntimeResponseTypeConnectToAgent():
+class TestModel_RuntimeResponseGenericRuntimeResponseTypeConnectToAgent():
     """
     Test Class for RuntimeResponseGenericRuntimeResponseTypeConnectToAgent
     """
@@ -4291,7 +4384,7 @@ def test_runtime_response_generic_runtime_response_type_connect_to_agent_seriali
         runtime_response_generic_runtime_response_type_connect_to_agent_model_json2 = runtime_response_generic_runtime_response_type_connect_to_agent_model.to_dict()
         assert runtime_response_generic_runtime_response_type_connect_to_agent_model_json2 == runtime_response_generic_runtime_response_type_connect_to_agent_model_json
 
-class TestRuntimeResponseGenericRuntimeResponseTypeImage():
+class TestModel_RuntimeResponseGenericRuntimeResponseTypeImage():
     """
     Test Class for RuntimeResponseGenericRuntimeResponseTypeImage
     """
@@ -4313,6 +4406,7 @@ def test_runtime_response_generic_runtime_response_type_image_serialization(self
         runtime_response_generic_runtime_response_type_image_model_json['title'] = 'testString'
         runtime_response_generic_runtime_response_type_image_model_json['description'] = 'testString'
         runtime_response_generic_runtime_response_type_image_model_json['channels'] = [response_generic_channel_model]
+        runtime_response_generic_runtime_response_type_image_model_json['alt_text'] = 'testString'
 
         # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeImage by calling from_dict on the json representation
         runtime_response_generic_runtime_response_type_image_model = RuntimeResponseGenericRuntimeResponseTypeImage.from_dict(runtime_response_generic_runtime_response_type_image_model_json)
@@ -4329,7 +4423,7 @@ def test_runtime_response_generic_runtime_response_type_image_serialization(self
         runtime_response_generic_runtime_response_type_image_model_json2 = runtime_response_generic_runtime_response_type_image_model.to_dict()
         assert runtime_response_generic_runtime_response_type_image_model_json2 == runtime_response_generic_runtime_response_type_image_model_json
 
-class TestRuntimeResponseGenericRuntimeResponseTypeOption():
+class TestModel_RuntimeResponseGenericRuntimeResponseTypeOption():
     """
     Test Class for RuntimeResponseGenericRuntimeResponseTypeOption
     """
@@ -4400,12 +4494,12 @@ def test_runtime_response_generic_runtime_response_type_option_serialization(sel
         message_input_options_spelling_model['auto_correct'] = True
 
         message_input_options_model = {} # MessageInputOptions
-        message_input_options_model['restart'] = True
-        message_input_options_model['alternate_intents'] = True
+        message_input_options_model['restart'] = False
+        message_input_options_model['alternate_intents'] = False
         message_input_options_model['spelling'] = message_input_options_spelling_model
-        message_input_options_model['debug'] = True
-        message_input_options_model['return_context'] = True
-        message_input_options_model['export'] = True
+        message_input_options_model['debug'] = False
+        message_input_options_model['return_context'] = False
+        message_input_options_model['export'] = False
 
         message_input_model = {} # MessageInput
         message_input_model['message_type'] = 'text'
@@ -4449,7 +4543,7 @@ def test_runtime_response_generic_runtime_response_type_option_serialization(sel
         runtime_response_generic_runtime_response_type_option_model_json2 = runtime_response_generic_runtime_response_type_option_model.to_dict()
         assert runtime_response_generic_runtime_response_type_option_model_json2 == runtime_response_generic_runtime_response_type_option_model_json
 
-class TestRuntimeResponseGenericRuntimeResponseTypePause():
+class TestModel_RuntimeResponseGenericRuntimeResponseTypePause():
     """
     Test Class for RuntimeResponseGenericRuntimeResponseTypePause
     """
@@ -4486,7 +4580,7 @@ def test_runtime_response_generic_runtime_response_type_pause_serialization(self
         runtime_response_generic_runtime_response_type_pause_model_json2 = runtime_response_generic_runtime_response_type_pause_model.to_dict()
         assert runtime_response_generic_runtime_response_type_pause_model_json2 == runtime_response_generic_runtime_response_type_pause_model_json
 
-class TestRuntimeResponseGenericRuntimeResponseTypeSearch():
+class TestModel_RuntimeResponseGenericRuntimeResponseTypeSearch():
     """
     Test Class for RuntimeResponseGenericRuntimeResponseTypeSearch
     """
@@ -4508,6 +4602,10 @@ def test_runtime_response_generic_runtime_response_type_search_serialization(sel
         search_result_highlight_model['url'] = ['testString']
         search_result_highlight_model['foo'] = ['testString']
 
+        search_result_answer_model = {} # SearchResultAnswer
+        search_result_answer_model['text'] = 'testString'
+        search_result_answer_model['confidence'] = 0
+
         search_result_model = {} # SearchResult
         search_result_model['id'] = 'testString'
         search_result_model['result_metadata'] = search_result_metadata_model
@@ -4515,6 +4613,7 @@ def test_runtime_response_generic_runtime_response_type_search_serialization(sel
         search_result_model['title'] = 'testString'
         search_result_model['url'] = 'testString'
         search_result_model['highlight'] = search_result_highlight_model
+        search_result_model['answers'] = [search_result_answer_model]
 
         response_generic_channel_model = {} # ResponseGenericChannel
         response_generic_channel_model['channel'] = 'testString'
@@ -4542,7 +4641,7 @@ def test_runtime_response_generic_runtime_response_type_search_serialization(sel
         runtime_response_generic_runtime_response_type_search_model_json2 = runtime_response_generic_runtime_response_type_search_model.to_dict()
         assert runtime_response_generic_runtime_response_type_search_model_json2 == runtime_response_generic_runtime_response_type_search_model_json
 
-class TestRuntimeResponseGenericRuntimeResponseTypeSuggestion():
+class TestModel_RuntimeResponseGenericRuntimeResponseTypeSuggestion():
     """
     Test Class for RuntimeResponseGenericRuntimeResponseTypeSuggestion
     """
@@ -4613,12 +4712,12 @@ def test_runtime_response_generic_runtime_response_type_suggestion_serialization
         message_input_options_spelling_model['auto_correct'] = True
 
         message_input_options_model = {} # MessageInputOptions
-        message_input_options_model['restart'] = True
-        message_input_options_model['alternate_intents'] = True
+        message_input_options_model['restart'] = False
+        message_input_options_model['alternate_intents'] = False
         message_input_options_model['spelling'] = message_input_options_spelling_model
-        message_input_options_model['debug'] = True
-        message_input_options_model['return_context'] = True
-        message_input_options_model['export'] = True
+        message_input_options_model['debug'] = False
+        message_input_options_model['return_context'] = False
+        message_input_options_model['export'] = False
 
         message_input_model = {} # MessageInput
         message_input_model['message_type'] = 'text'
@@ -4661,7 +4760,7 @@ def test_runtime_response_generic_runtime_response_type_suggestion_serialization
         runtime_response_generic_runtime_response_type_suggestion_model_json2 = runtime_response_generic_runtime_response_type_suggestion_model.to_dict()
         assert runtime_response_generic_runtime_response_type_suggestion_model_json2 == runtime_response_generic_runtime_response_type_suggestion_model_json
 
-class TestRuntimeResponseGenericRuntimeResponseTypeText():
+class TestModel_RuntimeResponseGenericRuntimeResponseTypeText():
     """
     Test Class for RuntimeResponseGenericRuntimeResponseTypeText
     """
@@ -4697,7 +4796,7 @@ def test_runtime_response_generic_runtime_response_type_text_serialization(self)
         runtime_response_generic_runtime_response_type_text_model_json2 = runtime_response_generic_runtime_response_type_text_model.to_dict()
         assert runtime_response_generic_runtime_response_type_text_model_json2 == runtime_response_generic_runtime_response_type_text_model_json
 
-class TestRuntimeResponseGenericRuntimeResponseTypeUserDefined():
+class TestModel_RuntimeResponseGenericRuntimeResponseTypeUserDefined():
     """
     Test Class for RuntimeResponseGenericRuntimeResponseTypeUserDefined
     """
diff --git a/test/unit/test_compare_comply_v1.py b/test/unit/test_compare_comply_v1.py
index 57a6378b..72c554ba 100644
--- a/test/unit/test_compare_comply_v1.py
+++ b/test/unit/test_compare_comply_v1.py
@@ -55,6 +55,8 @@ def preprocess_url(self, request_url: str):
         """
         Preprocess the request URL to ensure the mock response will be found.
         """
+        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
+        request_url = urllib.parse.quote(request_url, safe=':/')
         if re.fullmatch('.*/+', request_url) is None:
             return request_url
         else:
@@ -171,6 +173,8 @@ def preprocess_url(self, request_url: str):
         """
         Preprocess the request URL to ensure the mock response will be found.
         """
+        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
+        request_url = urllib.parse.quote(request_url, safe=':/')
         if re.fullmatch('.*/+', request_url) is None:
             return request_url
         else:
@@ -287,6 +291,8 @@ def preprocess_url(self, request_url: str):
         """
         Preprocess the request URL to ensure the mock response will be found.
         """
+        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
+        request_url = urllib.parse.quote(request_url, safe=':/')
         if re.fullmatch('.*/+', request_url) is None:
             return request_url
         else:
@@ -403,6 +409,8 @@ def preprocess_url(self, request_url: str):
         """
         Preprocess the request URL to ensure the mock response will be found.
         """
+        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
+        request_url = urllib.parse.quote(request_url, safe=':/')
         if re.fullmatch('.*/+', request_url) is None:
             return request_url
         else:
@@ -427,8 +435,8 @@ def test_compare_documents_all_params(self):
         file_2 = io.BytesIO(b'This is a mock file.').getvalue()
         file_1_content_type = 'application/pdf'
         file_2_content_type = 'application/pdf'
-        file_1_label = 'testString'
-        file_2_label = 'testString'
+        file_1_label = 'file_1'
+        file_2_label = 'file_2'
         model = 'contracts'
 
         # Invoke method
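Editor's note: the relabelling above replaces placeholder labels with ones that read naturally in the comparison output the service echoes back. A hedged sketch of the call this test mocks; the API key, file paths, and endpoint are placeholders, and the parameter names come straight from the hunk:

```python
# Hedged sketch, not SDK test code: compare two contracts with explicit labels.
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson import CompareComplyV1

service = CompareComplyV1(version='2018-03-23',
                          authenticator=IAMAuthenticator('your_api_key'))
service.set_service_url('https://api.us-south.compare-comply.watson.cloud.ibm.com')

with open('contract_A.pdf', 'rb') as f1, open('contract_B.pdf', 'rb') as f2:
    result = service.compare_documents(
        file_1=f1,
        file_2=f2,
        file_1_content_type='application/pdf',
        file_2_content_type='application/pdf',
        file_1_label='file_1',   # labels are echoed back in the comparison output
        file_2_label='file_2',
        model='contracts').get_result()
```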
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -946,6 +960,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1060,6 +1076,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1205,6 +1223,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1266,6 +1286,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1336,6 +1358,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1452,7 +1476,7 @@ def test_update_batch_value_error(self): # Start of Model Tests ############################################################################## # region -class TestAddress(): +class TestModel_Address(): """ Test Class for Address """ @@ -1488,7 +1512,7 @@ def test_address_serialization(self): address_model_json2 = address_model.to_dict() assert address_model_json2 == address_model_json -class TestAlignedElement(): +class TestModel_AlignedElement(): """ Test Class for AlignedElement """ @@ -1549,7 +1573,7 @@ def test_aligned_element_serialization(self): aligned_element_model_json2 = aligned_element_model.to_dict() assert aligned_element_model_json2 == aligned_element_model_json -class TestAttribute(): +class TestModel_Attribute(): """ Test Class for Attribute """ @@ -1586,7 +1610,7 @@ def test_attribute_serialization(self): attribute_model_json2 = attribute_model.to_dict() assert attribute_model_json2 == attribute_model_json -class TestBatchStatus(): +class TestModel_BatchStatus(): """ Test Class for BatchStatus """ @@ -1614,8 +1638,8 @@ def test_batch_status_serialization(self): batch_status_model_json['batch_id'] = 'testString' batch_status_model_json['document_counts'] = doc_counts_model batch_status_model_json['status'] = 'testString' - batch_status_model_json['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - batch_status_model_json['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + batch_status_model_json['created'] = "2019-01-01T12:00:00Z" + batch_status_model_json['updated'] = "2019-01-01T12:00:00Z" # Construct a model instance of BatchStatus by calling from_dict on the json representation batch_status_model = BatchStatus.from_dict(batch_status_model_json) @@ -1632,7 +1656,7 @@ def test_batch_status_serialization(self): batch_status_model_json2 = batch_status_model.to_dict() assert batch_status_model_json2 == batch_status_model_json -class TestBatches(): +class TestModel_Batches(): """ Test Class for Batches """ @@ -1659,8 +1683,8 @@ def test_batches_serialization(self): batch_status_model['batch_id'] = 'testString' batch_status_model['document_counts'] = doc_counts_model batch_status_model['status'] = 'testString' - batch_status_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - batch_status_model['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + batch_status_model['created'] = "2019-01-01T12:00:00Z" + batch_status_model['updated'] = "2019-01-01T12:00:00Z" # Construct a json representation of a Batches model batches_model_json = {} @@ -1681,7 +1705,7 @@ def test_batches_serialization(self): batches_model_json2 = batches_model.to_dict() assert batches_model_json2 == batches_model_json -class TestBodyCells(): +class TestModel_BodyCells(): """ Test Class for BodyCells """ @@ -1734,7 +1758,7 @@ def test_body_cells_serialization(self): body_cells_model_json2 = body_cells_model.to_dict() assert body_cells_model_json2 == body_cells_model_json -class TestCategory(): +class TestModel_Category(): """ Test Class for Category """ @@ -1765,7 +1789,7 @@ def test_category_serialization(self): category_model_json2 = category_model.to_dict() assert category_model_json2 == category_model_json -class TestCategoryComparison(): 
@@ -1681,7 +1705,7 @@ def test_batches_serialization(self):
         batches_model_json2 = batches_model.to_dict()
         assert batches_model_json2 == batches_model_json
 
-class TestBodyCells():
+class TestModel_BodyCells():
     """
     Test Class for BodyCells
     """
@@ -1734,7 +1758,7 @@ def test_body_cells_serialization(self):
         body_cells_model_json2 = body_cells_model.to_dict()
         assert body_cells_model_json2 == body_cells_model_json
 
-class TestCategory():
+class TestModel_Category():
     """
     Test Class for Category
     """
@@ -1765,7 +1789,7 @@ def test_category_serialization(self):
         category_model_json2 = category_model.to_dict()
         assert category_model_json2 == category_model_json
 
-class TestCategoryComparison():
+class TestModel_CategoryComparison():
     """
     Test Class for CategoryComparison
     """
@@ -1794,7 +1818,7 @@ def test_category_comparison_serialization(self):
         category_comparison_model_json2 = category_comparison_model.to_dict()
         assert category_comparison_model_json2 == category_comparison_model_json
 
-class TestClassifyReturn():
+class TestModel_ClassifyReturn():
     """
     Test Class for ClassifyReturn
     """
@@ -2056,7 +2080,7 @@ def test_classify_return_serialization(self):
         classify_return_model_json2 = classify_return_model.to_dict()
         assert classify_return_model_json2 == classify_return_model_json
 
-class TestColumnHeaders():
+class TestModel_ColumnHeaders():
     """
     Test Class for ColumnHeaders
     """
@@ -2092,7 +2116,7 @@ def test_column_headers_serialization(self):
         column_headers_model_json2 = column_headers_model.to_dict()
         assert column_headers_model_json2 == column_headers_model_json
 
-class TestCompareReturn():
+class TestModel_CompareReturn():
     """
     Test Class for CompareReturn
     """
@@ -2174,7 +2198,7 @@ def test_compare_return_serialization(self):
         compare_return_model_json2 = compare_return_model.to_dict()
         assert compare_return_model_json2 == compare_return_model_json
 
-class TestContact():
+class TestModel_Contact():
     """
     Test Class for Contact
     """
@@ -2204,7 +2228,7 @@ def test_contact_serialization(self):
         contact_model_json2 = contact_model.to_dict()
         assert contact_model_json2 == contact_model_json
 
-class TestContexts():
+class TestModel_Contexts():
     """
     Test Class for Contexts
     """
@@ -2240,7 +2264,7 @@ def test_contexts_serialization(self):
         contexts_model_json2 = contexts_model.to_dict()
         assert contexts_model_json2 == contexts_model_json
 
-class TestContractAmts():
+class TestModel_ContractAmts():
     """
     Test Class for ContractAmts
     """
@@ -2285,7 +2309,7 @@ def test_contract_amts_serialization(self):
         contract_amts_model_json2 = contract_amts_model.to_dict()
         assert contract_amts_model_json2 == contract_amts_model_json
 
-class TestContractCurrencies():
+class TestModel_ContractCurrencies():
     """
     Test Class for ContractCurrencies
     """
@@ -2324,7 +2348,7 @@ def test_contract_currencies_serialization(self):
         contract_currencies_model_json2 = contract_currencies_model.to_dict()
         assert contract_currencies_model_json2 == contract_currencies_model_json
 
-class TestContractTerms():
+class TestModel_ContractTerms():
     """
     Test Class for ContractTerms
     """
@@ -2369,7 +2393,7 @@ def test_contract_terms_serialization(self):
         contract_terms_model_json2 = contract_terms_model.to_dict()
         assert contract_terms_model_json2 == contract_terms_model_json
 
-class TestContractTypes():
+class TestModel_ContractTypes():
     """
     Test Class for ContractTypes
     """
@@ -2407,7 +2431,7 @@ def test_contract_types_serialization(self):
         contract_types_model_json2 = contract_types_model.to_dict()
         assert contract_types_model_json2 == contract_types_model_json
 
-class TestDocCounts():
+class TestModel_DocCounts():
     """
     Test Class for DocCounts
     """
@@ -2439,7 +2463,7 @@ def test_doc_counts_serialization(self):
         doc_counts_model_json2 = doc_counts_model.to_dict()
         assert doc_counts_model_json2 == doc_counts_model_json
 
-class TestDocInfo():
+class TestModel_DocInfo():
     """
     Test Class for DocInfo
     """
@@ -2470,7 +2494,7 @@ def test_doc_info_serialization(self):
         doc_info_model_json2 = doc_info_model.to_dict()
         assert doc_info_model_json2 == doc_info_model_json
 
-class TestDocStructure():
+class TestModel_DocStructure():
     """
     Test Class for DocStructure
     """
@@ -2525,7 +2549,7 @@ def test_doc_structure_serialization(self):
         doc_structure_model_json2 = doc_structure_model.to_dict()
         assert doc_structure_model_json2 == doc_structure_model_json
 
-class TestDocument():
+class TestModel_Document():
     """
     Test Class for Document
     """
@@ -2557,7 +2581,7 @@ def test_document_serialization(self):
         document_model_json2 = document_model.to_dict()
         assert document_model_json2 == document_model_json
 
-class TestEffectiveDates():
+class TestModel_EffectiveDates():
     """
     Test Class for EffectiveDates
     """
@@ -2596,7 +2620,7 @@ def test_effective_dates_serialization(self):
         effective_dates_model_json2 = effective_dates_model.to_dict()
         assert effective_dates_model_json2 == effective_dates_model_json
 
-class TestElement():
+class TestModel_Element():
     """
     Test Class for Element
     """
@@ -2654,7 +2678,7 @@ def test_element_serialization(self):
         element_model_json2 = element_model.to_dict()
         assert element_model_json2 == element_model_json
 
-class TestElementLocations():
+class TestModel_ElementLocations():
     """
     Test Class for ElementLocations
     """
@@ -2684,7 +2708,7 @@ def test_element_locations_serialization(self):
         element_locations_model_json2 = element_locations_model.to_dict()
         assert element_locations_model_json2 == element_locations_model_json
 
-class TestElementPair():
+class TestModel_ElementPair():
     """
     Test Class for ElementPair
     """
@@ -2739,7 +2763,7 @@ def test_element_pair_serialization(self):
         element_pair_model_json2 = element_pair_model.to_dict()
         assert element_pair_model_json2 == element_pair_model_json
 
-class TestFeedbackDataInput():
+class TestModel_FeedbackDataInput():
     """
     Test Class for FeedbackDataInput
     """
@@ -2807,7 +2831,7 @@ def test_feedback_data_input_serialization(self):
         feedback_data_input_model_json2 = feedback_data_input_model.to_dict()
         assert feedback_data_input_model_json2 == feedback_data_input_model_json
 
-class TestFeedbackDataOutput():
+class TestModel_FeedbackDataOutput():
     """
     Test Class for FeedbackDataOutput
     """
@@ -2883,7 +2907,7 @@ def test_feedback_data_output_serialization(self):
         feedback_data_output_model_json2 = feedback_data_output_model.to_dict()
         assert feedback_data_output_model_json2 == feedback_data_output_model_json
 
-class TestFeedbackDeleted():
+class TestModel_FeedbackDeleted():
     """
     Test Class for FeedbackDeleted
     """
@@ -2913,7 +2937,7 @@ def test_feedback_deleted_serialization(self):
         feedback_deleted_model_json2 = feedback_deleted_model.to_dict()
         assert feedback_deleted_model_json2 == feedback_deleted_model_json
 
-class TestFeedbackList():
+class TestModel_FeedbackList():
     """
     Test Class for FeedbackList
     """
@@ -2975,7 +2999,7 @@ def test_feedback_list_serialization(self):
 
         get_feedback_model = {} # GetFeedback
         get_feedback_model['feedback_id'] = '9730b437-cb86-4d40-9a84-ff6948bb3dd1'
-        get_feedback_model['created'] = datetime_to_string(string_to_datetime("2018-07-03T10:16:05-0500"))
+        get_feedback_model['created'] = "2018-07-03T15:16:05Z"
         get_feedback_model['comment'] = 'testString'
         get_feedback_model['feedback_data'] = feedback_data_output_model
 
@@ -2998,7 +3022,7 @@ def test_feedback_list_serialization(self):
         feedback_list_model_json2 = feedback_list_model.to_dict()
         assert feedback_list_model_json2 == feedback_list_model_json
 
-class TestFeedbackReturn():
+class TestModel_FeedbackReturn():
     """
     Test Class for FeedbackReturn
     """
@@ -3063,7 +3087,7 @@ def test_feedback_return_serialization(self):
         feedback_return_model_json['feedback_id'] = 'testString'
         feedback_return_model_json['user_id'] = 'testString'
         feedback_return_model_json['comment'] = 'testString'
-        feedback_return_model_json['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z"))
+        feedback_return_model_json['created'] = "2019-01-01T12:00:00Z"
         feedback_return_model_json['feedback_data'] = feedback_data_output_model
 
         # Construct a model instance of FeedbackReturn by calling from_dict on the json representation
@@ -3081,7 +3105,7 @@ def test_feedback_return_serialization(self):
         feedback_return_model_json2 = feedback_return_model.to_dict()
         assert feedback_return_model_json2 == feedback_return_model_json
 
-class TestGetFeedback():
+class TestModel_GetFeedback():
     """
     Test Class for GetFeedback
     """
@@ -3144,7 +3168,7 @@ def test_get_feedback_serialization(self):
         # Construct a json representation of a GetFeedback model
         get_feedback_model_json = {}
         get_feedback_model_json['feedback_id'] = 'testString'
-        get_feedback_model_json['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z"))
+        get_feedback_model_json['created'] = "2019-01-01T12:00:00Z"
         get_feedback_model_json['comment'] = 'testString'
         get_feedback_model_json['feedback_data'] = feedback_data_output_model
 
@@ -3163,7 +3187,7 @@ def test_get_feedback_serialization(self):
         get_feedback_model_json2 = get_feedback_model.to_dict()
         assert get_feedback_model_json2 == get_feedback_model_json
 
-class TestHTMLReturn():
+class TestModel_HTMLReturn():
     """
     Test Class for HTMLReturn
     """
@@ -3196,7 +3220,7 @@ def test_html_return_serialization(self):
         html_return_model_json2 = html_return_model.to_dict()
         assert html_return_model_json2 == html_return_model_json
 
-class TestInterpretation():
+class TestModel_Interpretation():
     """
     Test Class for Interpretation
     """
@@ -3227,7 +3251,7 @@ def test_interpretation_serialization(self):
         interpretation_model_json2 = interpretation_model.to_dict()
         assert interpretation_model_json2 == interpretation_model_json
 
-class TestKey():
+class TestModel_Key():
     """
     Test Class for Key
     """
@@ -3264,7 +3288,7 @@ def test_key_serialization(self):
         key_model_json2 = key_model.to_dict()
         assert key_model_json2 == key_model_json
 
-class TestKeyValuePair():
+class TestModel_KeyValuePair():
     """
     Test Class for KeyValuePair
     """
@@ -3310,7 +3334,7 @@ def test_key_value_pair_serialization(self):
         key_value_pair_model_json2 = key_value_pair_model.to_dict()
         assert key_value_pair_model_json2 == key_value_pair_model_json
 
-class TestLabel():
+class TestModel_Label():
     """
     Test Class for Label
     """
@@ -3340,7 +3364,7 @@ def test_label_serialization(self):
         label_model_json2 = label_model.to_dict()
         assert label_model_json2 == label_model_json
 
-class TestLeadingSentence():
+class TestModel_LeadingSentence():
     """
     Test Class for LeadingSentence
     """
@@ -3381,7 +3405,7 @@ def test_leading_sentence_serialization(self):
         leading_sentence_model_json2 = leading_sentence_model.to_dict()
         assert leading_sentence_model_json2 == leading_sentence_model_json
 
-class TestLocation():
+class TestModel_Location():
     """
     Test Class for Location
     """
@@ -3411,7 +3435,7 @@ def test_location_serialization(self):
         location_model_json2 = location_model.to_dict()
         assert location_model_json2 == location_model_json
 
-class TestMention():
+class TestModel_Mention():
     """
     Test Class for Mention
     """
@@ -3447,7 +3471,7 @@ def test_mention_serialization(self):
         mention_model_json2 = mention_model.to_dict()
         assert mention_model_json2 == mention_model_json
 
-class TestOriginalLabelsIn():
+class TestModel_OriginalLabelsIn():
     """
     Test Class for OriginalLabelsIn
     """
@@ -3493,7 +3517,7 @@ def test_original_labels_in_serialization(self):
         original_labels_in_model_json2 = original_labels_in_model.to_dict()
         assert original_labels_in_model_json2 == original_labels_in_model_json
 
-class TestOriginalLabelsOut():
+class TestModel_OriginalLabelsOut():
     """
     Test Class for OriginalLabelsOut
     """
@@ -3539,7 +3563,7 @@ def test_original_labels_out_serialization(self):
         original_labels_out_model_json2 = original_labels_out_model.to_dict()
         assert original_labels_out_model_json2 == original_labels_out_model_json
 
-class TestPagination():
+class TestModel_Pagination():
     """
     Test Class for Pagination
     """
@@ -3572,7 +3596,7 @@ def test_pagination_serialization(self):
         pagination_model_json2 = pagination_model.to_dict()
         assert pagination_model_json2 == pagination_model_json
 
-class TestParagraphs():
+class TestModel_Paragraphs():
     """
     Test Class for Paragraphs
     """
@@ -3607,7 +3631,7 @@ def test_paragraphs_serialization(self):
         paragraphs_model_json2 = paragraphs_model.to_dict()
         assert paragraphs_model_json2 == paragraphs_model_json
 
-class TestParties():
+class TestModel_Parties():
     """
     Test Class for Parties
     """
@@ -3659,7 +3683,7 @@ def test_parties_serialization(self):
         parties_model_json2 = parties_model.to_dict()
         assert parties_model_json2 == parties_model_json
 
-class TestPaymentTerms():
+class TestModel_PaymentTerms():
     """
     Test Class for PaymentTerms
     """
@@ -3704,7 +3728,7 @@ def test_payment_terms_serialization(self):
         payment_terms_model_json2 = payment_terms_model.to_dict()
         assert payment_terms_model_json2 == payment_terms_model_json
 
-class TestRowHeaders():
+class TestModel_RowHeaders():
     """
     Test Class for RowHeaders
     """
@@ -3746,7 +3770,7 @@ def test_row_headers_serialization(self):
         row_headers_model_json2 = row_headers_model.to_dict()
         assert row_headers_model_json2 == row_headers_model_json
 
-class TestSectionTitle():
+class TestModel_SectionTitle():
     """
     Test Class for SectionTitle
     """
@@ -3782,7 +3806,7 @@ def test_section_title_serialization(self):
         section_title_model_json2 = section_title_model.to_dict()
         assert section_title_model_json2 == section_title_model_json
 
-class TestSectionTitles():
+class TestModel_SectionTitles():
     """
     Test Class for SectionTitles
     """
@@ -3824,7 +3848,7 @@ def test_section_titles_serialization(self):
         section_titles_model_json2 = section_titles_model.to_dict()
         assert section_titles_model_json2 == section_titles_model_json
 
-class TestShortDoc():
+class TestModel_ShortDoc():
     """
     Test Class for ShortDoc
     """
@@ -3854,7 +3878,7 @@ def test_short_doc_serialization(self):
         short_doc_model_json2 = short_doc_model.to_dict()
         assert short_doc_model_json2 == short_doc_model_json
 
-class TestTableHeaders():
+class TestModel_TableHeaders():
     """
     Test Class for TableHeaders
     """
@@ -3889,7 +3913,7 @@ def test_table_headers_serialization(self):
         table_headers_model_json2 = table_headers_model.to_dict()
         assert table_headers_model_json2 == table_headers_model_json
 
-class TestTableReturn():
+class TestModel_TableReturn():
     """
     Test Class for TableReturn
     """
@@ -4020,7 +4044,7 @@ def test_table_return_serialization(self):
         table_return_model_json2 = table_return_model.to_dict()
         assert table_return_model_json2 == table_return_model_json
 
-class TestTableTitle():
+class TestModel_TableTitle():
     """
     Test Class for TableTitle
     """
@@ -4056,7 +4080,7 @@ def test_table_title_serialization(self):
         table_title_model_json2 = table_title_model.to_dict()
         assert table_title_model_json2 == table_title_model_json
 
-class TestTables():
+class TestModel_Tables():
     """
     Test Class for Tables
     """
@@ -4176,7 +4200,7 @@ def test_tables_serialization(self):
         tables_model_json2 = tables_model.to_dict()
         assert tables_model_json2 == tables_model_json
 
-class TestTerminationDates():
+class TestModel_TerminationDates():
     """
     Test Class for TerminationDates
     """
@@ -4215,7 +4239,7 @@ def test_termination_dates_serialization(self):
         termination_dates_model_json2 = termination_dates_model.to_dict()
         assert termination_dates_model_json2 == termination_dates_model_json
 
-class TestTypeLabel():
+class TestModel_TypeLabel():
     """
     Test Class for TypeLabel
     """
@@ -4252,7 +4276,7 @@ def test_type_label_serialization(self):
         type_label_model_json2 = type_label_model.to_dict()
         assert type_label_model_json2 == type_label_model_json
 
-class TestTypeLabelComparison():
+class TestModel_TypeLabelComparison():
     """
     Test Class for TypeLabelComparison
     """
@@ -4287,7 +4311,7 @@ def test_type_label_comparison_serialization(self):
        type_label_comparison_model_json2 = type_label_comparison_model.to_dict()
         assert type_label_comparison_model_json2 == type_label_comparison_model_json
 
-class TestUnalignedElement():
+class TestModel_UnalignedElement():
     """
     Test Class for UnalignedElement
     """
@@ -4342,7 +4366,7 @@ def test_unaligned_element_serialization(self):
         unaligned_element_model_json2 = unaligned_element_model.to_dict()
         assert unaligned_element_model_json2 == unaligned_element_model_json
 
-class TestUpdatedLabelsIn():
+class TestModel_UpdatedLabelsIn():
     """
     Test Class for UpdatedLabelsIn
     """
@@ -4388,7 +4412,7 @@ def test_updated_labels_in_serialization(self):
         updated_labels_in_model_json2 = updated_labels_in_model.to_dict()
         assert updated_labels_in_model_json2 == updated_labels_in_model_json
 
-class TestUpdatedLabelsOut():
+class TestModel_UpdatedLabelsOut():
     """
     Test Class for UpdatedLabelsOut
     """
@@ -4434,7 +4458,7 @@ def test_updated_labels_out_serialization(self):
         updated_labels_out_model_json2 = updated_labels_out_model.to_dict()
         assert updated_labels_out_model_json2 == updated_labels_out_model_json
 
-class TestValue():
+class TestModel_Value():
     """
     Test Class for Value
     """
diff --git a/test/unit/test_discovery_v1.py b/test/unit/test_discovery_v1.py
index 27d6361d..96f2d9cb 100644
--- a/test/unit/test_discovery_v1.py
+++ b/test/unit/test_discovery_v1.py
@@ -56,6 +58,8 @@ def preprocess_url(self, request_url: str):
         """
         Preprocess the request URL to ensure the mock response will be found.
         """
+        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
+        request_url = urllib.parse.quote(request_url, safe=':/')
         if re.fullmatch('.*/+', request_url) is None:
             return request_url
         else:
@@ -137,6 +139,8 @@ def preprocess_url(self, request_url: str):
         """
         Preprocess the request URL to ensure the mock response will be found.
         """
+        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
+        request_url = urllib.parse.quote(request_url, safe=':/')
         if re.fullmatch('.*/+', request_url) is None:
             return request_url
         else:
@@ -230,6 +234,8 @@ def preprocess_url(self, request_url: str):
         """
         Preprocess the request URL to ensure the mock response will be found.
         """
+        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
+        request_url = urllib.parse.quote(request_url, safe=':/')
         if re.fullmatch('.*/+', request_url) is None:
             return request_url
         else:
@@ -300,6 +306,8 @@ def preprocess_url(self, request_url: str):
         """
         Preprocess the request URL to ensure the mock response will be found.
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -384,6 +392,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -454,6 +464,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -542,6 +554,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -554,7 +568,7 @@ def test_create_configuration_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/environments/testString/configurations') - mock_response = '{"configuration_id": "configuration_id", "name": "name", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "description": "description", "conversions": {"pdf": {"heading": {"fonts": [{"level": 5, "min_size": 8, "max_size": 8, "bold": true, "italic": true, "name": "name"}]}}, "word": {"heading": {"fonts": [{"level": 5, "min_size": 8, "max_size": 8, "bold": true, "italic": true, "name": "name"}], "styles": [{"level": 5, "names": ["names"]}]}}, "html": {"exclude_tags_completely": ["exclude_tags_completely"], "exclude_tags_keep_content": ["exclude_tags_keep_content"], "keep_content": {"xpaths": ["xpaths"]}, "exclude_content": {"xpaths": ["xpaths"]}, "keep_tag_attributes": ["keep_tag_attributes"], "exclude_tag_attributes": ["exclude_tag_attributes"]}, "segment": {"enabled": false, "selector_tags": ["selector_tags"], "annotated_fields": ["annotated_fields"]}, "json_normalizations": [{"operation": "copy", "source_field": "source_field", "destination_field": "destination_field"}], "image_text_recognition": true}, "enrichments": [{"description": "description", "destination_field": "destination_field", "source_field": "source_field", "overwrite": false, "enrichment": "enrichment", "ignore_downstream_errors": true, "options": {"features": {"keywords": {"sentiment": false, "emotion": false, "limit": 5}, "entities": {"sentiment": false, "emotion": false, "limit": 5, "mentions": true, "mention_types": false, "sentence_locations": true, "model": "model"}, "sentiment": {"document": true, "targets": ["target"]}, "emotion": {"document": true, "targets": ["target"]}, "categories": {"mapKey": {"anyKey": "anyValue"}}, "semantic_roles": {"entities": true, "keywords": true, "limit": 5}, "relations": {"model": "model"}, "concepts": {"limit": 5}}, "language": "ar", "model": "model"}}], "normalizations": [{"operation": "copy", "source_field": "source_field", "destination_field": "destination_field"}], "source": {"type": "box", "credential_id": "credential_id", "schedule": {"enabled": false, "time_zone": 
"time_zone", "frequency": "daily"}, "options": {"folders": [{"owner_user_id": "owner_user_id", "folder_id": "folder_id", "limit": 5}], "objects": [{"name": "name", "limit": 5}], "site_collections": [{"site_collection_path": "site_collection_path", "limit": 5}], "urls": [{"url": "url", "limit_to_starting_hosts": false, "crawl_speed": "gentle", "allow_untrusted_certificate": false, "maximum_hops": 12, "request_timeout": 15, "override_robots_txt": false, "blacklist": ["blacklist"]}], "buckets": [{"name": "name", "limit": 5}], "crawl_all_buckets": false}}}' + mock_response = '{"configuration_id": "configuration_id", "name": "name", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "description": "description", "conversions": {"pdf": {"heading": {"fonts": [{"level": 5, "min_size": 8, "max_size": 8, "bold": true, "italic": true, "name": "name"}]}}, "word": {"heading": {"fonts": [{"level": 5, "min_size": 8, "max_size": 8, "bold": true, "italic": true, "name": "name"}], "styles": [{"level": 5, "names": ["names"]}]}}, "html": {"exclude_tags_completely": ["exclude_tags_completely"], "exclude_tags_keep_content": ["exclude_tags_keep_content"], "keep_content": {"xpaths": ["xpaths"]}, "exclude_content": {"xpaths": ["xpaths"]}, "keep_tag_attributes": ["keep_tag_attributes"], "exclude_tag_attributes": ["exclude_tag_attributes"]}, "segment": {"enabled": false, "selector_tags": ["selector_tags"], "annotated_fields": ["annotated_fields"]}, "json_normalizations": [{"operation": "copy", "source_field": "source_field", "destination_field": "destination_field"}], "image_text_recognition": true}, "enrichments": [{"description": "description", "destination_field": "destination_field", "source_field": "source_field", "overwrite": false, "enrichment": "enrichment", "ignore_downstream_errors": false, "options": {"features": {"keywords": {"sentiment": false, "emotion": false, "limit": 5}, "entities": {"sentiment": false, "emotion": false, "limit": 5, "mentions": true, "mention_types": false, "sentence_locations": true, "model": "model"}, "sentiment": {"document": true, "targets": ["target"]}, "emotion": {"document": true, "targets": ["target"]}, "categories": {"mapKey": "anyValue"}, "semantic_roles": {"entities": true, "keywords": true, "limit": 5}, "relations": {"model": "model"}, "concepts": {"limit": 5}}, "language": "ar", "model": "model"}}], "normalizations": [{"operation": "copy", "source_field": "source_field", "destination_field": "destination_field"}], "source": {"type": "box", "credential_id": "credential_id", "schedule": {"enabled": true, "time_zone": "America/New_York", "frequency": "daily"}, "options": {"folders": [{"owner_user_id": "owner_user_id", "folder_id": "folder_id", "limit": 5}], "objects": [{"name": "name", "limit": 5}], "site_collections": [{"site_collection_path": "site_collection_path", "limit": 5}], "urls": [{"url": "url", "limit_to_starting_hosts": true, "crawl_speed": "normal", "allow_untrusted_certificate": false, "maximum_hops": 12, "request_timeout": 15, "override_robots_txt": false, "blacklist": ["blacklist"]}], "buckets": [{"name": "name", "limit": 5}], "crawl_all_buckets": false}}}' responses.add(responses.POST, url, body=mock_response, @@ -607,8 +621,8 @@ def test_create_configuration_all_params(self): # Construct a dict representation of a SegmentSettings model segment_settings_model = {} - segment_settings_model['enabled'] = True - segment_settings_model['selector_tags'] = ['testString'] + segment_settings_model['enabled'] = False + 
@@ -688,15 +702,15 @@ def test_create_configuration_all_params(self):
         enrichment_model['description'] = 'testString'
         enrichment_model['destination_field'] = 'testString'
         enrichment_model['source_field'] = 'testString'
-        enrichment_model['overwrite'] = True
+        enrichment_model['overwrite'] = False
         enrichment_model['enrichment'] = 'testString'
-        enrichment_model['ignore_downstream_errors'] = True
+        enrichment_model['ignore_downstream_errors'] = False
         enrichment_model['options'] = enrichment_options_model
 
         # Construct a dict representation of a SourceSchedule model
         source_schedule_model = {}
         source_schedule_model['enabled'] = True
-        source_schedule_model['time_zone'] = 'testString'
+        source_schedule_model['time_zone'] = 'America/New_York'
         source_schedule_model['frequency'] = 'daily'
 
         # Construct a dict representation of a SourceOptionsFolder model
@@ -719,11 +733,11 @@ def test_create_configuration_all_params(self):
         source_options_web_crawl_model = {}
         source_options_web_crawl_model['url'] = 'testString'
         source_options_web_crawl_model['limit_to_starting_hosts'] = True
-        source_options_web_crawl_model['crawl_speed'] = 'gentle'
-        source_options_web_crawl_model['allow_untrusted_certificate'] = True
+        source_options_web_crawl_model['crawl_speed'] = 'normal'
+        source_options_web_crawl_model['allow_untrusted_certificate'] = False
         source_options_web_crawl_model['maximum_hops'] = 38
         source_options_web_crawl_model['request_timeout'] = 38
-        source_options_web_crawl_model['override_robots_txt'] = True
+        source_options_web_crawl_model['override_robots_txt'] = False
         source_options_web_crawl_model['blacklist'] = ['testString']
 
         # Construct a dict representation of a SourceOptionsBuckets model
@@ -788,7 +802,7 @@ def test_create_configuration_value_error(self):
         """
         # Set up mock
         url = self.preprocess_url(_base_url + '/v1/environments/testString/configurations')
-        mock_response = '{"configuration_id": "configuration_id", "name": "name", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "description": "description", "conversions": {"pdf": {"heading": {"fonts": [{"level": 5, "min_size": 8, "max_size": 8, "bold": true, "italic": true, "name": "name"}]}}, "word": {"heading": {"fonts": [{"level": 5, "min_size": 8, "max_size": 8, "bold": true, "italic": true, "name": "name"}], "styles": [{"level": 5, "names": ["names"]}]}}, "html": {"exclude_tags_completely": ["exclude_tags_completely"], "exclude_tags_keep_content": ["exclude_tags_keep_content"], "keep_content": {"xpaths": ["xpaths"]}, "exclude_content": {"xpaths": ["xpaths"]}, "keep_tag_attributes": ["keep_tag_attributes"], "exclude_tag_attributes": ["exclude_tag_attributes"]}, "segment": {"enabled": false, "selector_tags": ["selector_tags"], "annotated_fields": ["annotated_fields"]}, "json_normalizations": [{"operation": "copy", "source_field": "source_field", "destination_field": "destination_field"}], "image_text_recognition": true}, "enrichments": [{"description": "description", "destination_field": "destination_field", "source_field": "source_field", "overwrite": false, "enrichment": "enrichment", "ignore_downstream_errors": true, "options": {"features": {"keywords": {"sentiment": false, "emotion": false, "limit": 5}, "entities": {"sentiment": false, "emotion": false, "limit": 5, "mentions": true, "mention_types": false, "sentence_locations": true, "model": "model"}, "sentiment": {"document": true, "targets": ["target"]}, "emotion": {"document": true, "targets": ["target"]}, "categories": {"mapKey": {"anyKey": "anyValue"}}, "semantic_roles": {"entities": true, "keywords": true, "limit": 5}, "relations": {"model": "model"}, "concepts": {"limit": 5}}, "language": "ar", "model": "model"}}], "normalizations": [{"operation": "copy", "source_field": "source_field", "destination_field": "destination_field"}], "source": {"type": "box", "credential_id": "credential_id", "schedule": {"enabled": false, "time_zone": "time_zone", "frequency": "daily"}, "options": {"folders": [{"owner_user_id": "owner_user_id", "folder_id": "folder_id", "limit": 5}], "objects": [{"name": "name", "limit": 5}], "site_collections": [{"site_collection_path": "site_collection_path", "limit": 5}], "urls": [{"url": "url", "limit_to_starting_hosts": false, "crawl_speed": "gentle", "allow_untrusted_certificate": false, "maximum_hops": 12, "request_timeout": 15, "override_robots_txt": false, "blacklist": ["blacklist"]}], "buckets": [{"name": "name", "limit": 5}], "crawl_all_buckets": false}}}'
+        mock_response = '{"configuration_id": "configuration_id", "name": "name", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "description": "description", "conversions": {"pdf": {"heading": {"fonts": [{"level": 5, "min_size": 8, "max_size": 8, "bold": true, "italic": true, "name": "name"}]}}, "word": {"heading": {"fonts": [{"level": 5, "min_size": 8, "max_size": 8, "bold": true, "italic": true, "name": "name"}], "styles": [{"level": 5, "names": ["names"]}]}}, "html": {"exclude_tags_completely": ["exclude_tags_completely"], "exclude_tags_keep_content": ["exclude_tags_keep_content"], "keep_content": {"xpaths": ["xpaths"]}, "exclude_content": {"xpaths": ["xpaths"]}, "keep_tag_attributes": ["keep_tag_attributes"], "exclude_tag_attributes": ["exclude_tag_attributes"]}, "segment": {"enabled": false, "selector_tags": ["selector_tags"], "annotated_fields": ["annotated_fields"]}, "json_normalizations": [{"operation": "copy", "source_field": "source_field", "destination_field": "destination_field"}], "image_text_recognition": true}, "enrichments": [{"description": "description", "destination_field": "destination_field", "source_field": "source_field", "overwrite": false, "enrichment": "enrichment", "ignore_downstream_errors": false, "options": {"features": {"keywords": {"sentiment": false, "emotion": false, "limit": 5}, "entities": {"sentiment": false, "emotion": false, "limit": 5, "mentions": true, "mention_types": false, "sentence_locations": true, "model": "model"}, "sentiment": {"document": true, "targets": ["target"]}, "emotion": {"document": true, "targets": ["target"]}, "categories": {"mapKey": "anyValue"}, "semantic_roles": {"entities": true, "keywords": true, "limit": 5}, "relations": {"model": "model"}, "concepts": {"limit": 5}}, "language": "ar", "model": "model"}}], "normalizations": [{"operation": "copy", "source_field": "source_field", "destination_field": "destination_field"}], "source": {"type": "box", "credential_id": "credential_id", "schedule": {"enabled": true, "time_zone": "America/New_York", "frequency": "daily"}, "options": {"folders": [{"owner_user_id": "owner_user_id", "folder_id": "folder_id", "limit": 5}], "objects": [{"name": "name", "limit": 5}], "site_collections": [{"site_collection_path": "site_collection_path", "limit": 5}], "urls": [{"url": "url", "limit_to_starting_hosts": true, "crawl_speed": "normal", "allow_untrusted_certificate": false, "maximum_hops": 12, "request_timeout": 15, "override_robots_txt": false, "blacklist": ["blacklist"]}], "buckets": [{"name": "name", "limit": 5}], "crawl_all_buckets": false}}}'
         responses.add(responses.POST,
                       url,
                       body=mock_response,
@@ -841,8 +855,8 @@ def test_create_configuration_value_error(self):
 
         # Construct a dict representation of a SegmentSettings model
         segment_settings_model = {}
-        segment_settings_model['enabled'] = True
-        segment_settings_model['selector_tags'] = ['testString']
+        segment_settings_model['enabled'] = False
+        segment_settings_model['selector_tags'] = ['h1', 'h2']
         segment_settings_model['annotated_fields'] = ['testString']
 
         # Construct a dict representation of a NormalizationOperation model
@@ -922,15 +936,15 @@ def test_create_configuration_value_error(self):
         enrichment_model['description'] = 'testString'
         enrichment_model['destination_field'] = 'testString'
         enrichment_model['source_field'] = 'testString'
-        enrichment_model['overwrite'] = True
+        enrichment_model['overwrite'] = False
         enrichment_model['enrichment'] = 'testString'
-        enrichment_model['ignore_downstream_errors'] = True
+        enrichment_model['ignore_downstream_errors'] = False
         enrichment_model['options'] = enrichment_options_model
 
         # Construct a dict representation of a SourceSchedule model
         source_schedule_model = {}
         source_schedule_model['enabled'] = True
-        source_schedule_model['time_zone'] = 'testString'
+        source_schedule_model['time_zone'] = 'America/New_York'
         source_schedule_model['frequency'] = 'daily'
 
         # Construct a dict representation of a SourceOptionsFolder model
@@ -953,11 +967,11 @@ def test_create_configuration_value_error(self):
         source_options_web_crawl_model = {}
         source_options_web_crawl_model['url'] = 'testString'
         source_options_web_crawl_model['limit_to_starting_hosts'] = True
-        source_options_web_crawl_model['crawl_speed'] = 'gentle'
-        source_options_web_crawl_model['allow_untrusted_certificate'] = True
+        source_options_web_crawl_model['crawl_speed'] = 'normal'
+        source_options_web_crawl_model['allow_untrusted_certificate'] = False
         source_options_web_crawl_model['maximum_hops'] = 38
         source_options_web_crawl_model['request_timeout'] = 38
-        source_options_web_crawl_model['override_robots_txt'] = True
+        source_options_web_crawl_model['override_robots_txt'] = False
         source_options_web_crawl_model['blacklist'] = ['testString']
 
         # Construct a dict representation of a SourceOptionsBuckets model
"allow_untrusted_certificate": false, "maximum_hops": 12, "request_timeout": 15, "override_robots_txt": false, "blacklist": ["blacklist"]}], "buckets": [{"name": "name", "limit": 5}], "crawl_all_buckets": false}}}' responses.add(responses.POST, url, body=mock_response, @@ -841,8 +855,8 @@ def test_create_configuration_value_error(self): # Construct a dict representation of a SegmentSettings model segment_settings_model = {} - segment_settings_model['enabled'] = True - segment_settings_model['selector_tags'] = ['testString'] + segment_settings_model['enabled'] = False + segment_settings_model['selector_tags'] = ['h1', 'h2'] segment_settings_model['annotated_fields'] = ['testString'] # Construct a dict representation of a NormalizationOperation model @@ -922,15 +936,15 @@ def test_create_configuration_value_error(self): enrichment_model['description'] = 'testString' enrichment_model['destination_field'] = 'testString' enrichment_model['source_field'] = 'testString' - enrichment_model['overwrite'] = True + enrichment_model['overwrite'] = False enrichment_model['enrichment'] = 'testString' - enrichment_model['ignore_downstream_errors'] = True + enrichment_model['ignore_downstream_errors'] = False enrichment_model['options'] = enrichment_options_model # Construct a dict representation of a SourceSchedule model source_schedule_model = {} source_schedule_model['enabled'] = True - source_schedule_model['time_zone'] = 'testString' + source_schedule_model['time_zone'] = 'America/New_York' source_schedule_model['frequency'] = 'daily' # Construct a dict representation of a SourceOptionsFolder model @@ -953,11 +967,11 @@ def test_create_configuration_value_error(self): source_options_web_crawl_model = {} source_options_web_crawl_model['url'] = 'testString' source_options_web_crawl_model['limit_to_starting_hosts'] = True - source_options_web_crawl_model['crawl_speed'] = 'gentle' - source_options_web_crawl_model['allow_untrusted_certificate'] = True + source_options_web_crawl_model['crawl_speed'] = 'normal' + source_options_web_crawl_model['allow_untrusted_certificate'] = False source_options_web_crawl_model['maximum_hops'] = 38 source_options_web_crawl_model['request_timeout'] = 38 - source_options_web_crawl_model['override_robots_txt'] = True + source_options_web_crawl_model['override_robots_txt'] = False source_options_web_crawl_model['blacklist'] = ['testString'] # Construct a dict representation of a SourceOptionsBuckets model @@ -1011,6 +1025,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1023,7 +1039,7 @@ def test_list_configurations_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/environments/testString/configurations') - mock_response = '{"configurations": [{"configuration_id": "configuration_id", "name": "name", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "description": "description", "conversions": {"pdf": {"heading": {"fonts": [{"level": 5, "min_size": 8, "max_size": 8, "bold": true, "italic": true, "name": "name"}]}}, "word": {"heading": {"fonts": [{"level": 5, "min_size": 8, "max_size": 8, "bold": true, "italic": true, "name": "name"}], "styles": [{"level": 5, "names": ["names"]}]}}, "html": {"exclude_tags_completely": ["exclude_tags_completely"], "exclude_tags_keep_content": ["exclude_tags_keep_content"], "keep_content": {"xpaths": ["xpaths"]}, "exclude_content": {"xpaths": ["xpaths"]}, "keep_tag_attributes": ["keep_tag_attributes"], "exclude_tag_attributes": ["exclude_tag_attributes"]}, "segment": {"enabled": false, "selector_tags": ["selector_tags"], "annotated_fields": ["annotated_fields"]}, "json_normalizations": [{"operation": "copy", "source_field": "source_field", "destination_field": "destination_field"}], "image_text_recognition": true}, "enrichments": [{"description": "description", "destination_field": "destination_field", "source_field": "source_field", "overwrite": false, "enrichment": "enrichment", "ignore_downstream_errors": true, "options": {"features": {"keywords": {"sentiment": false, "emotion": false, "limit": 5}, "entities": {"sentiment": false, "emotion": false, "limit": 5, "mentions": true, "mention_types": false, "sentence_locations": true, "model": "model"}, "sentiment": {"document": true, "targets": ["target"]}, "emotion": {"document": true, "targets": ["target"]}, "categories": {"mapKey": {"anyKey": "anyValue"}}, "semantic_roles": {"entities": true, "keywords": true, "limit": 5}, "relations": {"model": "model"}, "concepts": {"limit": 5}}, "language": "ar", "model": "model"}}], "normalizations": [{"operation": "copy", "source_field": "source_field", "destination_field": "destination_field"}], "source": {"type": "box", "credential_id": "credential_id", "schedule": {"enabled": false, "time_zone": "time_zone", "frequency": "daily"}, "options": {"folders": [{"owner_user_id": "owner_user_id", "folder_id": "folder_id", "limit": 5}], "objects": [{"name": "name", "limit": 5}], "site_collections": [{"site_collection_path": "site_collection_path", "limit": 5}], "urls": [{"url": "url", "limit_to_starting_hosts": false, "crawl_speed": "gentle", "allow_untrusted_certificate": false, "maximum_hops": 12, "request_timeout": 15, "override_robots_txt": false, "blacklist": ["blacklist"]}], "buckets": [{"name": "name", "limit": 5}], "crawl_all_buckets": false}}}]}' + mock_response = '{"configurations": [{"configuration_id": "configuration_id", "name": "name", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "description": "description", "conversions": {"pdf": {"heading": {"fonts": [{"level": 5, "min_size": 8, "max_size": 8, "bold": true, "italic": true, "name": "name"}]}}, "word": {"heading": {"fonts": [{"level": 5, "min_size": 8, "max_size": 8, "bold": true, "italic": true, "name": "name"}], "styles": [{"level": 5, "names": ["names"]}]}}, 
"html": {"exclude_tags_completely": ["exclude_tags_completely"], "exclude_tags_keep_content": ["exclude_tags_keep_content"], "keep_content": {"xpaths": ["xpaths"]}, "exclude_content": {"xpaths": ["xpaths"]}, "keep_tag_attributes": ["keep_tag_attributes"], "exclude_tag_attributes": ["exclude_tag_attributes"]}, "segment": {"enabled": false, "selector_tags": ["selector_tags"], "annotated_fields": ["annotated_fields"]}, "json_normalizations": [{"operation": "copy", "source_field": "source_field", "destination_field": "destination_field"}], "image_text_recognition": true}, "enrichments": [{"description": "description", "destination_field": "destination_field", "source_field": "source_field", "overwrite": false, "enrichment": "enrichment", "ignore_downstream_errors": false, "options": {"features": {"keywords": {"sentiment": false, "emotion": false, "limit": 5}, "entities": {"sentiment": false, "emotion": false, "limit": 5, "mentions": true, "mention_types": false, "sentence_locations": true, "model": "model"}, "sentiment": {"document": true, "targets": ["target"]}, "emotion": {"document": true, "targets": ["target"]}, "categories": {"mapKey": "anyValue"}, "semantic_roles": {"entities": true, "keywords": true, "limit": 5}, "relations": {"model": "model"}, "concepts": {"limit": 5}}, "language": "ar", "model": "model"}}], "normalizations": [{"operation": "copy", "source_field": "source_field", "destination_field": "destination_field"}], "source": {"type": "box", "credential_id": "credential_id", "schedule": {"enabled": true, "time_zone": "America/New_York", "frequency": "daily"}, "options": {"folders": [{"owner_user_id": "owner_user_id", "folder_id": "folder_id", "limit": 5}], "objects": [{"name": "name", "limit": 5}], "site_collections": [{"site_collection_path": "site_collection_path", "limit": 5}], "urls": [{"url": "url", "limit_to_starting_hosts": true, "crawl_speed": "normal", "allow_untrusted_certificate": false, "maximum_hops": 12, "request_timeout": 15, "override_robots_txt": false, "blacklist": ["blacklist"]}], "buckets": [{"name": "name", "limit": 5}], "crawl_all_buckets": false}}}]}' responses.add(responses.GET, url, body=mock_response, @@ -1057,7 +1073,7 @@ def test_list_configurations_required_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/environments/testString/configurations') - mock_response = '{"configurations": [{"configuration_id": "configuration_id", "name": "name", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "description": "description", "conversions": {"pdf": {"heading": {"fonts": [{"level": 5, "min_size": 8, "max_size": 8, "bold": true, "italic": true, "name": "name"}]}}, "word": {"heading": {"fonts": [{"level": 5, "min_size": 8, "max_size": 8, "bold": true, "italic": true, "name": "name"}], "styles": [{"level": 5, "names": ["names"]}]}}, "html": {"exclude_tags_completely": ["exclude_tags_completely"], "exclude_tags_keep_content": ["exclude_tags_keep_content"], "keep_content": {"xpaths": ["xpaths"]}, "exclude_content": {"xpaths": ["xpaths"]}, "keep_tag_attributes": ["keep_tag_attributes"], "exclude_tag_attributes": ["exclude_tag_attributes"]}, "segment": {"enabled": false, "selector_tags": ["selector_tags"], "annotated_fields": ["annotated_fields"]}, "json_normalizations": [{"operation": "copy", "source_field": "source_field", "destination_field": "destination_field"}], "image_text_recognition": true}, "enrichments": [{"description": "description", "destination_field": "destination_field", "source_field": 
"source_field", "overwrite": false, "enrichment": "enrichment", "ignore_downstream_errors": true, "options": {"features": {"keywords": {"sentiment": false, "emotion": false, "limit": 5}, "entities": {"sentiment": false, "emotion": false, "limit": 5, "mentions": true, "mention_types": false, "sentence_locations": true, "model": "model"}, "sentiment": {"document": true, "targets": ["target"]}, "emotion": {"document": true, "targets": ["target"]}, "categories": {"mapKey": {"anyKey": "anyValue"}}, "semantic_roles": {"entities": true, "keywords": true, "limit": 5}, "relations": {"model": "model"}, "concepts": {"limit": 5}}, "language": "ar", "model": "model"}}], "normalizations": [{"operation": "copy", "source_field": "source_field", "destination_field": "destination_field"}], "source": {"type": "box", "credential_id": "credential_id", "schedule": {"enabled": false, "time_zone": "time_zone", "frequency": "daily"}, "options": {"folders": [{"owner_user_id": "owner_user_id", "folder_id": "folder_id", "limit": 5}], "objects": [{"name": "name", "limit": 5}], "site_collections": [{"site_collection_path": "site_collection_path", "limit": 5}], "urls": [{"url": "url", "limit_to_starting_hosts": false, "crawl_speed": "gentle", "allow_untrusted_certificate": false, "maximum_hops": 12, "request_timeout": 15, "override_robots_txt": false, "blacklist": ["blacklist"]}], "buckets": [{"name": "name", "limit": 5}], "crawl_all_buckets": false}}}]}' + mock_response = '{"configurations": [{"configuration_id": "configuration_id", "name": "name", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "description": "description", "conversions": {"pdf": {"heading": {"fonts": [{"level": 5, "min_size": 8, "max_size": 8, "bold": true, "italic": true, "name": "name"}]}}, "word": {"heading": {"fonts": [{"level": 5, "min_size": 8, "max_size": 8, "bold": true, "italic": true, "name": "name"}], "styles": [{"level": 5, "names": ["names"]}]}}, "html": {"exclude_tags_completely": ["exclude_tags_completely"], "exclude_tags_keep_content": ["exclude_tags_keep_content"], "keep_content": {"xpaths": ["xpaths"]}, "exclude_content": {"xpaths": ["xpaths"]}, "keep_tag_attributes": ["keep_tag_attributes"], "exclude_tag_attributes": ["exclude_tag_attributes"]}, "segment": {"enabled": false, "selector_tags": ["selector_tags"], "annotated_fields": ["annotated_fields"]}, "json_normalizations": [{"operation": "copy", "source_field": "source_field", "destination_field": "destination_field"}], "image_text_recognition": true}, "enrichments": [{"description": "description", "destination_field": "destination_field", "source_field": "source_field", "overwrite": false, "enrichment": "enrichment", "ignore_downstream_errors": false, "options": {"features": {"keywords": {"sentiment": false, "emotion": false, "limit": 5}, "entities": {"sentiment": false, "emotion": false, "limit": 5, "mentions": true, "mention_types": false, "sentence_locations": true, "model": "model"}, "sentiment": {"document": true, "targets": ["target"]}, "emotion": {"document": true, "targets": ["target"]}, "categories": {"mapKey": "anyValue"}, "semantic_roles": {"entities": true, "keywords": true, "limit": 5}, "relations": {"model": "model"}, "concepts": {"limit": 5}}, "language": "ar", "model": "model"}}], "normalizations": [{"operation": "copy", "source_field": "source_field", "destination_field": "destination_field"}], "source": {"type": "box", "credential_id": "credential_id", "schedule": {"enabled": true, "time_zone": "America/New_York", "frequency": 
"daily"}, "options": {"folders": [{"owner_user_id": "owner_user_id", "folder_id": "folder_id", "limit": 5}], "objects": [{"name": "name", "limit": 5}], "site_collections": [{"site_collection_path": "site_collection_path", "limit": 5}], "urls": [{"url": "url", "limit_to_starting_hosts": true, "crawl_speed": "normal", "allow_untrusted_certificate": false, "maximum_hops": 12, "request_timeout": 15, "override_robots_txt": false, "blacklist": ["blacklist"]}], "buckets": [{"name": "name", "limit": 5}], "crawl_all_buckets": false}}}]}' responses.add(responses.GET, url, body=mock_response, @@ -1085,7 +1101,7 @@ def test_list_configurations_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/environments/testString/configurations') - mock_response = '{"configurations": [{"configuration_id": "configuration_id", "name": "name", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "description": "description", "conversions": {"pdf": {"heading": {"fonts": [{"level": 5, "min_size": 8, "max_size": 8, "bold": true, "italic": true, "name": "name"}]}}, "word": {"heading": {"fonts": [{"level": 5, "min_size": 8, "max_size": 8, "bold": true, "italic": true, "name": "name"}], "styles": [{"level": 5, "names": ["names"]}]}}, "html": {"exclude_tags_completely": ["exclude_tags_completely"], "exclude_tags_keep_content": ["exclude_tags_keep_content"], "keep_content": {"xpaths": ["xpaths"]}, "exclude_content": {"xpaths": ["xpaths"]}, "keep_tag_attributes": ["keep_tag_attributes"], "exclude_tag_attributes": ["exclude_tag_attributes"]}, "segment": {"enabled": false, "selector_tags": ["selector_tags"], "annotated_fields": ["annotated_fields"]}, "json_normalizations": [{"operation": "copy", "source_field": "source_field", "destination_field": "destination_field"}], "image_text_recognition": true}, "enrichments": [{"description": "description", "destination_field": "destination_field", "source_field": "source_field", "overwrite": false, "enrichment": "enrichment", "ignore_downstream_errors": true, "options": {"features": {"keywords": {"sentiment": false, "emotion": false, "limit": 5}, "entities": {"sentiment": false, "emotion": false, "limit": 5, "mentions": true, "mention_types": false, "sentence_locations": true, "model": "model"}, "sentiment": {"document": true, "targets": ["target"]}, "emotion": {"document": true, "targets": ["target"]}, "categories": {"mapKey": {"anyKey": "anyValue"}}, "semantic_roles": {"entities": true, "keywords": true, "limit": 5}, "relations": {"model": "model"}, "concepts": {"limit": 5}}, "language": "ar", "model": "model"}}], "normalizations": [{"operation": "copy", "source_field": "source_field", "destination_field": "destination_field"}], "source": {"type": "box", "credential_id": "credential_id", "schedule": {"enabled": false, "time_zone": "time_zone", "frequency": "daily"}, "options": {"folders": [{"owner_user_id": "owner_user_id", "folder_id": "folder_id", "limit": 5}], "objects": [{"name": "name", "limit": 5}], "site_collections": [{"site_collection_path": "site_collection_path", "limit": 5}], "urls": [{"url": "url", "limit_to_starting_hosts": false, "crawl_speed": "gentle", "allow_untrusted_certificate": false, "maximum_hops": 12, "request_timeout": 15, "override_robots_txt": false, "blacklist": ["blacklist"]}], "buckets": [{"name": "name", "limit": 5}], "crawl_all_buckets": false}}}]}' + mock_response = '{"configurations": [{"configuration_id": "configuration_id", "name": "name", "created": "2019-01-01T12:00:00.000Z", "updated": 
"2019-01-01T12:00:00.000Z", "description": "description", "conversions": {"pdf": {"heading": {"fonts": [{"level": 5, "min_size": 8, "max_size": 8, "bold": true, "italic": true, "name": "name"}]}}, "word": {"heading": {"fonts": [{"level": 5, "min_size": 8, "max_size": 8, "bold": true, "italic": true, "name": "name"}], "styles": [{"level": 5, "names": ["names"]}]}}, "html": {"exclude_tags_completely": ["exclude_tags_completely"], "exclude_tags_keep_content": ["exclude_tags_keep_content"], "keep_content": {"xpaths": ["xpaths"]}, "exclude_content": {"xpaths": ["xpaths"]}, "keep_tag_attributes": ["keep_tag_attributes"], "exclude_tag_attributes": ["exclude_tag_attributes"]}, "segment": {"enabled": false, "selector_tags": ["selector_tags"], "annotated_fields": ["annotated_fields"]}, "json_normalizations": [{"operation": "copy", "source_field": "source_field", "destination_field": "destination_field"}], "image_text_recognition": true}, "enrichments": [{"description": "description", "destination_field": "destination_field", "source_field": "source_field", "overwrite": false, "enrichment": "enrichment", "ignore_downstream_errors": false, "options": {"features": {"keywords": {"sentiment": false, "emotion": false, "limit": 5}, "entities": {"sentiment": false, "emotion": false, "limit": 5, "mentions": true, "mention_types": false, "sentence_locations": true, "model": "model"}, "sentiment": {"document": true, "targets": ["target"]}, "emotion": {"document": true, "targets": ["target"]}, "categories": {"mapKey": "anyValue"}, "semantic_roles": {"entities": true, "keywords": true, "limit": 5}, "relations": {"model": "model"}, "concepts": {"limit": 5}}, "language": "ar", "model": "model"}}], "normalizations": [{"operation": "copy", "source_field": "source_field", "destination_field": "destination_field"}], "source": {"type": "box", "credential_id": "credential_id", "schedule": {"enabled": true, "time_zone": "America/New_York", "frequency": "daily"}, "options": {"folders": [{"owner_user_id": "owner_user_id", "folder_id": "folder_id", "limit": 5}], "objects": [{"name": "name", "limit": 5}], "site_collections": [{"site_collection_path": "site_collection_path", "limit": 5}], "urls": [{"url": "url", "limit_to_starting_hosts": true, "crawl_speed": "normal", "allow_untrusted_certificate": false, "maximum_hops": 12, "request_timeout": 15, "override_robots_txt": false, "blacklist": ["blacklist"]}], "buckets": [{"name": "name", "limit": 5}], "crawl_all_buckets": false}}}]}' responses.add(responses.GET, url, body=mock_response, @@ -1115,6 +1131,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1127,7 +1145,7 @@ def test_get_configuration_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/environments/testString/configurations/testString') - mock_response = '{"configuration_id": "configuration_id", "name": "name", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "description": "description", "conversions": {"pdf": {"heading": {"fonts": [{"level": 5, "min_size": 8, "max_size": 8, "bold": true, "italic": true, "name": "name"}]}}, "word": {"heading": {"fonts": [{"level": 5, "min_size": 8, "max_size": 8, "bold": true, "italic": true, "name": "name"}], "styles": [{"level": 5, "names": ["names"]}]}}, "html": {"exclude_tags_completely": ["exclude_tags_completely"], "exclude_tags_keep_content": ["exclude_tags_keep_content"], "keep_content": {"xpaths": ["xpaths"]}, "exclude_content": {"xpaths": ["xpaths"]}, "keep_tag_attributes": ["keep_tag_attributes"], "exclude_tag_attributes": ["exclude_tag_attributes"]}, "segment": {"enabled": false, "selector_tags": ["selector_tags"], "annotated_fields": ["annotated_fields"]}, "json_normalizations": [{"operation": "copy", "source_field": "source_field", "destination_field": "destination_field"}], "image_text_recognition": true}, "enrichments": [{"description": "description", "destination_field": "destination_field", "source_field": "source_field", "overwrite": false, "enrichment": "enrichment", "ignore_downstream_errors": true, "options": {"features": {"keywords": {"sentiment": false, "emotion": false, "limit": 5}, "entities": {"sentiment": false, "emotion": false, "limit": 5, "mentions": true, "mention_types": false, "sentence_locations": true, "model": "model"}, "sentiment": {"document": true, "targets": ["target"]}, "emotion": {"document": true, "targets": ["target"]}, "categories": {"mapKey": {"anyKey": "anyValue"}}, "semantic_roles": {"entities": true, "keywords": true, "limit": 5}, "relations": {"model": "model"}, "concepts": {"limit": 5}}, "language": "ar", "model": "model"}}], "normalizations": [{"operation": "copy", "source_field": "source_field", "destination_field": "destination_field"}], "source": {"type": "box", "credential_id": "credential_id", "schedule": {"enabled": false, "time_zone": "time_zone", "frequency": "daily"}, "options": {"folders": [{"owner_user_id": "owner_user_id", "folder_id": "folder_id", "limit": 5}], "objects": [{"name": "name", "limit": 5}], "site_collections": [{"site_collection_path": "site_collection_path", "limit": 5}], "urls": [{"url": "url", "limit_to_starting_hosts": false, "crawl_speed": "gentle", "allow_untrusted_certificate": false, "maximum_hops": 12, "request_timeout": 15, "override_robots_txt": false, "blacklist": ["blacklist"]}], "buckets": [{"name": "name", "limit": 5}], "crawl_all_buckets": false}}}' + mock_response = '{"configuration_id": "configuration_id", "name": "name", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "description": "description", "conversions": {"pdf": {"heading": {"fonts": [{"level": 5, "min_size": 8, "max_size": 8, "bold": true, "italic": true, "name": "name"}]}}, "word": {"heading": {"fonts": [{"level": 5, "min_size": 8, "max_size": 8, "bold": true, "italic": true, "name": "name"}], "styles": [{"level": 5, "names": ["names"]}]}}, "html": {"exclude_tags_completely": 
["exclude_tags_completely"], "exclude_tags_keep_content": ["exclude_tags_keep_content"], "keep_content": {"xpaths": ["xpaths"]}, "exclude_content": {"xpaths": ["xpaths"]}, "keep_tag_attributes": ["keep_tag_attributes"], "exclude_tag_attributes": ["exclude_tag_attributes"]}, "segment": {"enabled": false, "selector_tags": ["selector_tags"], "annotated_fields": ["annotated_fields"]}, "json_normalizations": [{"operation": "copy", "source_field": "source_field", "destination_field": "destination_field"}], "image_text_recognition": true}, "enrichments": [{"description": "description", "destination_field": "destination_field", "source_field": "source_field", "overwrite": false, "enrichment": "enrichment", "ignore_downstream_errors": false, "options": {"features": {"keywords": {"sentiment": false, "emotion": false, "limit": 5}, "entities": {"sentiment": false, "emotion": false, "limit": 5, "mentions": true, "mention_types": false, "sentence_locations": true, "model": "model"}, "sentiment": {"document": true, "targets": ["target"]}, "emotion": {"document": true, "targets": ["target"]}, "categories": {"mapKey": "anyValue"}, "semantic_roles": {"entities": true, "keywords": true, "limit": 5}, "relations": {"model": "model"}, "concepts": {"limit": 5}}, "language": "ar", "model": "model"}}], "normalizations": [{"operation": "copy", "source_field": "source_field", "destination_field": "destination_field"}], "source": {"type": "box", "credential_id": "credential_id", "schedule": {"enabled": true, "time_zone": "America/New_York", "frequency": "daily"}, "options": {"folders": [{"owner_user_id": "owner_user_id", "folder_id": "folder_id", "limit": 5}], "objects": [{"name": "name", "limit": 5}], "site_collections": [{"site_collection_path": "site_collection_path", "limit": 5}], "urls": [{"url": "url", "limit_to_starting_hosts": true, "crawl_speed": "normal", "allow_untrusted_certificate": false, "maximum_hops": 12, "request_timeout": 15, "override_robots_txt": false, "blacklist": ["blacklist"]}], "buckets": [{"name": "name", "limit": 5}], "crawl_all_buckets": false}}}' responses.add(responses.GET, url, body=mock_response, @@ -1157,7 +1175,7 @@ def test_get_configuration_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/environments/testString/configurations/testString') - mock_response = '{"configuration_id": "configuration_id", "name": "name", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "description": "description", "conversions": {"pdf": {"heading": {"fonts": [{"level": 5, "min_size": 8, "max_size": 8, "bold": true, "italic": true, "name": "name"}]}}, "word": {"heading": {"fonts": [{"level": 5, "min_size": 8, "max_size": 8, "bold": true, "italic": true, "name": "name"}], "styles": [{"level": 5, "names": ["names"]}]}}, "html": {"exclude_tags_completely": ["exclude_tags_completely"], "exclude_tags_keep_content": ["exclude_tags_keep_content"], "keep_content": {"xpaths": ["xpaths"]}, "exclude_content": {"xpaths": ["xpaths"]}, "keep_tag_attributes": ["keep_tag_attributes"], "exclude_tag_attributes": ["exclude_tag_attributes"]}, "segment": {"enabled": false, "selector_tags": ["selector_tags"], "annotated_fields": ["annotated_fields"]}, "json_normalizations": [{"operation": "copy", "source_field": "source_field", "destination_field": "destination_field"}], "image_text_recognition": true}, "enrichments": [{"description": "description", "destination_field": "destination_field", "source_field": "source_field", "overwrite": false, "enrichment": 
"enrichment", "ignore_downstream_errors": true, "options": {"features": {"keywords": {"sentiment": false, "emotion": false, "limit": 5}, "entities": {"sentiment": false, "emotion": false, "limit": 5, "mentions": true, "mention_types": false, "sentence_locations": true, "model": "model"}, "sentiment": {"document": true, "targets": ["target"]}, "emotion": {"document": true, "targets": ["target"]}, "categories": {"mapKey": {"anyKey": "anyValue"}}, "semantic_roles": {"entities": true, "keywords": true, "limit": 5}, "relations": {"model": "model"}, "concepts": {"limit": 5}}, "language": "ar", "model": "model"}}], "normalizations": [{"operation": "copy", "source_field": "source_field", "destination_field": "destination_field"}], "source": {"type": "box", "credential_id": "credential_id", "schedule": {"enabled": false, "time_zone": "time_zone", "frequency": "daily"}, "options": {"folders": [{"owner_user_id": "owner_user_id", "folder_id": "folder_id", "limit": 5}], "objects": [{"name": "name", "limit": 5}], "site_collections": [{"site_collection_path": "site_collection_path", "limit": 5}], "urls": [{"url": "url", "limit_to_starting_hosts": false, "crawl_speed": "gentle", "allow_untrusted_certificate": false, "maximum_hops": 12, "request_timeout": 15, "override_robots_txt": false, "blacklist": ["blacklist"]}], "buckets": [{"name": "name", "limit": 5}], "crawl_all_buckets": false}}}' + mock_response = '{"configuration_id": "configuration_id", "name": "name", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "description": "description", "conversions": {"pdf": {"heading": {"fonts": [{"level": 5, "min_size": 8, "max_size": 8, "bold": true, "italic": true, "name": "name"}]}}, "word": {"heading": {"fonts": [{"level": 5, "min_size": 8, "max_size": 8, "bold": true, "italic": true, "name": "name"}], "styles": [{"level": 5, "names": ["names"]}]}}, "html": {"exclude_tags_completely": ["exclude_tags_completely"], "exclude_tags_keep_content": ["exclude_tags_keep_content"], "keep_content": {"xpaths": ["xpaths"]}, "exclude_content": {"xpaths": ["xpaths"]}, "keep_tag_attributes": ["keep_tag_attributes"], "exclude_tag_attributes": ["exclude_tag_attributes"]}, "segment": {"enabled": false, "selector_tags": ["selector_tags"], "annotated_fields": ["annotated_fields"]}, "json_normalizations": [{"operation": "copy", "source_field": "source_field", "destination_field": "destination_field"}], "image_text_recognition": true}, "enrichments": [{"description": "description", "destination_field": "destination_field", "source_field": "source_field", "overwrite": false, "enrichment": "enrichment", "ignore_downstream_errors": false, "options": {"features": {"keywords": {"sentiment": false, "emotion": false, "limit": 5}, "entities": {"sentiment": false, "emotion": false, "limit": 5, "mentions": true, "mention_types": false, "sentence_locations": true, "model": "model"}, "sentiment": {"document": true, "targets": ["target"]}, "emotion": {"document": true, "targets": ["target"]}, "categories": {"mapKey": "anyValue"}, "semantic_roles": {"entities": true, "keywords": true, "limit": 5}, "relations": {"model": "model"}, "concepts": {"limit": 5}}, "language": "ar", "model": "model"}}], "normalizations": [{"operation": "copy", "source_field": "source_field", "destination_field": "destination_field"}], "source": {"type": "box", "credential_id": "credential_id", "schedule": {"enabled": true, "time_zone": "America/New_York", "frequency": "daily"}, "options": {"folders": [{"owner_user_id": "owner_user_id", 
"folder_id": "folder_id", "limit": 5}], "objects": [{"name": "name", "limit": 5}], "site_collections": [{"site_collection_path": "site_collection_path", "limit": 5}], "urls": [{"url": "url", "limit_to_starting_hosts": true, "crawl_speed": "normal", "allow_untrusted_certificate": false, "maximum_hops": 12, "request_timeout": 15, "override_robots_txt": false, "blacklist": ["blacklist"]}], "buckets": [{"name": "name", "limit": 5}], "crawl_all_buckets": false}}}' responses.add(responses.GET, url, body=mock_response, @@ -1189,6 +1207,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1201,7 +1221,7 @@ def test_update_configuration_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/environments/testString/configurations/testString') - mock_response = '{"configuration_id": "configuration_id", "name": "name", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "description": "description", "conversions": {"pdf": {"heading": {"fonts": [{"level": 5, "min_size": 8, "max_size": 8, "bold": true, "italic": true, "name": "name"}]}}, "word": {"heading": {"fonts": [{"level": 5, "min_size": 8, "max_size": 8, "bold": true, "italic": true, "name": "name"}], "styles": [{"level": 5, "names": ["names"]}]}}, "html": {"exclude_tags_completely": ["exclude_tags_completely"], "exclude_tags_keep_content": ["exclude_tags_keep_content"], "keep_content": {"xpaths": ["xpaths"]}, "exclude_content": {"xpaths": ["xpaths"]}, "keep_tag_attributes": ["keep_tag_attributes"], "exclude_tag_attributes": ["exclude_tag_attributes"]}, "segment": {"enabled": false, "selector_tags": ["selector_tags"], "annotated_fields": ["annotated_fields"]}, "json_normalizations": [{"operation": "copy", "source_field": "source_field", "destination_field": "destination_field"}], "image_text_recognition": true}, "enrichments": [{"description": "description", "destination_field": "destination_field", "source_field": "source_field", "overwrite": false, "enrichment": "enrichment", "ignore_downstream_errors": true, "options": {"features": {"keywords": {"sentiment": false, "emotion": false, "limit": 5}, "entities": {"sentiment": false, "emotion": false, "limit": 5, "mentions": true, "mention_types": false, "sentence_locations": true, "model": "model"}, "sentiment": {"document": true, "targets": ["target"]}, "emotion": {"document": true, "targets": ["target"]}, "categories": {"mapKey": {"anyKey": "anyValue"}}, "semantic_roles": {"entities": true, "keywords": true, "limit": 5}, "relations": {"model": "model"}, "concepts": {"limit": 5}}, "language": "ar", "model": "model"}}], "normalizations": [{"operation": "copy", "source_field": "source_field", "destination_field": "destination_field"}], "source": {"type": "box", "credential_id": "credential_id", "schedule": {"enabled": false, "time_zone": "time_zone", "frequency": "daily"}, "options": {"folders": [{"owner_user_id": "owner_user_id", "folder_id": "folder_id", "limit": 5}], "objects": [{"name": "name", "limit": 5}], "site_collections": [{"site_collection_path": "site_collection_path", "limit": 5}], "urls": [{"url": "url", "limit_to_starting_hosts": false, "crawl_speed": "gentle", "allow_untrusted_certificate": false, "maximum_hops": 12, "request_timeout": 15, 
"override_robots_txt": false, "blacklist": ["blacklist"]}], "buckets": [{"name": "name", "limit": 5}], "crawl_all_buckets": false}}}' + mock_response = '{"configuration_id": "configuration_id", "name": "name", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "description": "description", "conversions": {"pdf": {"heading": {"fonts": [{"level": 5, "min_size": 8, "max_size": 8, "bold": true, "italic": true, "name": "name"}]}}, "word": {"heading": {"fonts": [{"level": 5, "min_size": 8, "max_size": 8, "bold": true, "italic": true, "name": "name"}], "styles": [{"level": 5, "names": ["names"]}]}}, "html": {"exclude_tags_completely": ["exclude_tags_completely"], "exclude_tags_keep_content": ["exclude_tags_keep_content"], "keep_content": {"xpaths": ["xpaths"]}, "exclude_content": {"xpaths": ["xpaths"]}, "keep_tag_attributes": ["keep_tag_attributes"], "exclude_tag_attributes": ["exclude_tag_attributes"]}, "segment": {"enabled": false, "selector_tags": ["selector_tags"], "annotated_fields": ["annotated_fields"]}, "json_normalizations": [{"operation": "copy", "source_field": "source_field", "destination_field": "destination_field"}], "image_text_recognition": true}, "enrichments": [{"description": "description", "destination_field": "destination_field", "source_field": "source_field", "overwrite": false, "enrichment": "enrichment", "ignore_downstream_errors": false, "options": {"features": {"keywords": {"sentiment": false, "emotion": false, "limit": 5}, "entities": {"sentiment": false, "emotion": false, "limit": 5, "mentions": true, "mention_types": false, "sentence_locations": true, "model": "model"}, "sentiment": {"document": true, "targets": ["target"]}, "emotion": {"document": true, "targets": ["target"]}, "categories": {"mapKey": "anyValue"}, "semantic_roles": {"entities": true, "keywords": true, "limit": 5}, "relations": {"model": "model"}, "concepts": {"limit": 5}}, "language": "ar", "model": "model"}}], "normalizations": [{"operation": "copy", "source_field": "source_field", "destination_field": "destination_field"}], "source": {"type": "box", "credential_id": "credential_id", "schedule": {"enabled": true, "time_zone": "America/New_York", "frequency": "daily"}, "options": {"folders": [{"owner_user_id": "owner_user_id", "folder_id": "folder_id", "limit": 5}], "objects": [{"name": "name", "limit": 5}], "site_collections": [{"site_collection_path": "site_collection_path", "limit": 5}], "urls": [{"url": "url", "limit_to_starting_hosts": true, "crawl_speed": "normal", "allow_untrusted_certificate": false, "maximum_hops": 12, "request_timeout": 15, "override_robots_txt": false, "blacklist": ["blacklist"]}], "buckets": [{"name": "name", "limit": 5}], "crawl_all_buckets": false}}}' responses.add(responses.PUT, url, body=mock_response, @@ -1254,8 +1274,8 @@ def test_update_configuration_all_params(self): # Construct a dict representation of a SegmentSettings model segment_settings_model = {} - segment_settings_model['enabled'] = True - segment_settings_model['selector_tags'] = ['testString'] + segment_settings_model['enabled'] = False + segment_settings_model['selector_tags'] = ['h1', 'h2'] segment_settings_model['annotated_fields'] = ['testString'] # Construct a dict representation of a NormalizationOperation model @@ -1335,15 +1355,15 @@ def test_update_configuration_all_params(self): enrichment_model['description'] = 'testString' enrichment_model['destination_field'] = 'testString' enrichment_model['source_field'] = 'testString' - enrichment_model['overwrite'] = True + 
enrichment_model['overwrite'] = False enrichment_model['enrichment'] = 'testString' - enrichment_model['ignore_downstream_errors'] = True + enrichment_model['ignore_downstream_errors'] = False enrichment_model['options'] = enrichment_options_model # Construct a dict representation of a SourceSchedule model source_schedule_model = {} source_schedule_model['enabled'] = True - source_schedule_model['time_zone'] = 'testString' + source_schedule_model['time_zone'] = 'America/New_York' source_schedule_model['frequency'] = 'daily' # Construct a dict representation of a SourceOptionsFolder model @@ -1366,11 +1386,11 @@ def test_update_configuration_all_params(self): source_options_web_crawl_model = {} source_options_web_crawl_model['url'] = 'testString' source_options_web_crawl_model['limit_to_starting_hosts'] = True - source_options_web_crawl_model['crawl_speed'] = 'gentle' - source_options_web_crawl_model['allow_untrusted_certificate'] = True + source_options_web_crawl_model['crawl_speed'] = 'normal' + source_options_web_crawl_model['allow_untrusted_certificate'] = False source_options_web_crawl_model['maximum_hops'] = 38 source_options_web_crawl_model['request_timeout'] = 38 - source_options_web_crawl_model['override_robots_txt'] = True + source_options_web_crawl_model['override_robots_txt'] = False source_options_web_crawl_model['blacklist'] = ['testString'] # Construct a dict representation of a SourceOptionsBuckets model @@ -1437,7 +1457,7 @@ def test_update_configuration_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/environments/testString/configurations/testString') - mock_response = '{"configuration_id": "configuration_id", "name": "name", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "description": "description", "conversions": {"pdf": {"heading": {"fonts": [{"level": 5, "min_size": 8, "max_size": 8, "bold": true, "italic": true, "name": "name"}]}}, "word": {"heading": {"fonts": [{"level": 5, "min_size": 8, "max_size": 8, "bold": true, "italic": true, "name": "name"}], "styles": [{"level": 5, "names": ["names"]}]}}, "html": {"exclude_tags_completely": ["exclude_tags_completely"], "exclude_tags_keep_content": ["exclude_tags_keep_content"], "keep_content": {"xpaths": ["xpaths"]}, "exclude_content": {"xpaths": ["xpaths"]}, "keep_tag_attributes": ["keep_tag_attributes"], "exclude_tag_attributes": ["exclude_tag_attributes"]}, "segment": {"enabled": false, "selector_tags": ["selector_tags"], "annotated_fields": ["annotated_fields"]}, "json_normalizations": [{"operation": "copy", "source_field": "source_field", "destination_field": "destination_field"}], "image_text_recognition": true}, "enrichments": [{"description": "description", "destination_field": "destination_field", "source_field": "source_field", "overwrite": false, "enrichment": "enrichment", "ignore_downstream_errors": true, "options": {"features": {"keywords": {"sentiment": false, "emotion": false, "limit": 5}, "entities": {"sentiment": false, "emotion": false, "limit": 5, "mentions": true, "mention_types": false, "sentence_locations": true, "model": "model"}, "sentiment": {"document": true, "targets": ["target"]}, "emotion": {"document": true, "targets": ["target"]}, "categories": {"mapKey": {"anyKey": "anyValue"}}, "semantic_roles": {"entities": true, "keywords": true, "limit": 5}, "relations": {"model": "model"}, "concepts": {"limit": 5}}, "language": "ar", "model": "model"}}], "normalizations": [{"operation": "copy", "source_field": "source_field", 
"destination_field": "destination_field"}], "source": {"type": "box", "credential_id": "credential_id", "schedule": {"enabled": false, "time_zone": "time_zone", "frequency": "daily"}, "options": {"folders": [{"owner_user_id": "owner_user_id", "folder_id": "folder_id", "limit": 5}], "objects": [{"name": "name", "limit": 5}], "site_collections": [{"site_collection_path": "site_collection_path", "limit": 5}], "urls": [{"url": "url", "limit_to_starting_hosts": false, "crawl_speed": "gentle", "allow_untrusted_certificate": false, "maximum_hops": 12, "request_timeout": 15, "override_robots_txt": false, "blacklist": ["blacklist"]}], "buckets": [{"name": "name", "limit": 5}], "crawl_all_buckets": false}}}' + mock_response = '{"configuration_id": "configuration_id", "name": "name", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "description": "description", "conversions": {"pdf": {"heading": {"fonts": [{"level": 5, "min_size": 8, "max_size": 8, "bold": true, "italic": true, "name": "name"}]}}, "word": {"heading": {"fonts": [{"level": 5, "min_size": 8, "max_size": 8, "bold": true, "italic": true, "name": "name"}], "styles": [{"level": 5, "names": ["names"]}]}}, "html": {"exclude_tags_completely": ["exclude_tags_completely"], "exclude_tags_keep_content": ["exclude_tags_keep_content"], "keep_content": {"xpaths": ["xpaths"]}, "exclude_content": {"xpaths": ["xpaths"]}, "keep_tag_attributes": ["keep_tag_attributes"], "exclude_tag_attributes": ["exclude_tag_attributes"]}, "segment": {"enabled": false, "selector_tags": ["selector_tags"], "annotated_fields": ["annotated_fields"]}, "json_normalizations": [{"operation": "copy", "source_field": "source_field", "destination_field": "destination_field"}], "image_text_recognition": true}, "enrichments": [{"description": "description", "destination_field": "destination_field", "source_field": "source_field", "overwrite": false, "enrichment": "enrichment", "ignore_downstream_errors": false, "options": {"features": {"keywords": {"sentiment": false, "emotion": false, "limit": 5}, "entities": {"sentiment": false, "emotion": false, "limit": 5, "mentions": true, "mention_types": false, "sentence_locations": true, "model": "model"}, "sentiment": {"document": true, "targets": ["target"]}, "emotion": {"document": true, "targets": ["target"]}, "categories": {"mapKey": "anyValue"}, "semantic_roles": {"entities": true, "keywords": true, "limit": 5}, "relations": {"model": "model"}, "concepts": {"limit": 5}}, "language": "ar", "model": "model"}}], "normalizations": [{"operation": "copy", "source_field": "source_field", "destination_field": "destination_field"}], "source": {"type": "box", "credential_id": "credential_id", "schedule": {"enabled": true, "time_zone": "America/New_York", "frequency": "daily"}, "options": {"folders": [{"owner_user_id": "owner_user_id", "folder_id": "folder_id", "limit": 5}], "objects": [{"name": "name", "limit": 5}], "site_collections": [{"site_collection_path": "site_collection_path", "limit": 5}], "urls": [{"url": "url", "limit_to_starting_hosts": true, "crawl_speed": "normal", "allow_untrusted_certificate": false, "maximum_hops": 12, "request_timeout": 15, "override_robots_txt": false, "blacklist": ["blacklist"]}], "buckets": [{"name": "name", "limit": 5}], "crawl_all_buckets": false}}}' responses.add(responses.PUT, url, body=mock_response, @@ -1490,8 +1510,8 @@ def test_update_configuration_value_error(self): # Construct a dict representation of a SegmentSettings model segment_settings_model = {} - 
segment_settings_model['enabled'] = True - segment_settings_model['selector_tags'] = ['testString'] + segment_settings_model['enabled'] = False + segment_settings_model['selector_tags'] = ['h1', 'h2'] segment_settings_model['annotated_fields'] = ['testString'] # Construct a dict representation of a NormalizationOperation model @@ -1571,15 +1591,15 @@ def test_update_configuration_value_error(self): enrichment_model['description'] = 'testString' enrichment_model['destination_field'] = 'testString' enrichment_model['source_field'] = 'testString' - enrichment_model['overwrite'] = True + enrichment_model['overwrite'] = False enrichment_model['enrichment'] = 'testString' - enrichment_model['ignore_downstream_errors'] = True + enrichment_model['ignore_downstream_errors'] = False enrichment_model['options'] = enrichment_options_model # Construct a dict representation of a SourceSchedule model source_schedule_model = {} source_schedule_model['enabled'] = True - source_schedule_model['time_zone'] = 'testString' + source_schedule_model['time_zone'] = 'America/New_York' source_schedule_model['frequency'] = 'daily' # Construct a dict representation of a SourceOptionsFolder model @@ -1602,11 +1622,11 @@ def test_update_configuration_value_error(self): source_options_web_crawl_model = {} source_options_web_crawl_model['url'] = 'testString' source_options_web_crawl_model['limit_to_starting_hosts'] = True - source_options_web_crawl_model['crawl_speed'] = 'gentle' - source_options_web_crawl_model['allow_untrusted_certificate'] = True + source_options_web_crawl_model['crawl_speed'] = 'normal' + source_options_web_crawl_model['allow_untrusted_certificate'] = False source_options_web_crawl_model['maximum_hops'] = 38 source_options_web_crawl_model['request_timeout'] = 38 - source_options_web_crawl_model['override_robots_txt'] = True + source_options_web_crawl_model['override_robots_txt'] = False source_options_web_crawl_model['blacklist'] = ['testString'] # Construct a dict representation of a SourceOptionsBuckets model @@ -1662,6 +1682,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1746,6 +1768,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
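Note: the value edits in the hunks above replace generated placeholders with plausible settings (an IANA time zone, real selector tags, a supported crawl speed). Condensed, the updated request-side model dicts read as follows; every value is taken from the diff itself:

    # Condensed versions of the model dicts these hunks edit; the values are
    # the ones the diff switches to (realistic settings, not placeholders).
    segment_settings_model = {
        'enabled': False,                # was True
        'selector_tags': ['h1', 'h2'],   # was ['testString']
        'annotated_fields': ['testString'],
    }

    source_schedule_model = {
        'enabled': True,
        'time_zone': 'America/New_York',  # was 'testString'; an IANA zone name
        'frequency': 'daily',
    }

    source_options_web_crawl_model = {
        'url': 'testString',
        'limit_to_starting_hosts': True,
        'crawl_speed': 'normal',               # was 'gentle'
        'allow_untrusted_certificate': False,  # was True
        'maximum_hops': 38,
        'request_timeout': 38,
        'override_robots_txt': False,          # was True
        'blacklist': ['testString'],
    }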
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1758,7 +1782,7 @@ def test_create_collection_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/environments/testString/collections') - mock_response = '{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status": "active", "configuration_id": "configuration_id", "language": "language", "document_counts": {"available": 9, "processing": 10, "failed": 6, "pending": 7}, "disk_usage": {"used_bytes": 10}, "training_status": {"total_examples": 14, "available": false, "processing": true, "minimum_queries_added": false, "minimum_examples_added": true, "sufficient_label_diversity": true, "notices": 7, "successfully_trained": "2019-01-01T12:00:00.000Z", "data_updated": "2019-01-01T12:00:00.000Z"}, "crawl_status": {"source_crawl": {"status": "running", "next_crawl": "2019-01-01T12:00:00.000Z"}}, "smart_document_understanding": {"enabled": false, "total_annotated_pages": 21, "total_pages": 11, "total_documents": 15, "custom_fields": {"defined": 7, "maximum_allowed": 15}}}' + mock_response = '{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status": "active", "configuration_id": "configuration_id", "language": "language", "document_counts": {"available": 9, "processing": 10, "failed": 6, "pending": 7}, "disk_usage": {"used_bytes": 10}, "training_status": {"total_examples": 14, "available": false, "processing": true, "minimum_queries_added": false, "minimum_examples_added": true, "sufficient_label_diversity": true, "notices": 7, "successfully_trained": "2019-01-01T12:00:00.000Z", "data_updated": "2019-01-01T12:00:00.000Z"}, "crawl_status": {"source_crawl": {"status": "running", "next_crawl": "2019-01-01T12:00:00.000Z"}}, "smart_document_understanding": {"enabled": true, "total_annotated_pages": 21, "total_pages": 11, "total_documents": 15, "custom_fields": {"defined": 7, "maximum_allowed": 15}}}' responses.add(responses.POST, url, body=mock_response, @@ -1800,7 +1824,7 @@ def test_create_collection_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/environments/testString/collections') - mock_response = '{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status": "active", "configuration_id": "configuration_id", "language": "language", "document_counts": {"available": 9, "processing": 10, "failed": 6, "pending": 7}, "disk_usage": {"used_bytes": 10}, "training_status": {"total_examples": 14, "available": false, "processing": true, "minimum_queries_added": false, "minimum_examples_added": true, "sufficient_label_diversity": true, "notices": 7, "successfully_trained": "2019-01-01T12:00:00.000Z", "data_updated": "2019-01-01T12:00:00.000Z"}, "crawl_status": {"source_crawl": {"status": "running", "next_crawl": "2019-01-01T12:00:00.000Z"}}, "smart_document_understanding": {"enabled": false, "total_annotated_pages": 21, "total_pages": 11, "total_documents": 15, "custom_fields": {"defined": 7, "maximum_allowed": 15}}}' + mock_response = '{"collection_id": "collection_id", "name": "name", "description": 
"description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status": "active", "configuration_id": "configuration_id", "language": "language", "document_counts": {"available": 9, "processing": 10, "failed": 6, "pending": 7}, "disk_usage": {"used_bytes": 10}, "training_status": {"total_examples": 14, "available": false, "processing": true, "minimum_queries_added": false, "minimum_examples_added": true, "sufficient_label_diversity": true, "notices": 7, "successfully_trained": "2019-01-01T12:00:00.000Z", "data_updated": "2019-01-01T12:00:00.000Z"}, "crawl_status": {"source_crawl": {"status": "running", "next_crawl": "2019-01-01T12:00:00.000Z"}}, "smart_document_understanding": {"enabled": true, "total_annotated_pages": 21, "total_pages": 11, "total_documents": 15, "custom_fields": {"defined": 7, "maximum_allowed": 15}}}' responses.add(responses.POST, url, body=mock_response, @@ -1835,6 +1859,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1847,7 +1873,7 @@ def test_list_collections_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/environments/testString/collections') - mock_response = '{"collections": [{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status": "active", "configuration_id": "configuration_id", "language": "language", "document_counts": {"available": 9, "processing": 10, "failed": 6, "pending": 7}, "disk_usage": {"used_bytes": 10}, "training_status": {"total_examples": 14, "available": false, "processing": true, "minimum_queries_added": false, "minimum_examples_added": true, "sufficient_label_diversity": true, "notices": 7, "successfully_trained": "2019-01-01T12:00:00.000Z", "data_updated": "2019-01-01T12:00:00.000Z"}, "crawl_status": {"source_crawl": {"status": "running", "next_crawl": "2019-01-01T12:00:00.000Z"}}, "smart_document_understanding": {"enabled": false, "total_annotated_pages": 21, "total_pages": 11, "total_documents": 15, "custom_fields": {"defined": 7, "maximum_allowed": 15}}}]}' + mock_response = '{"collections": [{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status": "active", "configuration_id": "configuration_id", "language": "language", "document_counts": {"available": 9, "processing": 10, "failed": 6, "pending": 7}, "disk_usage": {"used_bytes": 10}, "training_status": {"total_examples": 14, "available": false, "processing": true, "minimum_queries_added": false, "minimum_examples_added": true, "sufficient_label_diversity": true, "notices": 7, "successfully_trained": "2019-01-01T12:00:00.000Z", "data_updated": "2019-01-01T12:00:00.000Z"}, "crawl_status": {"source_crawl": {"status": "running", "next_crawl": "2019-01-01T12:00:00.000Z"}}, "smart_document_understanding": {"enabled": true, "total_annotated_pages": 21, "total_pages": 11, "total_documents": 15, "custom_fields": {"defined": 7, "maximum_allowed": 15}}}]}' responses.add(responses.GET, url, body=mock_response, @@ -1881,7 +1907,7 @@ def test_list_collections_required_params(self): """ # Set up mock url = 
self.preprocess_url(_base_url + '/v1/environments/testString/collections') - mock_response = '{"collections": [{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status": "active", "configuration_id": "configuration_id", "language": "language", "document_counts": {"available": 9, "processing": 10, "failed": 6, "pending": 7}, "disk_usage": {"used_bytes": 10}, "training_status": {"total_examples": 14, "available": false, "processing": true, "minimum_queries_added": false, "minimum_examples_added": true, "sufficient_label_diversity": true, "notices": 7, "successfully_trained": "2019-01-01T12:00:00.000Z", "data_updated": "2019-01-01T12:00:00.000Z"}, "crawl_status": {"source_crawl": {"status": "running", "next_crawl": "2019-01-01T12:00:00.000Z"}}, "smart_document_understanding": {"enabled": false, "total_annotated_pages": 21, "total_pages": 11, "total_documents": 15, "custom_fields": {"defined": 7, "maximum_allowed": 15}}}]}' + mock_response = '{"collections": [{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status": "active", "configuration_id": "configuration_id", "language": "language", "document_counts": {"available": 9, "processing": 10, "failed": 6, "pending": 7}, "disk_usage": {"used_bytes": 10}, "training_status": {"total_examples": 14, "available": false, "processing": true, "minimum_queries_added": false, "minimum_examples_added": true, "sufficient_label_diversity": true, "notices": 7, "successfully_trained": "2019-01-01T12:00:00.000Z", "data_updated": "2019-01-01T12:00:00.000Z"}, "crawl_status": {"source_crawl": {"status": "running", "next_crawl": "2019-01-01T12:00:00.000Z"}}, "smart_document_understanding": {"enabled": true, "total_annotated_pages": 21, "total_pages": 11, "total_documents": 15, "custom_fields": {"defined": 7, "maximum_allowed": 15}}}]}' responses.add(responses.GET, url, body=mock_response, @@ -1909,7 +1935,7 @@ def test_list_collections_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/environments/testString/collections') - mock_response = '{"collections": [{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status": "active", "configuration_id": "configuration_id", "language": "language", "document_counts": {"available": 9, "processing": 10, "failed": 6, "pending": 7}, "disk_usage": {"used_bytes": 10}, "training_status": {"total_examples": 14, "available": false, "processing": true, "minimum_queries_added": false, "minimum_examples_added": true, "sufficient_label_diversity": true, "notices": 7, "successfully_trained": "2019-01-01T12:00:00.000Z", "data_updated": "2019-01-01T12:00:00.000Z"}, "crawl_status": {"source_crawl": {"status": "running", "next_crawl": "2019-01-01T12:00:00.000Z"}}, "smart_document_understanding": {"enabled": false, "total_annotated_pages": 21, "total_pages": 11, "total_documents": 15, "custom_fields": {"defined": 7, "maximum_allowed": 15}}}]}' + mock_response = '{"collections": [{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status": "active", "configuration_id": "configuration_id", "language": "language", "document_counts": {"available": 9, "processing": 10, "failed": 6, 
"pending": 7}, "disk_usage": {"used_bytes": 10}, "training_status": {"total_examples": 14, "available": false, "processing": true, "minimum_queries_added": false, "minimum_examples_added": true, "sufficient_label_diversity": true, "notices": 7, "successfully_trained": "2019-01-01T12:00:00.000Z", "data_updated": "2019-01-01T12:00:00.000Z"}, "crawl_status": {"source_crawl": {"status": "running", "next_crawl": "2019-01-01T12:00:00.000Z"}}, "smart_document_understanding": {"enabled": true, "total_annotated_pages": 21, "total_pages": 11, "total_documents": 15, "custom_fields": {"defined": 7, "maximum_allowed": 15}}}]}' responses.add(responses.GET, url, body=mock_response, @@ -1939,6 +1965,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1951,7 +1979,7 @@ def test_get_collection_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/environments/testString/collections/testString') - mock_response = '{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status": "active", "configuration_id": "configuration_id", "language": "language", "document_counts": {"available": 9, "processing": 10, "failed": 6, "pending": 7}, "disk_usage": {"used_bytes": 10}, "training_status": {"total_examples": 14, "available": false, "processing": true, "minimum_queries_added": false, "minimum_examples_added": true, "sufficient_label_diversity": true, "notices": 7, "successfully_trained": "2019-01-01T12:00:00.000Z", "data_updated": "2019-01-01T12:00:00.000Z"}, "crawl_status": {"source_crawl": {"status": "running", "next_crawl": "2019-01-01T12:00:00.000Z"}}, "smart_document_understanding": {"enabled": false, "total_annotated_pages": 21, "total_pages": 11, "total_documents": 15, "custom_fields": {"defined": 7, "maximum_allowed": 15}}}' + mock_response = '{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status": "active", "configuration_id": "configuration_id", "language": "language", "document_counts": {"available": 9, "processing": 10, "failed": 6, "pending": 7}, "disk_usage": {"used_bytes": 10}, "training_status": {"total_examples": 14, "available": false, "processing": true, "minimum_queries_added": false, "minimum_examples_added": true, "sufficient_label_diversity": true, "notices": 7, "successfully_trained": "2019-01-01T12:00:00.000Z", "data_updated": "2019-01-01T12:00:00.000Z"}, "crawl_status": {"source_crawl": {"status": "running", "next_crawl": "2019-01-01T12:00:00.000Z"}}, "smart_document_understanding": {"enabled": true, "total_annotated_pages": 21, "total_pages": 11, "total_documents": 15, "custom_fields": {"defined": 7, "maximum_allowed": 15}}}' responses.add(responses.GET, url, body=mock_response, @@ -1981,7 +2009,7 @@ def test_get_collection_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/environments/testString/collections/testString') - mock_response = '{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status": "active", 
"configuration_id": "configuration_id", "language": "language", "document_counts": {"available": 9, "processing": 10, "failed": 6, "pending": 7}, "disk_usage": {"used_bytes": 10}, "training_status": {"total_examples": 14, "available": false, "processing": true, "minimum_queries_added": false, "minimum_examples_added": true, "sufficient_label_diversity": true, "notices": 7, "successfully_trained": "2019-01-01T12:00:00.000Z", "data_updated": "2019-01-01T12:00:00.000Z"}, "crawl_status": {"source_crawl": {"status": "running", "next_crawl": "2019-01-01T12:00:00.000Z"}}, "smart_document_understanding": {"enabled": false, "total_annotated_pages": 21, "total_pages": 11, "total_documents": 15, "custom_fields": {"defined": 7, "maximum_allowed": 15}}}' + mock_response = '{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status": "active", "configuration_id": "configuration_id", "language": "language", "document_counts": {"available": 9, "processing": 10, "failed": 6, "pending": 7}, "disk_usage": {"used_bytes": 10}, "training_status": {"total_examples": 14, "available": false, "processing": true, "minimum_queries_added": false, "minimum_examples_added": true, "sufficient_label_diversity": true, "notices": 7, "successfully_trained": "2019-01-01T12:00:00.000Z", "data_updated": "2019-01-01T12:00:00.000Z"}, "crawl_status": {"source_crawl": {"status": "running", "next_crawl": "2019-01-01T12:00:00.000Z"}}, "smart_document_understanding": {"enabled": true, "total_annotated_pages": 21, "total_pages": 11, "total_documents": 15, "custom_fields": {"defined": 7, "maximum_allowed": 15}}}' responses.add(responses.GET, url, body=mock_response, @@ -2013,6 +2041,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2025,7 +2055,7 @@ def test_update_collection_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/environments/testString/collections/testString') - mock_response = '{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status": "active", "configuration_id": "configuration_id", "language": "language", "document_counts": {"available": 9, "processing": 10, "failed": 6, "pending": 7}, "disk_usage": {"used_bytes": 10}, "training_status": {"total_examples": 14, "available": false, "processing": true, "minimum_queries_added": false, "minimum_examples_added": true, "sufficient_label_diversity": true, "notices": 7, "successfully_trained": "2019-01-01T12:00:00.000Z", "data_updated": "2019-01-01T12:00:00.000Z"}, "crawl_status": {"source_crawl": {"status": "running", "next_crawl": "2019-01-01T12:00:00.000Z"}}, "smart_document_understanding": {"enabled": false, "total_annotated_pages": 21, "total_pages": 11, "total_documents": 15, "custom_fields": {"defined": 7, "maximum_allowed": 15}}}' + mock_response = '{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status": "active", "configuration_id": "configuration_id", "language": "language", "document_counts": {"available": 9, "processing": 10, "failed": 6, "pending": 7}, "disk_usage": {"used_bytes": 10}, "training_status": {"total_examples": 14, "available": false, "processing": true, "minimum_queries_added": false, "minimum_examples_added": true, "sufficient_label_diversity": true, "notices": 7, "successfully_trained": "2019-01-01T12:00:00.000Z", "data_updated": "2019-01-01T12:00:00.000Z"}, "crawl_status": {"source_crawl": {"status": "running", "next_crawl": "2019-01-01T12:00:00.000Z"}}, "smart_document_understanding": {"enabled": true, "total_annotated_pages": 21, "total_pages": 11, "total_documents": 15, "custom_fields": {"defined": 7, "maximum_allowed": 15}}}' responses.add(responses.PUT, url, body=mock_response, @@ -2066,7 +2096,7 @@ def test_update_collection_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/environments/testString/collections/testString') - mock_response = '{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status": "active", "configuration_id": "configuration_id", "language": "language", "document_counts": {"available": 9, "processing": 10, "failed": 6, "pending": 7}, "disk_usage": {"used_bytes": 10}, "training_status": {"total_examples": 14, "available": false, "processing": true, "minimum_queries_added": false, "minimum_examples_added": true, "sufficient_label_diversity": true, "notices": 7, "successfully_trained": "2019-01-01T12:00:00.000Z", "data_updated": "2019-01-01T12:00:00.000Z"}, "crawl_status": {"source_crawl": {"status": "running", "next_crawl": "2019-01-01T12:00:00.000Z"}}, "smart_document_understanding": {"enabled": false, "total_annotated_pages": 21, "total_pages": 11, "total_documents": 15, "custom_fields": {"defined": 7, "maximum_allowed": 15}}}' + mock_response = '{"collection_id": "collection_id", "name": "name", 
"description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status": "active", "configuration_id": "configuration_id", "language": "language", "document_counts": {"available": 9, "processing": 10, "failed": 6, "pending": 7}, "disk_usage": {"used_bytes": 10}, "training_status": {"total_examples": 14, "available": false, "processing": true, "minimum_queries_added": false, "minimum_examples_added": true, "sufficient_label_diversity": true, "notices": 7, "successfully_trained": "2019-01-01T12:00:00.000Z", "data_updated": "2019-01-01T12:00:00.000Z"}, "crawl_status": {"source_crawl": {"status": "running", "next_crawl": "2019-01-01T12:00:00.000Z"}}, "smart_document_understanding": {"enabled": true, "total_annotated_pages": 21, "total_pages": 11, "total_documents": 15, "custom_fields": {"defined": 7, "maximum_allowed": 15}}}' responses.add(responses.PUT, url, body=mock_response, @@ -2102,6 +2132,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2176,6 +2208,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2260,6 +2294,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2334,6 +2370,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2425,6 +2463,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2493,6 +2533,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2567,6 +2609,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2683,6 +2727,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2751,6 +2797,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2825,6 +2873,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2940,6 +2990,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -3018,6 +3070,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -3130,6 +3184,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -3208,6 +3264,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -3326,6 +3384,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -3414,6 +3474,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -3426,7 +3488,7 @@ def test_query_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/environments/testString/collections/testString/query') - mock_response = '{"matching_results": 16, "results": [{"id": "id", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "collection_id": "collection_id", "result_metadata": {"score": 5, "confidence": 10}}], "aggregations": [{"type": "histogram", "matching_results": 16, "field": "field", "interval": 8}], "passages": [{"document_id": "document_id", "passage_score": 13, "passage_text": "passage_text", "start_offset": 12, "end_offset": 10, "field": "field"}], "duplicates_removed": 18, "session_token": "session_token", "retrieval_details": {"document_retrieval_strategy": "untrained"}, "suggested_query": "suggested_query"}' + mock_response = '{"matching_results": 16, "results": [{"id": "id", "metadata": {"mapKey": "anyValue"}, "collection_id": "collection_id", "result_metadata": {"score": 5, "confidence": 10}}], "aggregations": [{"type": "histogram", "matching_results": 16, "field": "field", "interval": 8}], "passages": [{"document_id": "document_id", "passage_score": 13, "passage_text": "passage_text", "start_offset": 12, "end_offset": 10, "field": "field"}], "duplicates_removed": 18, "session_token": "session_token", "retrieval_details": {"document_retrieval_strategy": "untrained"}, "suggested_query": "suggested_query"}' responses.add(responses.POST, url, body=mock_response, @@ -3445,18 +3507,18 @@ def test_query_all_params(self): return_ = 'testString' offset = 38 sort = 'testString' - highlight = True + highlight = False passages_fields = 'testString' passages_count = 100 passages_characters = 50 - deduplicate = True + deduplicate = False deduplicate_field = 'testString' - similar = True + similar = False similar_document_ids = 'testString' similar_fields = 'testString' bias = 'testString' - spelling_suggestions = True - x_watson_logging_opt_out = True + spelling_suggestions = False + x_watson_logging_opt_out = False # Invoke method response = _service.query( @@ -3500,17 +3562,17 @@ def test_query_all_params(self): assert req_body['return'] == 'testString' assert req_body['offset'] == 38 assert req_body['sort'] == 'testString' - assert req_body['highlight'] == True + assert req_body['highlight'] == False assert req_body['passages.fields'] == 'testString' assert req_body['passages.count'] == 100 assert req_body['passages.characters'] == 50 - assert req_body['deduplicate'] == True + assert req_body['deduplicate'] == False assert req_body['deduplicate.field'] == 'testString' - assert req_body['similar'] == True + assert req_body['similar'] == False assert req_body['similar.document_ids'] == 'testString' assert req_body['similar.fields'] == 'testString' assert req_body['bias'] == 'testString' - assert req_body['spelling_suggestions'] == True + assert req_body['spelling_suggestions'] == False @responses.activate @@ -3520,7 +3582,7 @@ def test_query_required_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/environments/testString/collections/testString/query') - mock_response = '{"matching_results": 16, "results": [{"id": "id", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "collection_id": "collection_id", "result_metadata": {"score": 5, "confidence": 10}}], "aggregations": [{"type": 
"histogram", "matching_results": 16, "field": "field", "interval": 8}], "passages": [{"document_id": "document_id", "passage_score": 13, "passage_text": "passage_text", "start_offset": 12, "end_offset": 10, "field": "field"}], "duplicates_removed": 18, "session_token": "session_token", "retrieval_details": {"document_retrieval_strategy": "untrained"}, "suggested_query": "suggested_query"}' + mock_response = '{"matching_results": 16, "results": [{"id": "id", "metadata": {"mapKey": "anyValue"}, "collection_id": "collection_id", "result_metadata": {"score": 5, "confidence": 10}}], "aggregations": [{"type": "histogram", "matching_results": 16, "field": "field", "interval": 8}], "passages": [{"document_id": "document_id", "passage_score": 13, "passage_text": "passage_text", "start_offset": 12, "end_offset": 10, "field": "field"}], "duplicates_removed": 18, "session_token": "session_token", "retrieval_details": {"document_retrieval_strategy": "untrained"}, "suggested_query": "suggested_query"}' responses.add(responses.POST, url, body=mock_response, @@ -3550,7 +3612,7 @@ def test_query_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/environments/testString/collections/testString/query') - mock_response = '{"matching_results": 16, "results": [{"id": "id", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "collection_id": "collection_id", "result_metadata": {"score": 5, "confidence": 10}}], "aggregations": [{"type": "histogram", "matching_results": 16, "field": "field", "interval": 8}], "passages": [{"document_id": "document_id", "passage_score": 13, "passage_text": "passage_text", "start_offset": 12, "end_offset": 10, "field": "field"}], "duplicates_removed": 18, "session_token": "session_token", "retrieval_details": {"document_retrieval_strategy": "untrained"}, "suggested_query": "suggested_query"}' + mock_response = '{"matching_results": 16, "results": [{"id": "id", "metadata": {"mapKey": "anyValue"}, "collection_id": "collection_id", "result_metadata": {"score": 5, "confidence": 10}}], "aggregations": [{"type": "histogram", "matching_results": 16, "field": "field", "interval": 8}], "passages": [{"document_id": "document_id", "passage_score": 13, "passage_text": "passage_text", "start_offset": 12, "end_offset": 10, "field": "field"}], "duplicates_removed": 18, "session_token": "session_token", "retrieval_details": {"document_retrieval_strategy": "untrained"}, "suggested_query": "suggested_query"}' responses.add(responses.POST, url, body=mock_response, @@ -3582,6 +3644,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -3594,7 +3658,7 @@ def test_query_notices_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/environments/testString/collections/testString/notices') - mock_response = '{"matching_results": 16, "results": [{"id": "id", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "collection_id": "collection_id", "result_metadata": {"score": 5, "confidence": 10}, "code": 4, "filename": "filename", "file_type": "pdf", "sha1": "sha1", "notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}]}], "aggregations": [{"type": "histogram", "matching_results": 16, "field": "field", "interval": 8}], "passages": [{"document_id": "document_id", "passage_score": 13, "passage_text": "passage_text", "start_offset": 12, "end_offset": 10, "field": "field"}], "duplicates_removed": 18}' + mock_response = '{"matching_results": 16, "results": [{"id": "id", "metadata": {"mapKey": "anyValue"}, "collection_id": "collection_id", "result_metadata": {"score": 5, "confidence": 10}, "code": 4, "filename": "filename", "file_type": "pdf", "sha1": "sha1", "notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}]}], "aggregations": [{"type": "histogram", "matching_results": 16, "field": "field", "interval": 8}], "passages": [{"document_id": "document_id", "passage_score": 13, "passage_text": "passage_text", "start_offset": 12, "end_offset": 10, "field": "field"}], "duplicates_removed": 18}' responses.add(responses.GET, url, body=mock_response, @@ -3613,12 +3677,12 @@ def test_query_notices_all_params(self): return_ = ['testString'] offset = 38 sort = ['testString'] - highlight = True + highlight = False passages_fields = ['testString'] passages_count = 100 passages_characters = 50 deduplicate_field = 'testString' - similar = True + similar = False similar_document_ids = ['testString'] similar_fields = ['testString'] @@ -3678,7 +3742,7 @@ def test_query_notices_required_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/environments/testString/collections/testString/notices') - mock_response = '{"matching_results": 16, "results": [{"id": "id", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "collection_id": "collection_id", "result_metadata": {"score": 5, "confidence": 10}, "code": 4, "filename": "filename", "file_type": "pdf", "sha1": "sha1", "notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}]}], "aggregations": [{"type": "histogram", "matching_results": 16, "field": "field", "interval": 8}], "passages": [{"document_id": "document_id", "passage_score": 13, "passage_text": "passage_text", "start_offset": 12, "end_offset": 10, "field": "field"}], "duplicates_removed": 18}' + mock_response = '{"matching_results": 16, "results": [{"id": "id", "metadata": {"mapKey": "anyValue"}, "collection_id": "collection_id", "result_metadata": {"score": 5, "confidence": 10}, "code": 4, "filename": "filename", "file_type": "pdf", "sha1": "sha1", 
"notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}]}], "aggregations": [{"type": "histogram", "matching_results": 16, "field": "field", "interval": 8}], "passages": [{"document_id": "document_id", "passage_score": 13, "passage_text": "passage_text", "start_offset": 12, "end_offset": 10, "field": "field"}], "duplicates_removed": 18}' responses.add(responses.GET, url, body=mock_response, @@ -3708,7 +3772,7 @@ def test_query_notices_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/environments/testString/collections/testString/notices') - mock_response = '{"matching_results": 16, "results": [{"id": "id", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "collection_id": "collection_id", "result_metadata": {"score": 5, "confidence": 10}, "code": 4, "filename": "filename", "file_type": "pdf", "sha1": "sha1", "notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}]}], "aggregations": [{"type": "histogram", "matching_results": 16, "field": "field", "interval": 8}], "passages": [{"document_id": "document_id", "passage_score": 13, "passage_text": "passage_text", "start_offset": 12, "end_offset": 10, "field": "field"}], "duplicates_removed": 18}' + mock_response = '{"matching_results": 16, "results": [{"id": "id", "metadata": {"mapKey": "anyValue"}, "collection_id": "collection_id", "result_metadata": {"score": 5, "confidence": 10}, "code": 4, "filename": "filename", "file_type": "pdf", "sha1": "sha1", "notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}]}], "aggregations": [{"type": "histogram", "matching_results": 16, "field": "field", "interval": 8}], "passages": [{"document_id": "document_id", "passage_score": 13, "passage_text": "passage_text", "start_offset": 12, "end_offset": 10, "field": "field"}], "duplicates_removed": 18}' responses.add(responses.GET, url, body=mock_response, @@ -3740,6 +3804,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -3752,7 +3818,7 @@ def test_federated_query_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/environments/testString/query') - mock_response = '{"matching_results": 16, "results": [{"id": "id", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "collection_id": "collection_id", "result_metadata": {"score": 5, "confidence": 10}}], "aggregations": [{"type": "histogram", "matching_results": 16, "field": "field", "interval": 8}], "passages": [{"document_id": "document_id", "passage_score": 13, "passage_text": "passage_text", "start_offset": 12, "end_offset": 10, "field": "field"}], "duplicates_removed": 18, "session_token": "session_token", "retrieval_details": {"document_retrieval_strategy": "untrained"}, "suggested_query": "suggested_query"}' + mock_response = '{"matching_results": 16, "results": [{"id": "id", "metadata": {"mapKey": "anyValue"}, "collection_id": "collection_id", "result_metadata": {"score": 5, "confidence": 10}}], "aggregations": [{"type": "histogram", "matching_results": 16, "field": "field", "interval": 8}], "passages": [{"document_id": "document_id", "passage_score": 13, "passage_text": "passage_text", "start_offset": 12, "end_offset": 10, "field": "field"}], "duplicates_removed": 18, "session_token": "session_token", "retrieval_details": {"document_retrieval_strategy": "untrained"}, "suggested_query": "suggested_query"}' responses.add(responses.POST, url, body=mock_response, @@ -3771,17 +3837,17 @@ def test_federated_query_all_params(self): return_ = 'testString' offset = 38 sort = 'testString' - highlight = True + highlight = False passages_fields = 'testString' passages_count = 100 passages_characters = 50 - deduplicate = True + deduplicate = False deduplicate_field = 'testString' - similar = True + similar = False similar_document_ids = 'testString' similar_fields = 'testString' bias = 'testString' - x_watson_logging_opt_out = True + x_watson_logging_opt_out = False # Invoke method response = _service.federated_query( @@ -3825,13 +3891,13 @@ def test_federated_query_all_params(self): assert req_body['return'] == 'testString' assert req_body['offset'] == 38 assert req_body['sort'] == 'testString' - assert req_body['highlight'] == True + assert req_body['highlight'] == False assert req_body['passages.fields'] == 'testString' assert req_body['passages.count'] == 100 assert req_body['passages.characters'] == 50 - assert req_body['deduplicate'] == True + assert req_body['deduplicate'] == False assert req_body['deduplicate.field'] == 'testString' - assert req_body['similar'] == True + assert req_body['similar'] == False assert req_body['similar.document_ids'] == 'testString' assert req_body['similar.fields'] == 'testString' assert req_body['bias'] == 'testString' @@ -3844,7 +3910,7 @@ def test_federated_query_required_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/environments/testString/query') - mock_response = '{"matching_results": 16, "results": [{"id": "id", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "collection_id": "collection_id", "result_metadata": {"score": 5, "confidence": 10}}], "aggregations": [{"type": "histogram", "matching_results": 16, "field": "field", "interval": 8}], "passages": [{"document_id": "document_id", "passage_score": 13, "passage_text": "passage_text", 
"start_offset": 12, "end_offset": 10, "field": "field"}], "duplicates_removed": 18, "session_token": "session_token", "retrieval_details": {"document_retrieval_strategy": "untrained"}, "suggested_query": "suggested_query"}' + mock_response = '{"matching_results": 16, "results": [{"id": "id", "metadata": {"mapKey": "anyValue"}, "collection_id": "collection_id", "result_metadata": {"score": 5, "confidence": 10}}], "aggregations": [{"type": "histogram", "matching_results": 16, "field": "field", "interval": 8}], "passages": [{"document_id": "document_id", "passage_score": 13, "passage_text": "passage_text", "start_offset": 12, "end_offset": 10, "field": "field"}], "duplicates_removed": 18, "session_token": "session_token", "retrieval_details": {"document_retrieval_strategy": "untrained"}, "suggested_query": "suggested_query"}' responses.add(responses.POST, url, body=mock_response, @@ -3863,13 +3929,13 @@ def test_federated_query_required_params(self): return_ = 'testString' offset = 38 sort = 'testString' - highlight = True + highlight = False passages_fields = 'testString' passages_count = 100 passages_characters = 50 - deduplicate = True + deduplicate = False deduplicate_field = 'testString' - similar = True + similar = False similar_document_ids = 'testString' similar_fields = 'testString' bias = 'testString' @@ -3915,13 +3981,13 @@ def test_federated_query_required_params(self): assert req_body['return'] == 'testString' assert req_body['offset'] == 38 assert req_body['sort'] == 'testString' - assert req_body['highlight'] == True + assert req_body['highlight'] == False assert req_body['passages.fields'] == 'testString' assert req_body['passages.count'] == 100 assert req_body['passages.characters'] == 50 - assert req_body['deduplicate'] == True + assert req_body['deduplicate'] == False assert req_body['deduplicate.field'] == 'testString' - assert req_body['similar'] == True + assert req_body['similar'] == False assert req_body['similar.document_ids'] == 'testString' assert req_body['similar.fields'] == 'testString' assert req_body['bias'] == 'testString' @@ -3934,7 +4000,7 @@ def test_federated_query_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/environments/testString/query') - mock_response = '{"matching_results": 16, "results": [{"id": "id", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "collection_id": "collection_id", "result_metadata": {"score": 5, "confidence": 10}}], "aggregations": [{"type": "histogram", "matching_results": 16, "field": "field", "interval": 8}], "passages": [{"document_id": "document_id", "passage_score": 13, "passage_text": "passage_text", "start_offset": 12, "end_offset": 10, "field": "field"}], "duplicates_removed": 18, "session_token": "session_token", "retrieval_details": {"document_retrieval_strategy": "untrained"}, "suggested_query": "suggested_query"}' + mock_response = '{"matching_results": 16, "results": [{"id": "id", "metadata": {"mapKey": "anyValue"}, "collection_id": "collection_id", "result_metadata": {"score": 5, "confidence": 10}}], "aggregations": [{"type": "histogram", "matching_results": 16, "field": "field", "interval": 8}], "passages": [{"document_id": "document_id", "passage_score": 13, "passage_text": "passage_text", "start_offset": 12, "end_offset": 10, "field": "field"}], "duplicates_removed": 18, "session_token": "session_token", "retrieval_details": {"document_retrieval_strategy": "untrained"}, "suggested_query": "suggested_query"}' responses.add(responses.POST, url, body=mock_response, @@ -3953,13 
+4019,13 @@ def test_federated_query_value_error(self): return_ = 'testString' offset = 38 sort = 'testString' - highlight = True + highlight = False passages_fields = 'testString' passages_count = 100 passages_characters = 50 - deduplicate = True + deduplicate = False deduplicate_field = 'testString' - similar = True + similar = False similar_document_ids = 'testString' similar_fields = 'testString' bias = 'testString' @@ -3985,6 +4051,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -3997,7 +4065,7 @@ def test_federated_query_notices_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/environments/testString/notices') - mock_response = '{"matching_results": 16, "results": [{"id": "id", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "collection_id": "collection_id", "result_metadata": {"score": 5, "confidence": 10}, "code": 4, "filename": "filename", "file_type": "pdf", "sha1": "sha1", "notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}]}], "aggregations": [{"type": "histogram", "matching_results": 16, "field": "field", "interval": 8}], "passages": [{"document_id": "document_id", "passage_score": 13, "passage_text": "passage_text", "start_offset": 12, "end_offset": 10, "field": "field"}], "duplicates_removed": 18}' + mock_response = '{"matching_results": 16, "results": [{"id": "id", "metadata": {"mapKey": "anyValue"}, "collection_id": "collection_id", "result_metadata": {"score": 5, "confidence": 10}, "code": 4, "filename": "filename", "file_type": "pdf", "sha1": "sha1", "notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}]}], "aggregations": [{"type": "histogram", "matching_results": 16, "field": "field", "interval": 8}], "passages": [{"document_id": "document_id", "passage_score": 13, "passage_text": "passage_text", "start_offset": 12, "end_offset": 10, "field": "field"}], "duplicates_removed": 18}' responses.add(responses.GET, url, body=mock_response, @@ -4015,9 +4083,9 @@ def test_federated_query_notices_all_params(self): return_ = ['testString'] offset = 38 sort = ['testString'] - highlight = True + highlight = False deduplicate_field = 'testString' - similar = True + similar = False similar_document_ids = ['testString'] similar_fields = ['testString'] @@ -4070,7 +4138,7 @@ def test_federated_query_notices_required_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/environments/testString/notices') - mock_response = '{"matching_results": 16, "results": [{"id": "id", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "collection_id": "collection_id", "result_metadata": {"score": 5, "confidence": 10}, "code": 4, "filename": "filename", "file_type": "pdf", "sha1": "sha1", "notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}]}], "aggregations": [{"type": "histogram", "matching_results": 16, "field": 
"field", "interval": 8}], "passages": [{"document_id": "document_id", "passage_score": 13, "passage_text": "passage_text", "start_offset": 12, "end_offset": 10, "field": "field"}], "duplicates_removed": 18}' + mock_response = '{"matching_results": 16, "results": [{"id": "id", "metadata": {"mapKey": "anyValue"}, "collection_id": "collection_id", "result_metadata": {"score": 5, "confidence": 10}, "code": 4, "filename": "filename", "file_type": "pdf", "sha1": "sha1", "notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}]}], "aggregations": [{"type": "histogram", "matching_results": 16, "field": "field", "interval": 8}], "passages": [{"document_id": "document_id", "passage_score": 13, "passage_text": "passage_text", "start_offset": 12, "end_offset": 10, "field": "field"}], "duplicates_removed": 18}' responses.add(responses.GET, url, body=mock_response, @@ -4104,7 +4172,7 @@ def test_federated_query_notices_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/environments/testString/notices') - mock_response = '{"matching_results": 16, "results": [{"id": "id", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "collection_id": "collection_id", "result_metadata": {"score": 5, "confidence": 10}, "code": 4, "filename": "filename", "file_type": "pdf", "sha1": "sha1", "notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}]}], "aggregations": [{"type": "histogram", "matching_results": 16, "field": "field", "interval": 8}], "passages": [{"document_id": "document_id", "passage_score": 13, "passage_text": "passage_text", "start_offset": 12, "end_offset": 10, "field": "field"}], "duplicates_removed": 18}' + mock_response = '{"matching_results": 16, "results": [{"id": "id", "metadata": {"mapKey": "anyValue"}, "collection_id": "collection_id", "result_metadata": {"score": 5, "confidence": 10}, "code": 4, "filename": "filename", "file_type": "pdf", "sha1": "sha1", "notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}]}], "aggregations": [{"type": "histogram", "matching_results": 16, "field": "field", "interval": 8}], "passages": [{"document_id": "document_id", "passage_score": 13, "passage_text": "passage_text", "start_offset": 12, "end_offset": 10, "field": "field"}], "duplicates_removed": 18}' responses.add(responses.GET, url, body=mock_response, @@ -4136,6 +4204,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -4270,6 +4340,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -4344,6 +4416,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -4444,6 +4518,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -4512,6 +4588,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -4590,6 +4668,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -4662,6 +4742,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -4740,6 +4822,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -4832,6 +4916,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -4908,6 +4994,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -5000,6 +5088,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -5092,6 +5182,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -5170,6 +5262,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -5193,7 +5287,7 @@ def test_create_event_all_params(self): event_data_model = {} event_data_model['environment_id'] = 'testString' event_data_model['session_token'] = 'testString' - event_data_model['client_timestamp'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + event_data_model['client_timestamp'] = "2019-01-01T12:00:00Z" event_data_model['display_rank'] = 38 event_data_model['collection_id'] = 'testString' event_data_model['document_id'] = 'testString' @@ -5236,7 +5330,7 @@ def test_create_event_value_error(self): event_data_model = {} event_data_model['environment_id'] = 'testString' event_data_model['session_token'] = 'testString' - event_data_model['client_timestamp'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + event_data_model['client_timestamp'] = "2019-01-01T12:00:00Z" event_data_model['display_rank'] = 38 event_data_model['collection_id'] = 'testString' event_data_model['document_id'] = 'testString' @@ -5266,6 +5360,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -5371,6 +5467,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -5468,6 +5566,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -5565,6 +5665,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -5662,6 +5764,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -5759,6 +5863,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -5862,6 +5968,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -5874,7 +5982,7 @@ def test_list_credentials_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/environments/testString/credentials') - mock_response = '{"credentials": [{"credential_id": "credential_id", "source_type": "box", "credential_details": {"credential_type": "oauth2", "client_id": "client_id", "enterprise_id": "enterprise_id", "url": "url", "username": "username", "organization_url": "organization_url", "site_collection.path": "site_collection_path", "client_secret": "client_secret", "public_key_id": "public_key_id", "private_key": "private_key", "passphrase": "passphrase", "password": "password", "gateway_id": "gateway_id", "source_version": "online", "web_application_url": "web_application_url", "domain": "domain", "endpoint": "endpoint", "access_key_id": "access_key_id", "secret_access_key": "secret_access_key"}, "status": "connected"}]}' + mock_response = '{"credentials": [{"credential_id": "credential_id", "source_type": "box", "credential_details": {"credential_type": "oauth2", "client_id": "client_id", "enterprise_id": "enterprise_id", "url": "url", "username": "username", "organization_url": "organization_url", "site_collection.path": "site_collection_path", "client_secret": "client_secret", "public_key_id": "public_key_id", "private_key": "private_key", "passphrase": "passphrase", "password": "password", "gateway_id": "gateway_id", "source_version": "online", "web_application_url": "web_application_url", "domain": "domain", "endpoint": "endpoint", "access_key_id": "access_key_id", "secret_access_key": "secret_access_key"}, "status": {"authenticated": false, "error_message": "error_message"}}]}' responses.add(responses.GET, url, body=mock_response, @@ -5902,7 +6010,7 @@ def test_list_credentials_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/environments/testString/credentials') - mock_response = '{"credentials": [{"credential_id": "credential_id", "source_type": "box", "credential_details": {"credential_type": "oauth2", "client_id": "client_id", "enterprise_id": "enterprise_id", "url": "url", "username": "username", "organization_url": "organization_url", "site_collection.path": "site_collection_path", "client_secret": "client_secret", "public_key_id": "public_key_id", "private_key": "private_key", "passphrase": "passphrase", "password": "password", "gateway_id": "gateway_id", "source_version": "online", "web_application_url": "web_application_url", "domain": "domain", "endpoint": "endpoint", "access_key_id": "access_key_id", "secret_access_key": "secret_access_key"}, 
"status": "connected"}]}' + mock_response = '{"credentials": [{"credential_id": "credential_id", "source_type": "box", "credential_details": {"credential_type": "oauth2", "client_id": "client_id", "enterprise_id": "enterprise_id", "url": "url", "username": "username", "organization_url": "organization_url", "site_collection.path": "site_collection_path", "client_secret": "client_secret", "public_key_id": "public_key_id", "private_key": "private_key", "passphrase": "passphrase", "password": "password", "gateway_id": "gateway_id", "source_version": "online", "web_application_url": "web_application_url", "domain": "domain", "endpoint": "endpoint", "access_key_id": "access_key_id", "secret_access_key": "secret_access_key"}, "status": {"authenticated": false, "error_message": "error_message"}}]}' responses.add(responses.GET, url, body=mock_response, @@ -5932,6 +6040,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -5944,7 +6054,7 @@ def test_create_credentials_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/environments/testString/credentials') - mock_response = '{"credential_id": "credential_id", "source_type": "box", "credential_details": {"credential_type": "oauth2", "client_id": "client_id", "enterprise_id": "enterprise_id", "url": "url", "username": "username", "organization_url": "organization_url", "site_collection.path": "site_collection_path", "client_secret": "client_secret", "public_key_id": "public_key_id", "private_key": "private_key", "passphrase": "passphrase", "password": "password", "gateway_id": "gateway_id", "source_version": "online", "web_application_url": "web_application_url", "domain": "domain", "endpoint": "endpoint", "access_key_id": "access_key_id", "secret_access_key": "secret_access_key"}, "status": "connected"}' + mock_response = '{"credential_id": "credential_id", "source_type": "box", "credential_details": {"credential_type": "oauth2", "client_id": "client_id", "enterprise_id": "enterprise_id", "url": "url", "username": "username", "organization_url": "organization_url", "site_collection.path": "site_collection_path", "client_secret": "client_secret", "public_key_id": "public_key_id", "private_key": "private_key", "passphrase": "passphrase", "password": "password", "gateway_id": "gateway_id", "source_version": "online", "web_application_url": "web_application_url", "domain": "domain", "endpoint": "endpoint", "access_key_id": "access_key_id", "secret_access_key": "secret_access_key"}, "status": {"authenticated": false, "error_message": "error_message"}}' responses.add(responses.POST, url, body=mock_response, @@ -5973,11 +6083,16 @@ def test_create_credentials_all_params(self): credential_details_model['access_key_id'] = 'testString' credential_details_model['secret_access_key'] = 'testString' + # Construct a dict representation of a StatusDetails model + status_details_model = {} + status_details_model['authenticated'] = True + status_details_model['error_message'] = 'testString' + # Set up parameter values environment_id = 'testString' source_type = 'box' credential_details = credential_details_model - status = 'connected' + status = status_details_model # Invoke method response = _service.create_credentials( @@ -5995,7 +6110,7 @@ def 
test_create_credentials_all_params(self): req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) assert req_body['source_type'] == 'box' assert req_body['credential_details'] == credential_details_model - assert req_body['status'] == 'connected' + assert req_body['status'] == status_details_model @responses.activate @@ -6005,7 +6120,7 @@ def test_create_credentials_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/environments/testString/credentials') - mock_response = '{"credential_id": "credential_id", "source_type": "box", "credential_details": {"credential_type": "oauth2", "client_id": "client_id", "enterprise_id": "enterprise_id", "url": "url", "username": "username", "organization_url": "organization_url", "site_collection.path": "site_collection_path", "client_secret": "client_secret", "public_key_id": "public_key_id", "private_key": "private_key", "passphrase": "passphrase", "password": "password", "gateway_id": "gateway_id", "source_version": "online", "web_application_url": "web_application_url", "domain": "domain", "endpoint": "endpoint", "access_key_id": "access_key_id", "secret_access_key": "secret_access_key"}, "status": "connected"}' + mock_response = '{"credential_id": "credential_id", "source_type": "box", "credential_details": {"credential_type": "oauth2", "client_id": "client_id", "enterprise_id": "enterprise_id", "url": "url", "username": "username", "organization_url": "organization_url", "site_collection.path": "site_collection_path", "client_secret": "client_secret", "public_key_id": "public_key_id", "private_key": "private_key", "passphrase": "passphrase", "password": "password", "gateway_id": "gateway_id", "source_version": "online", "web_application_url": "web_application_url", "domain": "domain", "endpoint": "endpoint", "access_key_id": "access_key_id", "secret_access_key": "secret_access_key"}, "status": {"authenticated": false, "error_message": "error_message"}}' responses.add(responses.POST, url, body=mock_response, @@ -6034,11 +6149,16 @@ def test_create_credentials_value_error(self): credential_details_model['access_key_id'] = 'testString' credential_details_model['secret_access_key'] = 'testString' + # Construct a dict representation of a StatusDetails model + status_details_model = {} + status_details_model['authenticated'] = True + status_details_model['error_message'] = 'testString' + # Set up parameter values environment_id = 'testString' source_type = 'box' credential_details = credential_details_model - status = 'connected' + status = status_details_model # Pass in all but one required param and check for a ValueError req_param_dict = { @@ -6060,6 +6180,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
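Note: status on credentials is no longer the bare string 'connected' but a structured StatusDetails object, which the tests now build as a plain dict the same way they build CredentialDetails. The change in wire shape, in miniature (field values illustrative):

import json

status_details_model = {
    'authenticated': False,            # structured replacement for the old 'connected' string
    'error_message': 'error_message',  # populated when authentication fails
}
payload = {'source_type': 'box', 'status': status_details_model}
print(json.dumps(payload))
# {"source_type": "box", "status": {"authenticated": false, "error_message": "error_message"}}
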
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -6072,7 +6194,7 @@ def test_get_credentials_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/environments/testString/credentials/testString') - mock_response = '{"credential_id": "credential_id", "source_type": "box", "credential_details": {"credential_type": "oauth2", "client_id": "client_id", "enterprise_id": "enterprise_id", "url": "url", "username": "username", "organization_url": "organization_url", "site_collection.path": "site_collection_path", "client_secret": "client_secret", "public_key_id": "public_key_id", "private_key": "private_key", "passphrase": "passphrase", "password": "password", "gateway_id": "gateway_id", "source_version": "online", "web_application_url": "web_application_url", "domain": "domain", "endpoint": "endpoint", "access_key_id": "access_key_id", "secret_access_key": "secret_access_key"}, "status": "connected"}' + mock_response = '{"credential_id": "credential_id", "source_type": "box", "credential_details": {"credential_type": "oauth2", "client_id": "client_id", "enterprise_id": "enterprise_id", "url": "url", "username": "username", "organization_url": "organization_url", "site_collection.path": "site_collection_path", "client_secret": "client_secret", "public_key_id": "public_key_id", "private_key": "private_key", "passphrase": "passphrase", "password": "password", "gateway_id": "gateway_id", "source_version": "online", "web_application_url": "web_application_url", "domain": "domain", "endpoint": "endpoint", "access_key_id": "access_key_id", "secret_access_key": "secret_access_key"}, "status": {"authenticated": false, "error_message": "error_message"}}' responses.add(responses.GET, url, body=mock_response, @@ -6102,7 +6224,7 @@ def test_get_credentials_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/environments/testString/credentials/testString') - mock_response = '{"credential_id": "credential_id", "source_type": "box", "credential_details": {"credential_type": "oauth2", "client_id": "client_id", "enterprise_id": "enterprise_id", "url": "url", "username": "username", "organization_url": "organization_url", "site_collection.path": "site_collection_path", "client_secret": "client_secret", "public_key_id": "public_key_id", "private_key": "private_key", "passphrase": "passphrase", "password": "password", "gateway_id": "gateway_id", "source_version": "online", "web_application_url": "web_application_url", "domain": "domain", "endpoint": "endpoint", "access_key_id": "access_key_id", "secret_access_key": "secret_access_key"}, "status": "connected"}' + mock_response = '{"credential_id": "credential_id", "source_type": "box", "credential_details": {"credential_type": "oauth2", "client_id": "client_id", "enterprise_id": "enterprise_id", "url": "url", "username": "username", "organization_url": "organization_url", "site_collection.path": "site_collection_path", "client_secret": "client_secret", "public_key_id": "public_key_id", "private_key": "private_key", "passphrase": "passphrase", "password": "password", "gateway_id": "gateway_id", "source_version": "online", "web_application_url": "web_application_url", "domain": "domain", "endpoint": "endpoint", "access_key_id": "access_key_id", "secret_access_key": "secret_access_key"}, "status": {"authenticated": false, "error_message": 
"error_message"}}' responses.add(responses.GET, url, body=mock_response, @@ -6134,6 +6256,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -6146,7 +6270,7 @@ def test_update_credentials_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/environments/testString/credentials/testString') - mock_response = '{"credential_id": "credential_id", "source_type": "box", "credential_details": {"credential_type": "oauth2", "client_id": "client_id", "enterprise_id": "enterprise_id", "url": "url", "username": "username", "organization_url": "organization_url", "site_collection.path": "site_collection_path", "client_secret": "client_secret", "public_key_id": "public_key_id", "private_key": "private_key", "passphrase": "passphrase", "password": "password", "gateway_id": "gateway_id", "source_version": "online", "web_application_url": "web_application_url", "domain": "domain", "endpoint": "endpoint", "access_key_id": "access_key_id", "secret_access_key": "secret_access_key"}, "status": "connected"}' + mock_response = '{"credential_id": "credential_id", "source_type": "box", "credential_details": {"credential_type": "oauth2", "client_id": "client_id", "enterprise_id": "enterprise_id", "url": "url", "username": "username", "organization_url": "organization_url", "site_collection.path": "site_collection_path", "client_secret": "client_secret", "public_key_id": "public_key_id", "private_key": "private_key", "passphrase": "passphrase", "password": "password", "gateway_id": "gateway_id", "source_version": "online", "web_application_url": "web_application_url", "domain": "domain", "endpoint": "endpoint", "access_key_id": "access_key_id", "secret_access_key": "secret_access_key"}, "status": {"authenticated": false, "error_message": "error_message"}}' responses.add(responses.PUT, url, body=mock_response, @@ -6175,12 +6299,17 @@ def test_update_credentials_all_params(self): credential_details_model['access_key_id'] = 'testString' credential_details_model['secret_access_key'] = 'testString' + # Construct a dict representation of a StatusDetails model + status_details_model = {} + status_details_model['authenticated'] = True + status_details_model['error_message'] = 'testString' + # Set up parameter values environment_id = 'testString' credential_id = 'testString' source_type = 'box' credential_details = credential_details_model - status = 'connected' + status = status_details_model # Invoke method response = _service.update_credentials( @@ -6199,7 +6328,7 @@ def test_update_credentials_all_params(self): req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) assert req_body['source_type'] == 'box' assert req_body['credential_details'] == credential_details_model - assert req_body['status'] == 'connected' + assert req_body['status'] == status_details_model @responses.activate @@ -6209,7 +6338,7 @@ def test_update_credentials_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/environments/testString/credentials/testString') - mock_response = '{"credential_id": "credential_id", "source_type": "box", "credential_details": {"credential_type": "oauth2", "client_id": "client_id", "enterprise_id": "enterprise_id", "url": "url", "username": "username", 
"organization_url": "organization_url", "site_collection.path": "site_collection_path", "client_secret": "client_secret", "public_key_id": "public_key_id", "private_key": "private_key", "passphrase": "passphrase", "password": "password", "gateway_id": "gateway_id", "source_version": "online", "web_application_url": "web_application_url", "domain": "domain", "endpoint": "endpoint", "access_key_id": "access_key_id", "secret_access_key": "secret_access_key"}, "status": "connected"}' + mock_response = '{"credential_id": "credential_id", "source_type": "box", "credential_details": {"credential_type": "oauth2", "client_id": "client_id", "enterprise_id": "enterprise_id", "url": "url", "username": "username", "organization_url": "organization_url", "site_collection.path": "site_collection_path", "client_secret": "client_secret", "public_key_id": "public_key_id", "private_key": "private_key", "passphrase": "passphrase", "password": "password", "gateway_id": "gateway_id", "source_version": "online", "web_application_url": "web_application_url", "domain": "domain", "endpoint": "endpoint", "access_key_id": "access_key_id", "secret_access_key": "secret_access_key"}, "status": {"authenticated": false, "error_message": "error_message"}}' responses.add(responses.PUT, url, body=mock_response, @@ -6238,12 +6367,17 @@ def test_update_credentials_value_error(self): credential_details_model['access_key_id'] = 'testString' credential_details_model['secret_access_key'] = 'testString' + # Construct a dict representation of a StatusDetails model + status_details_model = {} + status_details_model['authenticated'] = True + status_details_model['error_message'] = 'testString' + # Set up parameter values environment_id = 'testString' credential_id = 'testString' source_type = 'box' credential_details = credential_details_model - status = 'connected' + status = status_details_model # Pass in all but one required param and check for a ValueError req_param_dict = { @@ -6266,6 +6400,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -6350,6 +6486,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -6420,6 +6558,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -6523,6 +6663,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -6597,6 +6739,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -6672,7 +6816,7 @@ def test_delete_gateway_value_error(self): # Start of Model Tests ############################################################################## # region -class TestAggregationResult(): +class TestModel_AggregationResult(): """ Test Class for AggregationResult """ @@ -6702,7 +6846,7 @@ def test_aggregation_result_serialization(self): aggregation_result_model_json2 = aggregation_result_model.to_dict() assert aggregation_result_model_json2 == aggregation_result_model_json -class TestCollection(): +class TestModel_Collection(): """ Test Class for Collection """ @@ -6731,12 +6875,12 @@ def test_collection_serialization(self): training_status_model['minimum_examples_added'] = False training_status_model['sufficient_label_diversity'] = False training_status_model['notices'] = 0 - training_status_model['successfully_trained'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - training_status_model['data_updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + training_status_model['successfully_trained'] = "2019-01-01T12:00:00Z" + training_status_model['data_updated'] = "2019-01-01T12:00:00Z" source_status_model = {} # SourceStatus source_status_model['status'] = 'complete' - source_status_model['next_crawl'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + source_status_model['next_crawl'] = "2019-01-01T12:00:00Z" collection_crawl_status_model = {} # CollectionCrawlStatus collection_crawl_status_model['source_crawl'] = source_status_model @@ -6757,8 +6901,8 @@ def test_collection_serialization(self): collection_model_json['collection_id'] = 'testString' collection_model_json['name'] = 'testString' collection_model_json['description'] = 'testString' - collection_model_json['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - collection_model_json['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + collection_model_json['created'] = "2019-01-01T12:00:00Z" + collection_model_json['updated'] = "2019-01-01T12:00:00Z" collection_model_json['status'] = 'active' collection_model_json['configuration_id'] = 'testString' collection_model_json['language'] = 'testString' @@ -6783,7 +6927,7 @@ def test_collection_serialization(self): collection_model_json2 = collection_model.to_dict() assert collection_model_json2 == collection_model_json -class TestCollectionCrawlStatus(): +class TestModel_CollectionCrawlStatus(): """ Test Class for CollectionCrawlStatus """ @@ -6797,7 +6941,7 @@ def test_collection_crawl_status_serialization(self): source_status_model = {} # SourceStatus source_status_model['status'] = 'running' - source_status_model['next_crawl'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + source_status_model['next_crawl'] = "2019-01-01T12:00:00Z" # Construct a json representation of a CollectionCrawlStatus model collection_crawl_status_model_json = {} @@ 
-6818,7 +6962,7 @@ def test_collection_crawl_status_serialization(self): collection_crawl_status_model_json2 = collection_crawl_status_model.to_dict() assert collection_crawl_status_model_json2 == collection_crawl_status_model_json -class TestCollectionDiskUsage(): +class TestModel_CollectionDiskUsage(): """ Test Class for CollectionDiskUsage """ @@ -6847,7 +6991,7 @@ def test_collection_disk_usage_serialization(self): collection_disk_usage_model_json2 = collection_disk_usage_model.to_dict() assert collection_disk_usage_model_json2 == collection_disk_usage_model_json -class TestCollectionUsage(): +class TestModel_CollectionUsage(): """ Test Class for CollectionUsage """ @@ -6877,7 +7021,7 @@ def test_collection_usage_serialization(self): collection_usage_model_json2 = collection_usage_model.to_dict() assert collection_usage_model_json2 == collection_usage_model_json -class TestCompletions(): +class TestModel_Completions(): """ Test Class for Completions """ @@ -6906,7 +7050,7 @@ def test_completions_serialization(self): completions_model_json2 = completions_model.to_dict() assert completions_model_json2 == completions_model_json -class TestConfiguration(): +class TestModel_Configuration(): """ Test Class for Configuration """ @@ -6956,7 +7100,7 @@ def test_configuration_serialization(self): segment_settings_model = {} # SegmentSettings segment_settings_model['enabled'] = True - segment_settings_model['selector_tags'] = ['testString'] + segment_settings_model['selector_tags'] = ['h1', 'h2'] segment_settings_model['annotated_fields'] = ['custom-field-1', 'custom-field-2'] normalization_operation_model = {} # NormalizationOperation @@ -7024,9 +7168,9 @@ def test_configuration_serialization(self): enrichment_model['description'] = 'testString' enrichment_model['destination_field'] = 'enriched_title' enrichment_model['source_field'] = 'title' - enrichment_model['overwrite'] = True + enrichment_model['overwrite'] = False enrichment_model['enrichment'] = 'natural_language_understanding' - enrichment_model['ignore_downstream_errors'] = True + enrichment_model['ignore_downstream_errors'] = False enrichment_model['options'] = enrichment_options_model source_schedule_model = {} # SourceSchedule @@ -7050,11 +7194,11 @@ def test_configuration_serialization(self): source_options_web_crawl_model = {} # SourceOptionsWebCrawl source_options_web_crawl_model['url'] = 'testString' source_options_web_crawl_model['limit_to_starting_hosts'] = True - source_options_web_crawl_model['crawl_speed'] = 'gentle' - source_options_web_crawl_model['allow_untrusted_certificate'] = True + source_options_web_crawl_model['crawl_speed'] = 'normal' + source_options_web_crawl_model['allow_untrusted_certificate'] = False source_options_web_crawl_model['maximum_hops'] = 38 source_options_web_crawl_model['request_timeout'] = 38 - source_options_web_crawl_model['override_robots_txt'] = True + source_options_web_crawl_model['override_robots_txt'] = False source_options_web_crawl_model['blacklist'] = ['testString'] source_options_buckets_model = {} # SourceOptionsBuckets @@ -7079,8 +7223,8 @@ def test_configuration_serialization(self): configuration_model_json = {} configuration_model_json['configuration_id'] = 'testString' configuration_model_json['name'] = 'testString' - configuration_model_json['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - configuration_model_json['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + configuration_model_json['created'] = 
"2019-01-01T12:00:00Z" + configuration_model_json['updated'] = "2019-01-01T12:00:00Z" configuration_model_json['description'] = 'testString' configuration_model_json['conversions'] = conversions_model configuration_model_json['enrichments'] = [enrichment_model] @@ -7102,7 +7246,7 @@ def test_configuration_serialization(self): configuration_model_json2 = configuration_model.to_dict() assert configuration_model_json2 == configuration_model_json -class TestConversions(): +class TestModel_Conversions(): """ Test Class for Conversions """ @@ -7151,8 +7295,8 @@ def test_conversions_serialization(self): html_settings_model['exclude_tag_attributes'] = ['testString'] segment_settings_model = {} # SegmentSettings - segment_settings_model['enabled'] = True - segment_settings_model['selector_tags'] = ['testString'] + segment_settings_model['enabled'] = False + segment_settings_model['selector_tags'] = ['h1', 'h2'] segment_settings_model['annotated_fields'] = ['testString'] normalization_operation_model = {} # NormalizationOperation @@ -7184,7 +7328,7 @@ def test_conversions_serialization(self): conversions_model_json2 = conversions_model.to_dict() assert conversions_model_json2 == conversions_model_json -class TestCreateEventResponse(): +class TestModel_CreateEventResponse(): """ Test Class for CreateEventResponse """ @@ -7199,7 +7343,7 @@ def test_create_event_response_serialization(self): event_data_model = {} # EventData event_data_model['environment_id'] = 'testString' event_data_model['session_token'] = 'testString' - event_data_model['client_timestamp'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + event_data_model['client_timestamp'] = "2019-01-01T12:00:00Z" event_data_model['display_rank'] = 38 event_data_model['collection_id'] = 'testString' event_data_model['document_id'] = 'testString' @@ -7225,7 +7369,7 @@ def test_create_event_response_serialization(self): create_event_response_model_json2 = create_event_response_model.to_dict() assert create_event_response_model_json2 == create_event_response_model_json -class TestCredentialDetails(): +class TestModel_CredentialDetails(): """ Test Class for CredentialDetails """ @@ -7272,7 +7416,7 @@ def test_credential_details_serialization(self): credential_details_model_json2 = credential_details_model.to_dict() assert credential_details_model_json2 == credential_details_model_json -class TestCredentials(): +class TestModel_Credentials(): """ Test Class for Credentials """ @@ -7305,12 +7449,16 @@ def test_credentials_serialization(self): credential_details_model['access_key_id'] = 'testString' credential_details_model['secret_access_key'] = 'testString' + status_details_model = {} # StatusDetails + status_details_model['authenticated'] = True + status_details_model['error_message'] = 'testString' + # Construct a json representation of a Credentials model credentials_model_json = {} credentials_model_json['credential_id'] = 'testString' credentials_model_json['source_type'] = 'box' credentials_model_json['credential_details'] = credential_details_model - credentials_model_json['status'] = 'connected' + credentials_model_json['status'] = status_details_model # Construct a model instance of Credentials by calling from_dict on the json representation credentials_model = Credentials.from_dict(credentials_model_json) @@ -7327,7 +7475,7 @@ def test_credentials_serialization(self): credentials_model_json2 = credentials_model.to_dict() assert credentials_model_json2 == credentials_model_json -class TestCredentialsList(): +class 
TestModel_CredentialsList(): """ Test Class for CredentialsList """ @@ -7360,11 +7508,15 @@ def test_credentials_list_serialization(self): credential_details_model['access_key_id'] = 'testString' credential_details_model['secret_access_key'] = 'testString' + status_details_model = {} # StatusDetails + status_details_model['authenticated'] = True + status_details_model['error_message'] = 'testString' + credentials_model = {} # Credentials credentials_model['credential_id'] = '00000d8c-0000-00e8-ba89-0ed5f89f718b' credentials_model['source_type'] = 'salesforce' credentials_model['credential_details'] = credential_details_model - credentials_model['status'] = 'connected' + credentials_model['status'] = status_details_model # Construct a json representation of a CredentialsList model credentials_list_model_json = {} @@ -7385,7 +7537,7 @@ def test_credentials_list_serialization(self): credentials_list_model_json2 = credentials_list_model.to_dict() assert credentials_list_model_json2 == credentials_list_model_json -class TestDeleteCollectionResponse(): +class TestModel_DeleteCollectionResponse(): """ Test Class for DeleteCollectionResponse """ @@ -7415,7 +7567,7 @@ def test_delete_collection_response_serialization(self): delete_collection_response_model_json2 = delete_collection_response_model.to_dict() assert delete_collection_response_model_json2 == delete_collection_response_model_json -class TestDeleteConfigurationResponse(): +class TestModel_DeleteConfigurationResponse(): """ Test Class for DeleteConfigurationResponse """ @@ -7429,7 +7581,7 @@ def test_delete_configuration_response_serialization(self): notice_model = {} # Notice notice_model['notice_id'] = 'configuration_in_use' - notice_model['created'] = datetime_to_string(string_to_datetime("2016-09-28T12:34:00.000Z")) + notice_model['created'] = "2016-09-28T12:34:00Z" notice_model['document_id'] = 'testString' notice_model['query_id'] = 'testString' notice_model['severity'] = 'warning' @@ -7457,7 +7609,7 @@ def test_delete_configuration_response_serialization(self): delete_configuration_response_model_json2 = delete_configuration_response_model.to_dict() assert delete_configuration_response_model_json2 == delete_configuration_response_model_json -class TestDeleteCredentials(): +class TestModel_DeleteCredentials(): """ Test Class for DeleteCredentials """ @@ -7487,7 +7639,7 @@ def test_delete_credentials_serialization(self): delete_credentials_model_json2 = delete_credentials_model.to_dict() assert delete_credentials_model_json2 == delete_credentials_model_json -class TestDeleteDocumentResponse(): +class TestModel_DeleteDocumentResponse(): """ Test Class for DeleteDocumentResponse """ @@ -7517,7 +7669,7 @@ def test_delete_document_response_serialization(self): delete_document_response_model_json2 = delete_document_response_model.to_dict() assert delete_document_response_model_json2 == delete_document_response_model_json -class TestDeleteEnvironmentResponse(): +class TestModel_DeleteEnvironmentResponse(): """ Test Class for DeleteEnvironmentResponse """ @@ -7547,7 +7699,7 @@ def test_delete_environment_response_serialization(self): delete_environment_response_model_json2 = delete_environment_response_model.to_dict() assert delete_environment_response_model_json2 == delete_environment_response_model_json -class TestDiskUsage(): +class TestModel_DiskUsage(): """ Test Class for DiskUsage """ @@ -7577,7 +7729,7 @@ def test_disk_usage_serialization(self): disk_usage_model_json2 = disk_usage_model.to_dict() assert disk_usage_model_json2 == 
disk_usage_model_json -class TestDocumentAccepted(): +class TestModel_DocumentAccepted(): """ Test Class for DocumentAccepted """ @@ -7591,7 +7743,7 @@ def test_document_accepted_serialization(self): notice_model = {} # Notice notice_model['notice_id'] = 'testString' - notice_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + notice_model['created'] = "2019-01-01T12:00:00Z" notice_model['document_id'] = 'testString' notice_model['query_id'] = 'testString' notice_model['severity'] = 'warning' @@ -7619,7 +7771,7 @@ def test_document_accepted_serialization(self): document_accepted_model_json2 = document_accepted_model.to_dict() assert document_accepted_model_json2 == document_accepted_model_json -class TestDocumentCounts(): +class TestModel_DocumentCounts(): """ Test Class for DocumentCounts """ @@ -7651,7 +7803,7 @@ def test_document_counts_serialization(self): document_counts_model_json2 = document_counts_model.to_dict() assert document_counts_model_json2 == document_counts_model_json -class TestDocumentStatus(): +class TestModel_DocumentStatus(): """ Test Class for DocumentStatus """ @@ -7665,7 +7817,7 @@ def test_document_status_serialization(self): notice_model = {} # Notice notice_model['notice_id'] = 'index_342' - notice_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + notice_model['created'] = "2019-01-01T12:00:00Z" notice_model['document_id'] = 'f1360220-ea2d-4271-9d62-89a910b13c37' notice_model['query_id'] = 'testString' notice_model['severity'] = 'warning' @@ -7698,7 +7850,7 @@ def test_document_status_serialization(self): document_status_model_json2 = document_status_model.to_dict() assert document_status_model_json2 == document_status_model_json -class TestEnrichment(): +class TestModel_Enrichment(): """ Test Class for Enrichment """ @@ -7763,9 +7915,9 @@ def test_enrichment_serialization(self): enrichment_model_json['description'] = 'testString' enrichment_model_json['destination_field'] = 'testString' enrichment_model_json['source_field'] = 'testString' - enrichment_model_json['overwrite'] = True + enrichment_model_json['overwrite'] = False enrichment_model_json['enrichment'] = 'testString' - enrichment_model_json['ignore_downstream_errors'] = True + enrichment_model_json['ignore_downstream_errors'] = False enrichment_model_json['options'] = enrichment_options_model # Construct a model instance of Enrichment by calling from_dict on the json representation @@ -7783,7 +7935,7 @@ def test_enrichment_serialization(self): enrichment_model_json2 = enrichment_model.to_dict() assert enrichment_model_json2 == enrichment_model_json -class TestEnrichmentOptions(): +class TestModel_EnrichmentOptions(): """ Test Class for EnrichmentOptions """ @@ -7859,7 +8011,7 @@ def test_enrichment_options_serialization(self): enrichment_options_model_json2 = enrichment_options_model.to_dict() assert enrichment_options_model_json2 == enrichment_options_model_json -class TestEnvironment(): +class TestModel_Environment(): """ Test Class for Environment """ @@ -7899,8 +8051,8 @@ def test_environment_serialization(self): environment_model_json['environment_id'] = 'testString' environment_model_json['name'] = 'testString' environment_model_json['description'] = 'testString' - environment_model_json['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - environment_model_json['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + environment_model_json['created'] = 
"2019-01-01T12:00:00Z" + environment_model_json['updated'] = "2019-01-01T12:00:00Z" environment_model_json['status'] = 'active' environment_model_json['read_only'] = True environment_model_json['size'] = 'LT' @@ -7923,7 +8075,7 @@ def test_environment_serialization(self): environment_model_json2 = environment_model.to_dict() assert environment_model_json2 == environment_model_json -class TestEnvironmentDocuments(): +class TestModel_EnvironmentDocuments(): """ Test Class for EnvironmentDocuments """ @@ -7953,7 +8105,7 @@ def test_environment_documents_serialization(self): environment_documents_model_json2 = environment_documents_model.to_dict() assert environment_documents_model_json2 == environment_documents_model_json -class TestEventData(): +class TestModel_EventData(): """ Test Class for EventData """ @@ -7967,7 +8119,7 @@ def test_event_data_serialization(self): event_data_model_json = {} event_data_model_json['environment_id'] = 'testString' event_data_model_json['session_token'] = 'testString' - event_data_model_json['client_timestamp'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + event_data_model_json['client_timestamp'] = "2019-01-01T12:00:00Z" event_data_model_json['display_rank'] = 38 event_data_model_json['collection_id'] = 'testString' event_data_model_json['document_id'] = 'testString' @@ -7988,7 +8140,7 @@ def test_event_data_serialization(self): event_data_model_json2 = event_data_model.to_dict() assert event_data_model_json2 == event_data_model_json -class TestExpansion(): +class TestModel_Expansion(): """ Test Class for Expansion """ @@ -8018,7 +8170,7 @@ def test_expansion_serialization(self): expansion_model_json2 = expansion_model.to_dict() assert expansion_model_json2 == expansion_model_json -class TestExpansions(): +class TestModel_Expansions(): """ Test Class for Expansions """ @@ -8053,7 +8205,7 @@ def test_expansions_serialization(self): expansions_model_json2 = expansions_model.to_dict() assert expansions_model_json2 == expansions_model_json -class TestField(): +class TestModel_Field(): """ Test Class for Field """ @@ -8083,7 +8235,7 @@ def test_field_serialization(self): field_model_json2 = field_model.to_dict() assert field_model_json2 == field_model_json -class TestFontSetting(): +class TestModel_FontSetting(): """ Test Class for FontSetting """ @@ -8117,7 +8269,7 @@ def test_font_setting_serialization(self): font_setting_model_json2 = font_setting_model.to_dict() assert font_setting_model_json2 == font_setting_model_json -class TestGateway(): +class TestModel_Gateway(): """ Test Class for Gateway """ @@ -8150,7 +8302,7 @@ def test_gateway_serialization(self): gateway_model_json2 = gateway_model.to_dict() assert gateway_model_json2 == gateway_model_json -class TestGatewayDelete(): +class TestModel_GatewayDelete(): """ Test Class for GatewayDelete """ @@ -8180,7 +8332,7 @@ def test_gateway_delete_serialization(self): gateway_delete_model_json2 = gateway_delete_model.to_dict() assert gateway_delete_model_json2 == gateway_delete_model_json -class TestGatewayList(): +class TestModel_GatewayList(): """ Test Class for GatewayList """ @@ -8218,7 +8370,7 @@ def test_gateway_list_serialization(self): gateway_list_model_json2 = gateway_list_model.to_dict() assert gateway_list_model_json2 == gateway_list_model_json -class TestHtmlSettings(): +class TestModel_HtmlSettings(): """ Test Class for HtmlSettings """ @@ -8257,7 +8409,7 @@ def test_html_settings_serialization(self): html_settings_model_json2 = html_settings_model.to_dict() assert 
html_settings_model_json2 == html_settings_model_json -class TestIndexCapacity(): +class TestModel_IndexCapacity(): """ Test Class for IndexCapacity """ @@ -8302,7 +8454,7 @@ def test_index_capacity_serialization(self): index_capacity_model_json2 = index_capacity_model.to_dict() assert index_capacity_model_json2 == index_capacity_model_json -class TestListCollectionFieldsResponse(): +class TestModel_ListCollectionFieldsResponse(): """ Test Class for ListCollectionFieldsResponse """ @@ -8337,7 +8489,7 @@ def test_list_collection_fields_response_serialization(self): list_collection_fields_response_model_json2 = list_collection_fields_response_model.to_dict() assert list_collection_fields_response_model_json2 == list_collection_fields_response_model_json -class TestListCollectionsResponse(): +class TestModel_ListCollectionsResponse(): """ Test Class for ListCollectionsResponse """ @@ -8366,12 +8518,12 @@ def test_list_collections_response_serialization(self): training_status_model['minimum_examples_added'] = True training_status_model['sufficient_label_diversity'] = True training_status_model['notices'] = 38 - training_status_model['successfully_trained'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - training_status_model['data_updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + training_status_model['successfully_trained'] = "2019-01-01T12:00:00Z" + training_status_model['data_updated'] = "2019-01-01T12:00:00Z" source_status_model = {} # SourceStatus source_status_model['status'] = 'running' - source_status_model['next_crawl'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + source_status_model['next_crawl'] = "2019-01-01T12:00:00Z" collection_crawl_status_model = {} # CollectionCrawlStatus collection_crawl_status_model['source_crawl'] = source_status_model @@ -8391,8 +8543,8 @@ def test_list_collections_response_serialization(self): collection_model['collection_id'] = 'f1360220-ea2d-4271-9d62-89a910b13c37' collection_model['name'] = 'example' collection_model['description'] = 'this is a demo collection' - collection_model['created'] = datetime_to_string(string_to_datetime("2015-08-24T18:42:25.324Z")) - collection_model['updated'] = datetime_to_string(string_to_datetime("2015-08-24T18:42:25.324Z")) + collection_model['created'] = "2015-08-24T18:42:25.324000Z" + collection_model['updated'] = "2015-08-24T18:42:25.324000Z" collection_model['status'] = 'active' collection_model['configuration_id'] = '6963be41-2dea-4f79-8f52-127c63c479b0' collection_model['language'] = 'en' @@ -8421,7 +8573,7 @@ def test_list_collections_response_serialization(self): list_collections_response_model_json2 = list_collections_response_model.to_dict() assert list_collections_response_model_json2 == list_collections_response_model_json -class TestListConfigurationsResponse(): +class TestModel_ListConfigurationsResponse(): """ Test Class for ListConfigurationsResponse """ @@ -8470,8 +8622,8 @@ def test_list_configurations_response_serialization(self): html_settings_model['exclude_tag_attributes'] = ['testString'] segment_settings_model = {} # SegmentSettings - segment_settings_model['enabled'] = True - segment_settings_model['selector_tags'] = ['testString'] + segment_settings_model['enabled'] = False + segment_settings_model['selector_tags'] = ['h1', 'h2'] segment_settings_model['annotated_fields'] = ['testString'] normalization_operation_model = {} # NormalizationOperation @@ -8539,14 +8691,14 @@ def 
test_list_configurations_response_serialization(self): enrichment_model['description'] = 'testString' enrichment_model['destination_field'] = 'testString' enrichment_model['source_field'] = 'testString' - enrichment_model['overwrite'] = True + enrichment_model['overwrite'] = False enrichment_model['enrichment'] = 'testString' - enrichment_model['ignore_downstream_errors'] = True + enrichment_model['ignore_downstream_errors'] = False enrichment_model['options'] = enrichment_options_model source_schedule_model = {} # SourceSchedule source_schedule_model['enabled'] = True - source_schedule_model['time_zone'] = 'testString' + source_schedule_model['time_zone'] = 'America/New_York' source_schedule_model['frequency'] = 'daily' source_options_folder_model = {} # SourceOptionsFolder @@ -8565,11 +8717,11 @@ def test_list_configurations_response_serialization(self): source_options_web_crawl_model = {} # SourceOptionsWebCrawl source_options_web_crawl_model['url'] = 'testString' source_options_web_crawl_model['limit_to_starting_hosts'] = True - source_options_web_crawl_model['crawl_speed'] = 'gentle' - source_options_web_crawl_model['allow_untrusted_certificate'] = True + source_options_web_crawl_model['crawl_speed'] = 'normal' + source_options_web_crawl_model['allow_untrusted_certificate'] = False source_options_web_crawl_model['maximum_hops'] = 38 source_options_web_crawl_model['request_timeout'] = 38 - source_options_web_crawl_model['override_robots_txt'] = True + source_options_web_crawl_model['override_robots_txt'] = False source_options_web_crawl_model['blacklist'] = ['testString'] source_options_buckets_model = {} # SourceOptionsBuckets @@ -8593,8 +8745,8 @@ def test_list_configurations_response_serialization(self): configuration_model = {} # Configuration configuration_model['configuration_id'] = 'testString' configuration_model['name'] = 'testString' - configuration_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - configuration_model['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + configuration_model['created'] = "2019-01-01T12:00:00Z" + configuration_model['updated'] = "2019-01-01T12:00:00Z" configuration_model['description'] = 'testString' configuration_model['conversions'] = conversions_model configuration_model['enrichments'] = [enrichment_model] @@ -8620,7 +8772,7 @@ def test_list_configurations_response_serialization(self): list_configurations_response_model_json2 = list_configurations_response_model.to_dict() assert list_configurations_response_model_json2 == list_configurations_response_model_json -class TestListEnvironmentsResponse(): +class TestModel_ListEnvironmentsResponse(): """ Test Class for ListEnvironmentsResponse """ @@ -8659,8 +8811,8 @@ def test_list_environments_response_serialization(self): environment_model['environment_id'] = 'ecbda78e-fb06-40b1-a43f-a039fac0adc6' environment_model['name'] = 'byod_environment' environment_model['description'] = 'Private Data Environment' - environment_model['created'] = datetime_to_string(string_to_datetime("2017-07-14T12:54:40.985Z")) - environment_model['updated'] = datetime_to_string(string_to_datetime("2017-07-14T12:54:40.985Z")) + environment_model['created'] = "2017-07-14T12:54:40.985000Z" + environment_model['updated'] = "2017-07-14T12:54:40.985000Z" environment_model['status'] = 'active' environment_model['read_only'] = False environment_model['size'] = 'LT' @@ -8687,7 +8839,7 @@ def test_list_environments_response_serialization(self): 
list_environments_response_model_json2 = list_environments_response_model.to_dict() assert list_environments_response_model_json2 == list_environments_response_model_json -class TestLogQueryResponse(): +class TestModel_LogQueryResponse(): """ Test Class for LogQueryResponse """ @@ -8716,8 +8868,8 @@ def test_log_query_response_serialization(self): log_query_response_result_model['document_type'] = 'query' log_query_response_result_model['natural_language_query'] = 'testString' log_query_response_result_model['document_results'] = log_query_response_result_documents_model - log_query_response_result_model['created_timestamp'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - log_query_response_result_model['client_timestamp'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + log_query_response_result_model['created_timestamp'] = "2019-01-01T12:00:00Z" + log_query_response_result_model['client_timestamp'] = "2019-01-01T12:00:00Z" log_query_response_result_model['query_id'] = 'testString' log_query_response_result_model['session_token'] = 'testString' log_query_response_result_model['collection_id'] = 'testString' @@ -8746,7 +8898,7 @@ def test_log_query_response_serialization(self): log_query_response_model_json2 = log_query_response_model.to_dict() assert log_query_response_model_json2 == log_query_response_model_json -class TestLogQueryResponseResult(): +class TestModel_LogQueryResponseResult(): """ Test Class for LogQueryResponseResult """ @@ -8776,8 +8928,8 @@ def test_log_query_response_result_serialization(self): log_query_response_result_model_json['document_type'] = 'query' log_query_response_result_model_json['natural_language_query'] = 'testString' log_query_response_result_model_json['document_results'] = log_query_response_result_documents_model - log_query_response_result_model_json['created_timestamp'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - log_query_response_result_model_json['client_timestamp'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + log_query_response_result_model_json['created_timestamp'] = "2019-01-01T12:00:00Z" + log_query_response_result_model_json['client_timestamp'] = "2019-01-01T12:00:00Z" log_query_response_result_model_json['query_id'] = 'testString' log_query_response_result_model_json['session_token'] = 'testString' log_query_response_result_model_json['collection_id'] = 'testString' @@ -8801,7 +8953,7 @@ def test_log_query_response_result_serialization(self): log_query_response_result_model_json2 = log_query_response_result_model.to_dict() assert log_query_response_result_model_json2 == log_query_response_result_model_json -class TestLogQueryResponseResultDocuments(): +class TestModel_LogQueryResponseResultDocuments(): """ Test Class for LogQueryResponseResultDocuments """ @@ -8840,7 +8992,7 @@ def test_log_query_response_result_documents_serialization(self): log_query_response_result_documents_model_json2 = log_query_response_result_documents_model.to_dict() assert log_query_response_result_documents_model_json2 == log_query_response_result_documents_model_json -class TestLogQueryResponseResultDocumentsResult(): +class TestModel_LogQueryResponseResultDocumentsResult(): """ Test Class for LogQueryResponseResultDocumentsResult """ @@ -8873,7 +9025,7 @@ def test_log_query_response_result_documents_result_serialization(self): log_query_response_result_documents_result_model_json2 = log_query_response_result_documents_result_model.to_dict() assert 
log_query_response_result_documents_result_model_json2 == log_query_response_result_documents_result_model_json -class TestMetricAggregation(): +class TestModel_MetricAggregation(): """ Test Class for MetricAggregation """ @@ -8886,7 +9038,7 @@ def test_metric_aggregation_serialization(self): # Construct dict forms of any model objects needed in order to build this model. metric_aggregation_result_model = {} # MetricAggregationResult - metric_aggregation_result_model['key_as_string'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + metric_aggregation_result_model['key_as_string'] = "2019-01-01T12:00:00Z" metric_aggregation_result_model['key'] = 26 metric_aggregation_result_model['matching_results'] = 38 metric_aggregation_result_model['event_rate'] = 72.5 @@ -8912,7 +9064,7 @@ def test_metric_aggregation_serialization(self): metric_aggregation_model_json2 = metric_aggregation_model.to_dict() assert metric_aggregation_model_json2 == metric_aggregation_model_json -class TestMetricAggregationResult(): +class TestModel_MetricAggregationResult(): """ Test Class for MetricAggregationResult """ @@ -8924,7 +9076,7 @@ def test_metric_aggregation_result_serialization(self): # Construct a json representation of a MetricAggregationResult model metric_aggregation_result_model_json = {} - metric_aggregation_result_model_json['key_as_string'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + metric_aggregation_result_model_json['key_as_string'] = "2019-01-01T12:00:00Z" metric_aggregation_result_model_json['key'] = 26 metric_aggregation_result_model_json['matching_results'] = 38 metric_aggregation_result_model_json['event_rate'] = 72.5 @@ -8944,7 +9096,7 @@ def test_metric_aggregation_result_serialization(self): metric_aggregation_result_model_json2 = metric_aggregation_result_model.to_dict() assert metric_aggregation_result_model_json2 == metric_aggregation_result_model_json -class TestMetricResponse(): +class TestModel_MetricResponse(): """ Test Class for MetricResponse """ @@ -8957,7 +9109,7 @@ def test_metric_response_serialization(self): # Construct dict forms of any model objects needed in order to build this model. 
metric_aggregation_result_model = {} # MetricAggregationResult - metric_aggregation_result_model['key_as_string'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + metric_aggregation_result_model['key_as_string'] = "2019-01-01T12:00:00Z" metric_aggregation_result_model['key'] = 26 metric_aggregation_result_model['matching_results'] = 38 metric_aggregation_result_model['event_rate'] = 72.5 @@ -8986,7 +9138,7 @@ def test_metric_response_serialization(self): metric_response_model_json2 = metric_response_model.to_dict() assert metric_response_model_json2 == metric_response_model_json -class TestMetricTokenAggregation(): +class TestModel_MetricTokenAggregation(): """ Test Class for MetricTokenAggregation """ @@ -9023,7 +9175,7 @@ def test_metric_token_aggregation_serialization(self): metric_token_aggregation_model_json2 = metric_token_aggregation_model.to_dict() assert metric_token_aggregation_model_json2 == metric_token_aggregation_model_json -class TestMetricTokenAggregationResult(): +class TestModel_MetricTokenAggregationResult(): """ Test Class for MetricTokenAggregationResult """ @@ -9054,7 +9206,7 @@ def test_metric_token_aggregation_result_serialization(self): metric_token_aggregation_result_model_json2 = metric_token_aggregation_result_model.to_dict() assert metric_token_aggregation_result_model_json2 == metric_token_aggregation_result_model_json -class TestMetricTokenResponse(): +class TestModel_MetricTokenResponse(): """ Test Class for MetricTokenResponse """ @@ -9094,7 +9246,7 @@ def test_metric_token_response_serialization(self): metric_token_response_model_json2 = metric_token_response_model.to_dict() assert metric_token_response_model_json2 == metric_token_response_model_json -class TestNluEnrichmentConcepts(): +class TestModel_NluEnrichmentConcepts(): """ Test Class for NluEnrichmentConcepts """ @@ -9123,7 +9275,7 @@ def test_nlu_enrichment_concepts_serialization(self): nlu_enrichment_concepts_model_json2 = nlu_enrichment_concepts_model.to_dict() assert nlu_enrichment_concepts_model_json2 == nlu_enrichment_concepts_model_json -class TestNluEnrichmentEmotion(): +class TestModel_NluEnrichmentEmotion(): """ Test Class for NluEnrichmentEmotion """ @@ -9153,7 +9305,7 @@ def test_nlu_enrichment_emotion_serialization(self): nlu_enrichment_emotion_model_json2 = nlu_enrichment_emotion_model.to_dict() assert nlu_enrichment_emotion_model_json2 == nlu_enrichment_emotion_model_json -class TestNluEnrichmentEntities(): +class TestModel_NluEnrichmentEntities(): """ Test Class for NluEnrichmentEntities """ @@ -9188,7 +9340,7 @@ def test_nlu_enrichment_entities_serialization(self): nlu_enrichment_entities_model_json2 = nlu_enrichment_entities_model.to_dict() assert nlu_enrichment_entities_model_json2 == nlu_enrichment_entities_model_json -class TestNluEnrichmentFeatures(): +class TestModel_NluEnrichmentFeatures(): """ Test Class for NluEnrichmentFeatures """ @@ -9259,7 +9411,7 @@ def test_nlu_enrichment_features_serialization(self): nlu_enrichment_features_model_json2 = nlu_enrichment_features_model.to_dict() assert nlu_enrichment_features_model_json2 == nlu_enrichment_features_model_json -class TestNluEnrichmentKeywords(): +class TestModel_NluEnrichmentKeywords(): """ Test Class for NluEnrichmentKeywords """ @@ -9290,7 +9442,7 @@ def test_nlu_enrichment_keywords_serialization(self): nlu_enrichment_keywords_model_json2 = nlu_enrichment_keywords_model.to_dict() assert nlu_enrichment_keywords_model_json2 == nlu_enrichment_keywords_model_json -class TestNluEnrichmentRelations(): 
+class TestModel_NluEnrichmentRelations(): """ Test Class for NluEnrichmentRelations """ @@ -9319,7 +9471,7 @@ def test_nlu_enrichment_relations_serialization(self): nlu_enrichment_relations_model_json2 = nlu_enrichment_relations_model.to_dict() assert nlu_enrichment_relations_model_json2 == nlu_enrichment_relations_model_json -class TestNluEnrichmentSemanticRoles(): +class TestModel_NluEnrichmentSemanticRoles(): """ Test Class for NluEnrichmentSemanticRoles """ @@ -9350,7 +9502,7 @@ def test_nlu_enrichment_semantic_roles_serialization(self): nlu_enrichment_semantic_roles_model_json2 = nlu_enrichment_semantic_roles_model.to_dict() assert nlu_enrichment_semantic_roles_model_json2 == nlu_enrichment_semantic_roles_model_json -class TestNluEnrichmentSentiment(): +class TestModel_NluEnrichmentSentiment(): """ Test Class for NluEnrichmentSentiment """ @@ -9380,7 +9532,7 @@ def test_nlu_enrichment_sentiment_serialization(self): nlu_enrichment_sentiment_model_json2 = nlu_enrichment_sentiment_model.to_dict() assert nlu_enrichment_sentiment_model_json2 == nlu_enrichment_sentiment_model_json -class TestNormalizationOperation(): +class TestModel_NormalizationOperation(): """ Test Class for NormalizationOperation """ @@ -9411,7 +9563,7 @@ def test_normalization_operation_serialization(self): normalization_operation_model_json2 = normalization_operation_model.to_dict() assert normalization_operation_model_json2 == normalization_operation_model_json -class TestNotice(): +class TestModel_Notice(): """ Test Class for Notice """ @@ -9424,7 +9576,7 @@ def test_notice_serialization(self): # Construct a json representation of a Notice model notice_model_json = {} notice_model_json['notice_id'] = 'testString' - notice_model_json['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + notice_model_json['created'] = "2019-01-01T12:00:00Z" notice_model_json['document_id'] = 'testString' notice_model_json['query_id'] = 'testString' notice_model_json['severity'] = 'warning' @@ -9446,7 +9598,7 @@ def test_notice_serialization(self): notice_model_json2 = notice_model.to_dict() assert notice_model_json2 == notice_model_json -class TestPdfHeadingDetection(): +class TestModel_PdfHeadingDetection(): """ Test Class for PdfHeadingDetection """ @@ -9485,7 +9637,7 @@ def test_pdf_heading_detection_serialization(self): pdf_heading_detection_model_json2 = pdf_heading_detection_model.to_dict() assert pdf_heading_detection_model_json2 == pdf_heading_detection_model_json -class TestPdfSettings(): +class TestModel_PdfSettings(): """ Test Class for PdfSettings """ @@ -9527,7 +9679,7 @@ def test_pdf_settings_serialization(self): pdf_settings_model_json2 = pdf_settings_model.to_dict() assert pdf_settings_model_json2 == pdf_settings_model_json -class TestQueryAggregation(): +class TestModel_QueryAggregation(): """ Test Class for QueryAggregation """ @@ -9557,7 +9709,7 @@ def test_query_aggregation_serialization(self): query_aggregation_model_json2 = query_aggregation_model.to_dict() assert query_aggregation_model_json2 == query_aggregation_model_json -class TestQueryNoticesResponse(): +class TestModel_QueryNoticesResponse(): """ Test Class for QueryNoticesResponse """ @@ -9575,7 +9727,7 @@ def test_query_notices_response_serialization(self): notice_model = {} # Notice notice_model['notice_id'] = 'xpath_not_found' - notice_model['created'] = datetime_to_string(string_to_datetime("2016-09-20T17:26:17.000Z")) + notice_model['created'] = "2016-09-20T17:26:17Z" notice_model['document_id'] = 
'030ba125-29db-43f2-8552-f941ae30a7a8' notice_model['query_id'] = 'testString' notice_model['severity'] = 'warning' @@ -9592,7 +9744,7 @@ def test_query_notices_response_serialization(self): query_notices_result_model['file_type'] = 'html' query_notices_result_model['sha1'] = 'de9f2c7fd25e1b3afad3e85a0bd17d9b100db4b3' query_notices_result_model['notices'] = [notice_model] - query_notices_result_model['foo'] = { 'foo': 'bar' } + query_notices_result_model['score'] = { 'foo': 'bar' } query_aggregation_model = {} # Histogram query_aggregation_model['type'] = 'histogram' @@ -9631,7 +9783,7 @@ def test_query_notices_response_serialization(self): query_notices_response_model_json2 = query_notices_response_model.to_dict() assert query_notices_response_model_json2 == query_notices_response_model_json -class TestQueryNoticesResult(): +class TestModel_QueryNoticesResult(): """ Test Class for QueryNoticesResult """ @@ -9649,7 +9801,7 @@ def test_query_notices_result_serialization(self): notice_model = {} # Notice notice_model['notice_id'] = 'testString' - notice_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + notice_model['created'] = "2019-01-01T12:00:00Z" notice_model['document_id'] = 'testString' notice_model['query_id'] = 'testString' notice_model['severity'] = 'warning' @@ -9684,7 +9836,17 @@ def test_query_notices_result_serialization(self): query_notices_result_model_json2 = query_notices_result_model.to_dict() assert query_notices_result_model_json2 == query_notices_result_model_json -class TestQueryPassages(): + # Test get_properties and set_properties methods. + query_notices_result_model.set_properties({}) + actual_dict = query_notices_result_model.get_properties() + assert actual_dict == {} + + expected_dict = {'foo': { 'foo': 'bar' }} + query_notices_result_model.set_properties(expected_dict) + actual_dict = query_notices_result_model.get_properties() + assert actual_dict == expected_dict + +class TestModel_QueryPassages(): """ Test Class for QueryPassages """ @@ -9718,7 +9880,7 @@ def test_query_passages_serialization(self): query_passages_model_json2 = query_passages_model.to_dict() assert query_passages_model_json2 == query_passages_model_json -class TestQueryResponse(): +class TestModel_QueryResponse(): """ Test Class for QueryResponse """ @@ -9739,7 +9901,7 @@ def test_query_response_serialization(self): query_result_model['metadata'] = {} query_result_model['collection_id'] = 'testString' query_result_model['result_metadata'] = query_result_metadata_model - query_result_model['foo'] = { 'foo': 'bar' } + query_result_model['score'] = { 'foo': 'bar' } query_aggregation_model = {} # Histogram query_aggregation_model['type'] = 'histogram' @@ -9784,7 +9946,7 @@ def test_query_response_serialization(self): query_response_model_json2 = query_response_model.to_dict() assert query_response_model_json2 == query_response_model_json -class TestQueryResult(): +class TestModel_QueryResult(): """ Test Class for QueryResult """ @@ -9823,7 +9985,17 @@ def test_query_result_serialization(self): query_result_model_json2 = query_result_model.to_dict() assert query_result_model_json2 == query_result_model_json -class TestQueryResultMetadata(): + # Test get_properties and set_properties methods. 
+ query_result_model.set_properties({}) + actual_dict = query_result_model.get_properties() + assert actual_dict == {} + + expected_dict = {'foo': { 'foo': 'bar' }} + query_result_model.set_properties(expected_dict) + actual_dict = query_result_model.get_properties() + assert actual_dict == expected_dict + +class TestModel_QueryResultMetadata(): """ Test Class for QueryResultMetadata """ @@ -9853,7 +10025,7 @@ def test_query_result_metadata_serialization(self): query_result_metadata_model_json2 = query_result_metadata_model.to_dict() assert query_result_metadata_model_json2 == query_result_metadata_model_json -class TestRetrievalDetails(): +class TestModel_RetrievalDetails(): """ Test Class for RetrievalDetails """ @@ -9882,7 +10054,7 @@ def test_retrieval_details_serialization(self): retrieval_details_model_json2 = retrieval_details_model.to_dict() assert retrieval_details_model_json2 == retrieval_details_model_json -class TestSduStatus(): +class TestModel_SduStatus(): """ Test Class for SduStatus """ @@ -9921,7 +10093,7 @@ def test_sdu_status_serialization(self): sdu_status_model_json2 = sdu_status_model.to_dict() assert sdu_status_model_json2 == sdu_status_model_json -class TestSduStatusCustomFields(): +class TestModel_SduStatusCustomFields(): """ Test Class for SduStatusCustomFields """ @@ -9951,7 +10123,7 @@ def test_sdu_status_custom_fields_serialization(self): sdu_status_custom_fields_model_json2 = sdu_status_custom_fields_model.to_dict() assert sdu_status_custom_fields_model_json2 == sdu_status_custom_fields_model_json -class TestSearchStatus(): +class TestModel_SearchStatus(): """ Test Class for SearchStatus """ @@ -9983,7 +10155,7 @@ def test_search_status_serialization(self): search_status_model_json2 = search_status_model.to_dict() assert search_status_model_json2 == search_status_model_json -class TestSegmentSettings(): +class TestModel_SegmentSettings(): """ Test Class for SegmentSettings """ @@ -9995,8 +10167,8 @@ def test_segment_settings_serialization(self): # Construct a json representation of a SegmentSettings model segment_settings_model_json = {} - segment_settings_model_json['enabled'] = True - segment_settings_model_json['selector_tags'] = ['testString'] + segment_settings_model_json['enabled'] = False + segment_settings_model_json['selector_tags'] = ['h1', 'h2'] segment_settings_model_json['annotated_fields'] = ['testString'] # Construct a model instance of SegmentSettings by calling from_dict on the json representation @@ -10014,7 +10186,7 @@ def test_segment_settings_serialization(self): segment_settings_model_json2 = segment_settings_model.to_dict() assert segment_settings_model_json2 == segment_settings_model_json -class TestSource(): +class TestModel_Source(): """ Test Class for Source """ @@ -10028,7 +10200,7 @@ def test_source_serialization(self): source_schedule_model = {} # SourceSchedule source_schedule_model['enabled'] = True - source_schedule_model['time_zone'] = 'testString' + source_schedule_model['time_zone'] = 'America/New_York' source_schedule_model['frequency'] = 'daily' source_options_folder_model = {} # SourceOptionsFolder @@ -10047,11 +10219,11 @@ def test_source_serialization(self): source_options_web_crawl_model = {} # SourceOptionsWebCrawl source_options_web_crawl_model['url'] = 'testString' source_options_web_crawl_model['limit_to_starting_hosts'] = True - source_options_web_crawl_model['crawl_speed'] = 'gentle' - source_options_web_crawl_model['allow_untrusted_certificate'] = True + source_options_web_crawl_model['crawl_speed'] = 'normal' + 
source_options_web_crawl_model['allow_untrusted_certificate'] = False source_options_web_crawl_model['maximum_hops'] = 38 source_options_web_crawl_model['request_timeout'] = 38 - source_options_web_crawl_model['override_robots_txt'] = True + source_options_web_crawl_model['override_robots_txt'] = False source_options_web_crawl_model['blacklist'] = ['testString'] source_options_buckets_model = {} # SourceOptionsBuckets @@ -10088,7 +10260,7 @@ def test_source_serialization(self): source_model_json2 = source_model.to_dict() assert source_model_json2 == source_model_json -class TestSourceOptions(): +class TestModel_SourceOptions(): """ Test Class for SourceOptions """ @@ -10116,11 +10288,11 @@ def test_source_options_serialization(self): source_options_web_crawl_model = {} # SourceOptionsWebCrawl source_options_web_crawl_model['url'] = 'testString' source_options_web_crawl_model['limit_to_starting_hosts'] = True - source_options_web_crawl_model['crawl_speed'] = 'gentle' - source_options_web_crawl_model['allow_untrusted_certificate'] = True + source_options_web_crawl_model['crawl_speed'] = 'normal' + source_options_web_crawl_model['allow_untrusted_certificate'] = False source_options_web_crawl_model['maximum_hops'] = 38 source_options_web_crawl_model['request_timeout'] = 38 - source_options_web_crawl_model['override_robots_txt'] = True + source_options_web_crawl_model['override_robots_txt'] = False source_options_web_crawl_model['blacklist'] = ['testString'] source_options_buckets_model = {} # SourceOptionsBuckets @@ -10151,7 +10323,7 @@ def test_source_options_serialization(self): source_options_model_json2 = source_options_model.to_dict() assert source_options_model_json2 == source_options_model_json -class TestSourceOptionsBuckets(): +class TestModel_SourceOptionsBuckets(): """ Test Class for SourceOptionsBuckets """ @@ -10181,7 +10353,7 @@ def test_source_options_buckets_serialization(self): source_options_buckets_model_json2 = source_options_buckets_model.to_dict() assert source_options_buckets_model_json2 == source_options_buckets_model_json -class TestSourceOptionsFolder(): +class TestModel_SourceOptionsFolder(): """ Test Class for SourceOptionsFolder """ @@ -10212,7 +10384,7 @@ def test_source_options_folder_serialization(self): source_options_folder_model_json2 = source_options_folder_model.to_dict() assert source_options_folder_model_json2 == source_options_folder_model_json -class TestSourceOptionsObject(): +class TestModel_SourceOptionsObject(): """ Test Class for SourceOptionsObject """ @@ -10242,7 +10414,7 @@ def test_source_options_object_serialization(self): source_options_object_model_json2 = source_options_object_model.to_dict() assert source_options_object_model_json2 == source_options_object_model_json -class TestSourceOptionsSiteColl(): +class TestModel_SourceOptionsSiteColl(): """ Test Class for SourceOptionsSiteColl """ @@ -10272,7 +10444,7 @@ def test_source_options_site_coll_serialization(self): source_options_site_coll_model_json2 = source_options_site_coll_model.to_dict() assert source_options_site_coll_model_json2 == source_options_site_coll_model_json -class TestSourceOptionsWebCrawl(): +class TestModel_SourceOptionsWebCrawl(): """ Test Class for SourceOptionsWebCrawl """ @@ -10286,11 +10458,11 @@ def test_source_options_web_crawl_serialization(self): source_options_web_crawl_model_json = {} source_options_web_crawl_model_json['url'] = 'testString' source_options_web_crawl_model_json['limit_to_starting_hosts'] = True - 
source_options_web_crawl_model_json['crawl_speed'] = 'gentle' - source_options_web_crawl_model_json['allow_untrusted_certificate'] = True + source_options_web_crawl_model_json['crawl_speed'] = 'normal' + source_options_web_crawl_model_json['allow_untrusted_certificate'] = False source_options_web_crawl_model_json['maximum_hops'] = 38 source_options_web_crawl_model_json['request_timeout'] = 38 - source_options_web_crawl_model_json['override_robots_txt'] = True + source_options_web_crawl_model_json['override_robots_txt'] = False source_options_web_crawl_model_json['blacklist'] = ['testString'] # Construct a model instance of SourceOptionsWebCrawl by calling from_dict on the json representation @@ -10308,7 +10480,7 @@ def test_source_options_web_crawl_serialization(self): source_options_web_crawl_model_json2 = source_options_web_crawl_model.to_dict() assert source_options_web_crawl_model_json2 == source_options_web_crawl_model_json -class TestSourceSchedule(): +class TestModel_SourceSchedule(): """ Test Class for SourceSchedule """ @@ -10321,7 +10493,7 @@ def test_source_schedule_serialization(self): # Construct a json representation of a SourceSchedule model source_schedule_model_json = {} source_schedule_model_json['enabled'] = True - source_schedule_model_json['time_zone'] = 'testString' + source_schedule_model_json['time_zone'] = 'America/New_York' source_schedule_model_json['frequency'] = 'daily' # Construct a model instance of SourceSchedule by calling from_dict on the json representation @@ -10339,7 +10511,7 @@ def test_source_schedule_serialization(self): source_schedule_model_json2 = source_schedule_model.to_dict() assert source_schedule_model_json2 == source_schedule_model_json -class TestSourceStatus(): +class TestModel_SourceStatus(): """ Test Class for SourceStatus """ @@ -10352,7 +10524,7 @@ def test_source_status_serialization(self): # Construct a json representation of a SourceStatus model source_status_model_json = {} source_status_model_json['status'] = 'running' - source_status_model_json['next_crawl'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + source_status_model_json['next_crawl'] = "2019-01-01T12:00:00Z" # Construct a model instance of SourceStatus by calling from_dict on the json representation source_status_model = SourceStatus.from_dict(source_status_model_json) @@ -10369,7 +10541,37 @@ def test_source_status_serialization(self): source_status_model_json2 = source_status_model.to_dict() assert source_status_model_json2 == source_status_model_json -class TestTokenDictRule(): +class TestModel_StatusDetails(): + """ + Test Class for StatusDetails + """ + + def test_status_details_serialization(self): + """ + Test serialization/deserialization for StatusDetails + """ + + # Construct a json representation of a StatusDetails model + status_details_model_json = {} + status_details_model_json['authenticated'] = True + status_details_model_json['error_message'] = 'testString' + + # Construct a model instance of StatusDetails by calling from_dict on the json representation + status_details_model = StatusDetails.from_dict(status_details_model_json) + assert status_details_model != False + + # Construct a model instance of StatusDetails by calling from_dict on the json representation + status_details_model_dict = StatusDetails.from_dict(status_details_model_json).__dict__ + status_details_model2 = StatusDetails(**status_details_model_dict) + + # Verify the model instances are equivalent + assert status_details_model == status_details_model2 + + # 
Convert model instance back to dict and verify no loss of data + status_details_model_json2 = status_details_model.to_dict() + assert status_details_model_json2 == status_details_model_json + +class TestModel_TokenDictRule(): """ Test Class for TokenDictRule """ @@ -10401,7 +10603,7 @@ def test_token_dict_rule_serialization(self): token_dict_rule_model_json2 = token_dict_rule_model.to_dict() assert token_dict_rule_model_json2 == token_dict_rule_model_json -class TestTokenDictStatusResponse(): +class TestModel_TokenDictStatusResponse(): """ Test Class for TokenDictStatusResponse """ @@ -10431,7 +10633,7 @@ def test_token_dict_status_response_serialization(self): token_dict_status_response_model_json2 = token_dict_status_response_model.to_dict() assert token_dict_status_response_model_json2 == token_dict_status_response_model_json -class TestTopHitsResults(): +class TestModel_TopHitsResults(): """ Test Class for TopHitsResults """ @@ -10474,7 +10676,7 @@ def test_top_hits_results_serialization(self): top_hits_results_model_json2 = top_hits_results_model.to_dict() assert top_hits_results_model_json2 == top_hits_results_model_json -class TestTrainingDataSet(): +class TestModel_TrainingDataSet(): """ Test Class for TrainingDataSet """ @@ -10518,7 +10720,7 @@ def test_training_data_set_serialization(self): training_data_set_model_json2 = training_data_set_model.to_dict() assert training_data_set_model_json2 == training_data_set_model_json -class TestTrainingExample(): +class TestModel_TrainingExample(): """ Test Class for TrainingExample """ @@ -10549,7 +10751,7 @@ def test_training_example_serialization(self): training_example_model_json2 = training_example_model.to_dict() assert training_example_model_json2 == training_example_model_json -class TestTrainingExampleList(): +class TestModel_TrainingExampleList(): """ Test Class for TrainingExampleList """ @@ -10585,7 +10787,7 @@ def test_training_example_list_serialization(self): training_example_list_model_json2 = training_example_list_model.to_dict() assert training_example_list_model_json2 == training_example_list_model_json -class TestTrainingQuery(): +class TestModel_TrainingQuery(): """ Test Class for TrainingQuery """ @@ -10624,7 +10826,7 @@ def test_training_query_serialization(self): training_query_model_json2 = training_query_model.to_dict() assert training_query_model_json2 == training_query_model_json -class TestTrainingStatus(): +class TestModel_TrainingStatus(): """ Test Class for TrainingStatus """ @@ -10643,8 +10845,8 @@ def test_training_status_serialization(self): training_status_model_json['minimum_examples_added'] = True training_status_model_json['sufficient_label_diversity'] = True training_status_model_json['notices'] = 38 - training_status_model_json['successfully_trained'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - training_status_model_json['data_updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + training_status_model_json['successfully_trained'] = "2019-01-01T12:00:00Z" + training_status_model_json['data_updated'] = "2019-01-01T12:00:00Z" # Construct a model instance of TrainingStatus by calling from_dict on the json representation training_status_model = TrainingStatus.from_dict(training_status_model_json) @@ -10661,7 +10863,7 @@ def test_training_status_serialization(self): training_status_model_json2 = training_status_model.to_dict() assert training_status_model_json2 == training_status_model_json -class TestWordHeadingDetection(): +class 
TestModel_WordHeadingDetection(): """ Test Class for WordHeadingDetection """ @@ -10705,7 +10907,7 @@ def test_word_heading_detection_serialization(self): word_heading_detection_model_json2 = word_heading_detection_model.to_dict() assert word_heading_detection_model_json2 == word_heading_detection_model_json -class TestWordSettings(): +class TestModel_WordSettings(): """ Test Class for WordSettings """ @@ -10752,7 +10954,7 @@ def test_word_settings_serialization(self): word_settings_model_json2 = word_settings_model.to_dict() assert word_settings_model_json2 == word_settings_model_json -class TestWordStyle(): +class TestModel_WordStyle(): """ Test Class for WordStyle """ @@ -10782,7 +10984,7 @@ def test_word_style_serialization(self): word_style_model_json2 = word_style_model.to_dict() assert word_style_model_json2 == word_style_model_json -class TestXPathPatterns(): +class TestModel_XPathPatterns(): """ Test Class for XPathPatterns """ @@ -10811,7 +11013,7 @@ def test_x_path_patterns_serialization(self): x_path_patterns_model_json2 = x_path_patterns_model.to_dict() assert x_path_patterns_model_json2 == x_path_patterns_model_json -class TestCalculation(): +class TestModel_Calculation(): """ Test Class for Calculation """ @@ -10843,7 +11045,7 @@ def test_calculation_serialization(self): calculation_model_json2 = calculation_model.to_dict() assert calculation_model_json2 == calculation_model_json -class TestFilter(): +class TestModel_Filter(): """ Test Class for Filter """ @@ -10874,7 +11076,7 @@ def test_filter_serialization(self): filter_model_json2 = filter_model.to_dict() assert filter_model_json2 == filter_model_json -class TestHistogram(): +class TestModel_Histogram(): """ Test Class for Histogram """ @@ -10906,7 +11108,7 @@ def test_histogram_serialization(self): histogram_model_json2 = histogram_model.to_dict() assert histogram_model_json2 == histogram_model_json -class TestNested(): +class TestModel_Nested(): """ Test Class for Nested """ @@ -10937,7 +11139,7 @@ def test_nested_serialization(self): nested_model_json2 = nested_model.to_dict() assert nested_model_json2 == nested_model_json -class TestTerm(): +class TestModel_Term(): """ Test Class for Term """ @@ -10969,7 +11171,7 @@ def test_term_serialization(self): term_model_json2 = term_model.to_dict() assert term_model_json2 == term_model_json -class TestTimeslice(): +class TestModel_Timeslice(): """ Test Class for Timeslice """ @@ -11002,7 +11204,7 @@ def test_timeslice_serialization(self): timeslice_model_json2 = timeslice_model.to_dict() assert timeslice_model_json2 == timeslice_model_json -class TestTopHits(): +class TestModel_TopHits(): """ Test Class for TopHits """ diff --git a/test/unit/test_discovery_v2.py b/test/unit/test_discovery_v2.py index c2861b6d..500d6a12 100644 --- a/test/unit/test_discovery_v2.py +++ b/test/unit/test_discovery_v2.py @@ -55,6 +55,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -125,6 +127,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
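The date-valued expectations in the model tests above (next_crawl, successfully_trained, data_updated) now pin the ISO-8601 literal "2019-01-01T12:00:00Z" directly instead of routing it through datetime_to_string(string_to_datetime(...)). A minimal sketch of the equivalence this relies on, using the helpers these test modules already import from ibm_cloud_sdk_core.utils (assumed equivalence; worth verifying against the ibm-cloud-sdk-core version this repo pins):

    from ibm_cloud_sdk_core.utils import datetime_to_string, string_to_datetime

    # Parsing the RFC 3339 timestamp and re-serializing it yields the canonical
    # "Z"-suffixed literal the tests now hard-code, so the expected wire format
    # is unchanged; only the indirection through the helper pair is removed.
    assert datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) == "2019-01-01T12:00:00Z"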
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -137,7 +141,7 @@ def test_create_collection_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v2/projects/testString/collections') - mock_response = '{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "language", "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}]}' + mock_response = '{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}]}' responses.add(responses.POST, url, body=mock_response, @@ -153,7 +157,7 @@ def test_create_collection_all_params(self): project_id = 'testString' name = 'testString' description = 'testString' - language = 'testString' + language = 'en' enrichments = [collection_enrichment_model] # Invoke method @@ -173,7 +177,7 @@ def test_create_collection_all_params(self): req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) assert req_body['name'] == 'testString' assert req_body['description'] == 'testString' - assert req_body['language'] == 'testString' + assert req_body['language'] == 'en' assert req_body['enrichments'] == [collection_enrichment_model] @@ -184,7 +188,7 @@ def test_create_collection_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v2/projects/testString/collections') - mock_response = '{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "language", "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}]}' + mock_response = '{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}]}' responses.add(responses.POST, url, body=mock_response, @@ -200,7 +204,7 @@ def test_create_collection_value_error(self): project_id = 'testString' name = 'testString' description = 'testString' - language = 'testString' + language = 'en' enrichments = [collection_enrichment_model] # Pass in all but one required param and check for a ValueError @@ -224,6 +228,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -236,7 +242,7 @@ def test_get_collection_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v2/projects/testString/collections/testString') - mock_response = '{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "language", "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}]}' + mock_response = '{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}]}' responses.add(responses.GET, url, body=mock_response, @@ -266,7 +272,7 @@ def test_get_collection_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v2/projects/testString/collections/testString') - mock_response = '{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "language", "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}]}' + mock_response = '{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}]}' responses.add(responses.GET, url, body=mock_response, @@ -298,6 +304,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -310,7 +318,7 @@ def test_update_collection_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v2/projects/testString/collections/testString') - mock_response = '{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "language", "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}]}' + mock_response = '{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}]}' responses.add(responses.POST, url, body=mock_response, @@ -356,7 +364,7 @@ def test_update_collection_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v2/projects/testString/collections/testString') - mock_response = '{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "language", "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}]}' + mock_response = '{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}]}' responses.add(responses.POST, url, body=mock_response, @@ -396,6 +404,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -474,6 +484,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -486,7 +498,7 @@ def test_query_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v2/projects/testString/query') - mock_response = '{"matching_results": 16, "results": [{"document_id": "document_id", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "result_metadata": {"document_retrieval_source": "search", "collection_id": "collection_id", "confidence": 10}, "document_passages": [{"passage_text": "passage_text", "start_offset": 12, "end_offset": 10, "field": "field", "confidence": 0, "answers": [{"answer_text": "answer_text", "start_offset": 12, "end_offset": 10, "confidence": 0}]}]}], "aggregations": [{"type": "filter", "match": "match", "matching_results": 16}], "retrieval_details": {"document_retrieval_strategy": "untrained"}, "suggested_query": "suggested_query", "suggested_refinements": [{"text": "text"}], "table_results": [{"table_id": "table_id", "source_document_id": "source_document_id", "collection_id": "collection_id", "table_html": "table_html", "table_html_offset": 17, "table": {"location": {"begin": 5, "end": 3}, "text": "text", "section_title": {"text": "text", "location": {"begin": 5, "end": 3}}, "title": {"text": "text", "location": {"begin": 5, "end": 3}}, "table_headers": [{"cell_id": "cell_id", "location": {"anyKey": "anyValue"}, "text": "text", "row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16}], "row_headers": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text", "text_normalized": "text_normalized", "row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16}], "column_headers": [{"cell_id": "cell_id", "location": {"anyKey": "anyValue"}, "text": "text", "text_normalized": "text_normalized", "row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16}], "key_value_pairs": [{"key": {"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text"}, "value": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text"}]}], "body_cells": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text", "row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16, "row_header_ids": [{"id": "id"}], "row_header_texts": [{"text": "text"}], "row_header_texts_normalized": [{"text_normalized": "text_normalized"}], "column_header_ids": [{"id": "id"}], "column_header_texts": [{"text": "text"}], "column_header_texts_normalized": [{"text_normalized": "text_normalized"}], "attributes": [{"type": "type", "text": "text", "location": {"begin": 5, "end": 3}}]}], "contexts": [{"text": "text", "location": {"begin": 5, "end": 3}}]}}], "passages": [{"passage_text": "passage_text", "passage_score": 13, "document_id": "document_id", "collection_id": "collection_id", "start_offset": 12, "end_offset": 10, "field": "field", "confidence": 0, "answers": [{"answer_text": "answer_text", "start_offset": 
12, "end_offset": 10, "confidence": 0}]}]}' + mock_response = '{"matching_results": 16, "results": [{"document_id": "document_id", "metadata": {"mapKey": "anyValue"}, "result_metadata": {"document_retrieval_source": "search", "collection_id": "collection_id", "confidence": 10}, "document_passages": [{"passage_text": "passage_text", "start_offset": 12, "end_offset": 10, "field": "field", "confidence": 0, "answers": [{"answer_text": "answer_text", "start_offset": 12, "end_offset": 10, "confidence": 0}]}]}], "aggregations": [{"type": "filter", "match": "match", "matching_results": 16}], "retrieval_details": {"document_retrieval_strategy": "untrained"}, "suggested_query": "suggested_query", "suggested_refinements": [{"text": "text"}], "table_results": [{"table_id": "table_id", "source_document_id": "source_document_id", "collection_id": "collection_id", "table_html": "table_html", "table_html_offset": 17, "table": {"location": {"begin": 5, "end": 3}, "text": "text", "section_title": {"text": "text", "location": {"begin": 5, "end": 3}}, "title": {"text": "text", "location": {"begin": 5, "end": 3}}, "table_headers": [{"cell_id": "cell_id", "location": {"anyKey": "anyValue"}, "text": "text", "row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16}], "row_headers": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text", "text_normalized": "text_normalized", "row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16}], "column_headers": [{"cell_id": "cell_id", "location": {"anyKey": "anyValue"}, "text": "text", "text_normalized": "text_normalized", "row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16}], "key_value_pairs": [{"key": {"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text"}, "value": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text"}]}], "body_cells": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text", "row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16, "row_header_ids": [{"id": "id"}], "row_header_texts": [{"text": "text"}], "row_header_texts_normalized": [{"text_normalized": "text_normalized"}], "column_header_ids": [{"id": "id"}], "column_header_texts": [{"text": "text"}], "column_header_texts_normalized": [{"text_normalized": "text_normalized"}], "attributes": [{"type": "type", "text": "text", "location": {"begin": 5, "end": 3}}]}], "contexts": [{"text": "text", "location": {"begin": 5, "end": 3}}]}}], "passages": [{"passage_text": "passage_text", "passage_score": 13, "document_id": "document_id", "collection_id": "collection_id", "start_offset": 12, "end_offset": 10, "field": "field", "confidence": 0, "answers": [{"answer_text": "answer_text", "start_offset": 12, "end_offset": 10, "confidence": 0}]}]}' responses.add(responses.POST, url, body=mock_response, @@ -509,9 +521,9 @@ def test_query_all_params(self): query_large_passages_model['per_document'] = True query_large_passages_model['max_per_document'] = 38 query_large_passages_model['fields'] = ['testString'] - query_large_passages_model['count'] = 100 + query_large_passages_model['count'] = 400 query_large_passages_model['characters'] = 50 - query_large_passages_model['find_answers'] = True + query_large_passages_model['find_answers'] = False query_large_passages_model['max_answers_per_passage'] = 38 # Set up parameter values @@ -579,7 +591,7 @@ def test_query_required_params(self): """ # Set 
up mock url = self.preprocess_url(_base_url + '/v2/projects/testString/query') - mock_response = '{"matching_results": 16, "results": [{"document_id": "document_id", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "result_metadata": {"document_retrieval_source": "search", "collection_id": "collection_id", "confidence": 10}, "document_passages": [{"passage_text": "passage_text", "start_offset": 12, "end_offset": 10, "field": "field", "confidence": 0, "answers": [{"answer_text": "answer_text", "start_offset": 12, "end_offset": 10, "confidence": 0}]}]}], "aggregations": [{"type": "filter", "match": "match", "matching_results": 16}], "retrieval_details": {"document_retrieval_strategy": "untrained"}, "suggested_query": "suggested_query", "suggested_refinements": [{"text": "text"}], "table_results": [{"table_id": "table_id", "source_document_id": "source_document_id", "collection_id": "collection_id", "table_html": "table_html", "table_html_offset": 17, "table": {"location": {"begin": 5, "end": 3}, "text": "text", "section_title": {"text": "text", "location": {"begin": 5, "end": 3}}, "title": {"text": "text", "location": {"begin": 5, "end": 3}}, "table_headers": [{"cell_id": "cell_id", "location": {"anyKey": "anyValue"}, "text": "text", "row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16}], "row_headers": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text", "text_normalized": "text_normalized", "row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16}], "column_headers": [{"cell_id": "cell_id", "location": {"anyKey": "anyValue"}, "text": "text", "text_normalized": "text_normalized", "row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16}], "key_value_pairs": [{"key": {"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text"}, "value": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text"}]}], "body_cells": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text", "row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16, "row_header_ids": [{"id": "id"}], "row_header_texts": [{"text": "text"}], "row_header_texts_normalized": [{"text_normalized": "text_normalized"}], "column_header_ids": [{"id": "id"}], "column_header_texts": [{"text": "text"}], "column_header_texts_normalized": [{"text_normalized": "text_normalized"}], "attributes": [{"type": "type", "text": "text", "location": {"begin": 5, "end": 3}}]}], "contexts": [{"text": "text", "location": {"begin": 5, "end": 3}}]}}], "passages": [{"passage_text": "passage_text", "passage_score": 13, "document_id": "document_id", "collection_id": "collection_id", "start_offset": 12, "end_offset": 10, "field": "field", "confidence": 0, "answers": [{"answer_text": "answer_text", "start_offset": 12, "end_offset": 10, "confidence": 0}]}]}' + mock_response = '{"matching_results": 16, "results": [{"document_id": "document_id", "metadata": {"mapKey": "anyValue"}, "result_metadata": {"document_retrieval_source": "search", "collection_id": "collection_id", "confidence": 10}, "document_passages": [{"passage_text": "passage_text", "start_offset": 12, "end_offset": 10, "field": "field", "confidence": 0, "answers": [{"answer_text": "answer_text", "start_offset": 12, "end_offset": 10, "confidence": 0}]}]}], "aggregations": [{"type": "filter", "match": "match", "matching_results": 16}], "retrieval_details": {"document_retrieval_strategy": 
"untrained"}, "suggested_query": "suggested_query", "suggested_refinements": [{"text": "text"}], "table_results": [{"table_id": "table_id", "source_document_id": "source_document_id", "collection_id": "collection_id", "table_html": "table_html", "table_html_offset": 17, "table": {"location": {"begin": 5, "end": 3}, "text": "text", "section_title": {"text": "text", "location": {"begin": 5, "end": 3}}, "title": {"text": "text", "location": {"begin": 5, "end": 3}}, "table_headers": [{"cell_id": "cell_id", "location": {"anyKey": "anyValue"}, "text": "text", "row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16}], "row_headers": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text", "text_normalized": "text_normalized", "row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16}], "column_headers": [{"cell_id": "cell_id", "location": {"anyKey": "anyValue"}, "text": "text", "text_normalized": "text_normalized", "row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16}], "key_value_pairs": [{"key": {"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text"}, "value": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text"}]}], "body_cells": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text", "row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16, "row_header_ids": [{"id": "id"}], "row_header_texts": [{"text": "text"}], "row_header_texts_normalized": [{"text_normalized": "text_normalized"}], "column_header_ids": [{"id": "id"}], "column_header_texts": [{"text": "text"}], "column_header_texts_normalized": [{"text_normalized": "text_normalized"}], "attributes": [{"type": "type", "text": "text", "location": {"begin": 5, "end": 3}}]}], "contexts": [{"text": "text", "location": {"begin": 5, "end": 3}}]}}], "passages": [{"passage_text": "passage_text", "passage_score": 13, "document_id": "document_id", "collection_id": "collection_id", "start_offset": 12, "end_offset": 10, "field": "field", "confidence": 0, "answers": [{"answer_text": "answer_text", "start_offset": 12, "end_offset": 10, "confidence": 0}]}]}' responses.add(responses.POST, url, body=mock_response, @@ -607,7 +619,7 @@ def test_query_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v2/projects/testString/query') - mock_response = '{"matching_results": 16, "results": [{"document_id": "document_id", "metadata": {"mapKey": {"anyKey": "anyValue"}}, "result_metadata": {"document_retrieval_source": "search", "collection_id": "collection_id", "confidence": 10}, "document_passages": [{"passage_text": "passage_text", "start_offset": 12, "end_offset": 10, "field": "field", "confidence": 0, "answers": [{"answer_text": "answer_text", "start_offset": 12, "end_offset": 10, "confidence": 0}]}]}], "aggregations": [{"type": "filter", "match": "match", "matching_results": 16}], "retrieval_details": {"document_retrieval_strategy": "untrained"}, "suggested_query": "suggested_query", "suggested_refinements": [{"text": "text"}], "table_results": [{"table_id": "table_id", "source_document_id": "source_document_id", "collection_id": "collection_id", "table_html": "table_html", "table_html_offset": 17, "table": {"location": {"begin": 5, "end": 3}, "text": "text", "section_title": {"text": "text", "location": {"begin": 5, "end": 3}}, "title": {"text": "text", "location": {"begin": 5, "end": 3}}, "table_headers": 
[{"cell_id": "cell_id", "location": {"anyKey": "anyValue"}, "text": "text", "row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16}], "row_headers": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text", "text_normalized": "text_normalized", "row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16}], "column_headers": [{"cell_id": "cell_id", "location": {"anyKey": "anyValue"}, "text": "text", "text_normalized": "text_normalized", "row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16}], "key_value_pairs": [{"key": {"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text"}, "value": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text"}]}], "body_cells": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text", "row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16, "row_header_ids": [{"id": "id"}], "row_header_texts": [{"text": "text"}], "row_header_texts_normalized": [{"text_normalized": "text_normalized"}], "column_header_ids": [{"id": "id"}], "column_header_texts": [{"text": "text"}], "column_header_texts_normalized": [{"text_normalized": "text_normalized"}], "attributes": [{"type": "type", "text": "text", "location": {"begin": 5, "end": 3}}]}], "contexts": [{"text": "text", "location": {"begin": 5, "end": 3}}]}}], "passages": [{"passage_text": "passage_text", "passage_score": 13, "document_id": "document_id", "collection_id": "collection_id", "start_offset": 12, "end_offset": 10, "field": "field", "confidence": 0, "answers": [{"answer_text": "answer_text", "start_offset": 12, "end_offset": 10, "confidence": 0}]}]}' + mock_response = '{"matching_results": 16, "results": [{"document_id": "document_id", "metadata": {"mapKey": "anyValue"}, "result_metadata": {"document_retrieval_source": "search", "collection_id": "collection_id", "confidence": 10}, "document_passages": [{"passage_text": "passage_text", "start_offset": 12, "end_offset": 10, "field": "field", "confidence": 0, "answers": [{"answer_text": "answer_text", "start_offset": 12, "end_offset": 10, "confidence": 0}]}]}], "aggregations": [{"type": "filter", "match": "match", "matching_results": 16}], "retrieval_details": {"document_retrieval_strategy": "untrained"}, "suggested_query": "suggested_query", "suggested_refinements": [{"text": "text"}], "table_results": [{"table_id": "table_id", "source_document_id": "source_document_id", "collection_id": "collection_id", "table_html": "table_html", "table_html_offset": 17, "table": {"location": {"begin": 5, "end": 3}, "text": "text", "section_title": {"text": "text", "location": {"begin": 5, "end": 3}}, "title": {"text": "text", "location": {"begin": 5, "end": 3}}, "table_headers": [{"cell_id": "cell_id", "location": {"anyKey": "anyValue"}, "text": "text", "row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16}], "row_headers": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text", "text_normalized": "text_normalized", "row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16}], "column_headers": [{"cell_id": "cell_id", "location": {"anyKey": "anyValue"}, "text": "text", "text_normalized": "text_normalized", "row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16}], "key_value_pairs": [{"key": {"cell_id": "cell_id", "location": {"begin": 
5, "end": 3}, "text": "text"}, "value": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text"}]}], "body_cells": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text", "row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16, "row_header_ids": [{"id": "id"}], "row_header_texts": [{"text": "text"}], "row_header_texts_normalized": [{"text_normalized": "text_normalized"}], "column_header_ids": [{"id": "id"}], "column_header_texts": [{"text": "text"}], "column_header_texts_normalized": [{"text_normalized": "text_normalized"}], "attributes": [{"type": "type", "text": "text", "location": {"begin": 5, "end": 3}}]}], "contexts": [{"text": "text", "location": {"begin": 5, "end": 3}}]}}], "passages": [{"passage_text": "passage_text", "passage_score": 13, "document_id": "document_id", "collection_id": "collection_id", "start_offset": 12, "end_offset": 10, "field": "field", "confidence": 0, "answers": [{"answer_text": "answer_text", "start_offset": 12, "end_offset": 10, "confidence": 0}]}]}' responses.add(responses.POST, url, body=mock_response, @@ -637,6 +649,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -758,6 +772,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -880,6 +896,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -996,6 +1014,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1110,6 +1130,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1190,6 +1212,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1216,7 +1240,7 @@ def test_add_document_all_params(self): filename = 'testString' file_content_type = 'application/json' metadata = 'testString' - x_watson_discovery_force = True + x_watson_discovery_force = False # Invoke method response = _service.add_document( @@ -1304,6 +1328,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1331,7 +1357,7 @@ def test_update_document_all_params(self): filename = 'testString' file_content_type = 'application/json' metadata = 'testString' - x_watson_discovery_force = True + x_watson_discovery_force = False # Invoke method response = _service.update_document( @@ -1424,6 +1450,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1447,7 +1475,7 @@ def test_delete_document_all_params(self): project_id = 'testString' collection_id = 'testString' document_id = 'testString' - x_watson_discovery_force = True + x_watson_discovery_force = False # Invoke method response = _service.delete_document( @@ -1546,6 +1574,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1616,6 +1646,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1680,6 +1712,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1778,6 +1812,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1852,6 +1888,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1954,6 +1992,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2032,6 +2072,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2044,7 +2086,7 @@ def test_analyze_document_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v2/projects/testString/collections/testString/analyze') - mock_response = '{"notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "collection_id": "collection_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}], "result": {"metadata": {"mapKey": {"anyKey": "anyValue"}}}}' + mock_response = '{"notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "collection_id": "collection_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}], "result": {"metadata": {"mapKey": "anyValue"}}}' responses.add(responses.POST, url, body=mock_response, @@ -2082,7 +2124,7 @@ def test_analyze_document_required_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v2/projects/testString/collections/testString/analyze') - mock_response = '{"notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "collection_id": "collection_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}], "result": {"metadata": {"mapKey": {"anyKey": "anyValue"}}}}' + mock_response = '{"notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "collection_id": "collection_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}], "result": {"metadata": {"mapKey": "anyValue"}}}' responses.add(responses.POST, url, body=mock_response, @@ -2112,7 +2154,7 @@ def test_analyze_document_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v2/projects/testString/collections/testString/analyze') - mock_response = '{"notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "collection_id": "collection_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}], "result": {"metadata": {"mapKey": {"anyKey": "anyValue"}}}}' + mock_response = '{"notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "collection_id": "collection_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}], "result": {"metadata": {"mapKey": "anyValue"}}}' responses.add(responses.POST, url, body=mock_response, @@ -2154,6 
+2196,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2224,6 +2268,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2372,6 +2418,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2446,6 +2494,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2531,6 +2581,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2609,6 +2661,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2670,6 +2724,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2818,6 +2874,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2888,6 +2946,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2991,6 +3051,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
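In the analyze_document mocks above, result metadata is now modeled as a flat string-to-value map ("mapKey": "anyValue") instead of wrapping each value in a nested object, presumably to match the Dict[str, Any] shape the generated models expose. A small check of the difference:

    import json

    old_shape = json.loads('{"metadata": {"mapKey": {"anyKey": "anyValue"}}}')
    new_shape = json.loads('{"metadata": {"mapKey": "anyValue"}}')
    assert isinstance(old_shape['metadata']['mapKey'], dict)  # value was itself wrapped
    assert isinstance(new_shape['metadata']['mapKey'], str)   # value is the payload directly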
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -3065,6 +3127,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -3134,7 +3198,7 @@ def test_delete_user_data_value_error(self): # Start of Model Tests ############################################################################## # region -class TestAnalyzedDocument(): +class TestModel_AnalyzedDocument(): """ Test Class for AnalyzedDocument """ @@ -3148,7 +3212,7 @@ def test_analyzed_document_serialization(self): notice_model = {} # Notice notice_model['notice_id'] = 'testString' - notice_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + notice_model['created'] = "2019-01-01T12:00:00Z" notice_model['document_id'] = 'testString' notice_model['collection_id'] = 'testString' notice_model['query_id'] = 'testString' @@ -3180,7 +3244,7 @@ def test_analyzed_document_serialization(self): analyzed_document_model_json2 = analyzed_document_model.to_dict() assert analyzed_document_model_json2 == analyzed_document_model_json -class TestAnalyzedResult(): +class TestModel_AnalyzedResult(): """ Test Class for AnalyzedResult """ @@ -3210,7 +3274,17 @@ def test_analyzed_result_serialization(self): analyzed_result_model_json2 = analyzed_result_model.to_dict() assert analyzed_result_model_json2 == analyzed_result_model_json -class TestCollection(): + # Test get_properties and set_properties methods. 
+ analyzed_result_model.set_properties({}) + actual_dict = analyzed_result_model.get_properties() + assert actual_dict == {} + + expected_dict = {'foo': { 'foo': 'bar' }} + analyzed_result_model.set_properties(expected_dict) + actual_dict = analyzed_result_model.get_properties() + assert actual_dict == expected_dict + +class TestModel_Collection(): """ Test Class for Collection """ @@ -3240,7 +3314,7 @@ def test_collection_serialization(self): collection_model_json2 = collection_model.to_dict() assert collection_model_json2 == collection_model_json -class TestCollectionDetails(): +class TestModel_CollectionDetails(): """ Test Class for CollectionDetails """ @@ -3261,8 +3335,8 @@ def test_collection_details_serialization(self): collection_details_model_json['collection_id'] = 'testString' collection_details_model_json['name'] = 'testString' collection_details_model_json['description'] = 'testString' - collection_details_model_json['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - collection_details_model_json['language'] = 'testString' + collection_details_model_json['created'] = "2019-01-01T12:00:00Z" + collection_details_model_json['language'] = 'en' collection_details_model_json['enrichments'] = [collection_enrichment_model] # Construct a model instance of CollectionDetails by calling from_dict on the json representation @@ -3280,7 +3354,7 @@ def test_collection_details_serialization(self): collection_details_model_json2 = collection_details_model.to_dict() assert collection_details_model_json2 == collection_details_model_json -class TestCollectionEnrichment(): +class TestModel_CollectionEnrichment(): """ Test Class for CollectionEnrichment """ @@ -3310,7 +3384,7 @@ def test_collection_enrichment_serialization(self): collection_enrichment_model_json2 = collection_enrichment_model.to_dict() assert collection_enrichment_model_json2 == collection_enrichment_model_json -class TestCompletions(): +class TestModel_Completions(): """ Test Class for Completions """ @@ -3339,7 +3413,7 @@ def test_completions_serialization(self): completions_model_json2 = completions_model.to_dict() assert completions_model_json2 == completions_model_json -class TestComponentSettingsAggregation(): +class TestModel_ComponentSettingsAggregation(): """ Test Class for ComponentSettingsAggregation """ @@ -3371,7 +3445,7 @@ def test_component_settings_aggregation_serialization(self): component_settings_aggregation_model_json2 = component_settings_aggregation_model.to_dict() assert component_settings_aggregation_model_json2 == component_settings_aggregation_model_json -class TestComponentSettingsFieldsShown(): +class TestModel_ComponentSettingsFieldsShown(): """ Test Class for ComponentSettingsFieldsShown """ @@ -3410,7 +3484,7 @@ def test_component_settings_fields_shown_serialization(self): component_settings_fields_shown_model_json2 = component_settings_fields_shown_model.to_dict() assert component_settings_fields_shown_model_json2 == component_settings_fields_shown_model_json -class TestComponentSettingsFieldsShownBody(): +class TestModel_ComponentSettingsFieldsShownBody(): """ Test Class for ComponentSettingsFieldsShownBody """ @@ -3440,7 +3514,7 @@ def test_component_settings_fields_shown_body_serialization(self): component_settings_fields_shown_body_model_json2 = component_settings_fields_shown_body_model.to_dict() assert component_settings_fields_shown_body_model_json2 == component_settings_fields_shown_body_model_json -class TestComponentSettingsFieldsShownTitle(): +class 
TestModel_ComponentSettingsFieldsShownTitle(): """ Test Class for ComponentSettingsFieldsShownTitle """ @@ -3469,7 +3543,7 @@ def test_component_settings_fields_shown_title_serialization(self): component_settings_fields_shown_title_model_json2 = component_settings_fields_shown_title_model.to_dict() assert component_settings_fields_shown_title_model_json2 == component_settings_fields_shown_title_model_json -class TestComponentSettingsResponse(): +class TestModel_ComponentSettingsResponse(): """ Test Class for ComponentSettingsResponse """ @@ -3521,7 +3595,7 @@ def test_component_settings_response_serialization(self): component_settings_response_model_json2 = component_settings_response_model.to_dict() assert component_settings_response_model_json2 == component_settings_response_model_json -class TestCreateEnrichment(): +class TestModel_CreateEnrichment(): """ Test Class for CreateEnrichment """ @@ -3561,7 +3635,7 @@ def test_create_enrichment_serialization(self): create_enrichment_model_json2 = create_enrichment_model.to_dict() assert create_enrichment_model_json2 == create_enrichment_model_json -class TestDefaultQueryParams(): +class TestModel_DefaultQueryParams(): """ Test Class for DefaultQueryParams """ @@ -3618,7 +3692,7 @@ def test_default_query_params_serialization(self): default_query_params_model_json2 = default_query_params_model.to_dict() assert default_query_params_model_json2 == default_query_params_model_json -class TestDefaultQueryParamsPassages(): +class TestModel_DefaultQueryParamsPassages(): """ Test Class for DefaultQueryParamsPassages """ @@ -3652,7 +3726,7 @@ def test_default_query_params_passages_serialization(self): default_query_params_passages_model_json2 = default_query_params_passages_model.to_dict() assert default_query_params_passages_model_json2 == default_query_params_passages_model_json -class TestDefaultQueryParamsSuggestedRefinements(): +class TestModel_DefaultQueryParamsSuggestedRefinements(): """ Test Class for DefaultQueryParamsSuggestedRefinements """ @@ -3682,7 +3756,7 @@ def test_default_query_params_suggested_refinements_serialization(self): default_query_params_suggested_refinements_model_json2 = default_query_params_suggested_refinements_model.to_dict() assert default_query_params_suggested_refinements_model_json2 == default_query_params_suggested_refinements_model_json -class TestDefaultQueryParamsTableResults(): +class TestModel_DefaultQueryParamsTableResults(): """ Test Class for DefaultQueryParamsTableResults """ @@ -3713,7 +3787,7 @@ def test_default_query_params_table_results_serialization(self): default_query_params_table_results_model_json2 = default_query_params_table_results_model.to_dict() assert default_query_params_table_results_model_json2 == default_query_params_table_results_model_json -class TestDeleteDocumentResponse(): +class TestModel_DeleteDocumentResponse(): """ Test Class for DeleteDocumentResponse """ @@ -3743,7 +3817,7 @@ def test_delete_document_response_serialization(self): delete_document_response_model_json2 = delete_document_response_model.to_dict() assert delete_document_response_model_json2 == delete_document_response_model_json -class TestDocumentAccepted(): +class TestModel_DocumentAccepted(): """ Test Class for DocumentAccepted """ @@ -3773,7 +3847,7 @@ def test_document_accepted_serialization(self): document_accepted_model_json2 = document_accepted_model.to_dict() assert document_accepted_model_json2 == document_accepted_model_json -class TestDocumentAttribute(): +class TestModel_DocumentAttribute(): """ Test 
Class for DocumentAttribute """ @@ -3810,7 +3884,7 @@ def test_document_attribute_serialization(self): document_attribute_model_json2 = document_attribute_model.to_dict() assert document_attribute_model_json2 == document_attribute_model_json -class TestEnrichment(): +class TestModel_Enrichment(): """ Test Class for Enrichment """ @@ -3851,7 +3925,7 @@ def test_enrichment_serialization(self): enrichment_model_json2 = enrichment_model.to_dict() assert enrichment_model_json2 == enrichment_model_json -class TestEnrichmentOptions(): +class TestModel_EnrichmentOptions(): """ Test Class for EnrichmentOptions """ @@ -3883,7 +3957,7 @@ def test_enrichment_options_serialization(self): enrichment_options_model_json2 = enrichment_options_model.to_dict() assert enrichment_options_model_json2 == enrichment_options_model_json -class TestEnrichments(): +class TestModel_Enrichments(): """ Test Class for Enrichments """ @@ -3927,7 +4001,7 @@ def test_enrichments_serialization(self): enrichments_model_json2 = enrichments_model.to_dict() assert enrichments_model_json2 == enrichments_model_json -class TestField(): +class TestModel_Field(): """ Test Class for Field """ @@ -3958,7 +4032,7 @@ def test_field_serialization(self): field_model_json2 = field_model.to_dict() assert field_model_json2 == field_model_json -class TestListCollectionsResponse(): +class TestModel_ListCollectionsResponse(): """ Test Class for ListCollectionsResponse """ @@ -3993,7 +4067,7 @@ def test_list_collections_response_serialization(self): list_collections_response_model_json2 = list_collections_response_model.to_dict() assert list_collections_response_model_json2 == list_collections_response_model_json -class TestListFieldsResponse(): +class TestModel_ListFieldsResponse(): """ Test Class for ListFieldsResponse """ @@ -4029,7 +4103,7 @@ def test_list_fields_response_serialization(self): list_fields_response_model_json2 = list_fields_response_model.to_dict() assert list_fields_response_model_json2 == list_fields_response_model_json -class TestListProjectsResponse(): +class TestModel_ListProjectsResponse(): """ Test Class for ListProjectsResponse """ @@ -4078,7 +4152,7 @@ def test_list_projects_response_serialization(self): list_projects_response_model_json2 = list_projects_response_model.to_dict() assert list_projects_response_model_json2 == list_projects_response_model_json -class TestNotice(): +class TestModel_Notice(): """ Test Class for Notice """ @@ -4091,7 +4165,7 @@ def test_notice_serialization(self): # Construct a json representation of a Notice model notice_model_json = {} notice_model_json['notice_id'] = 'testString' - notice_model_json['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + notice_model_json['created'] = "2019-01-01T12:00:00Z" notice_model_json['document_id'] = 'testString' notice_model_json['collection_id'] = 'testString' notice_model_json['query_id'] = 'testString' @@ -4114,7 +4188,7 @@ def test_notice_serialization(self): notice_model_json2 = notice_model.to_dict() assert notice_model_json2 == notice_model_json -class TestProjectDetails(): +class TestModel_ProjectDetails(): """ Test Class for ProjectDetails """ @@ -4190,7 +4264,7 @@ def test_project_details_serialization(self): project_details_model_json2 = project_details_model.to_dict() assert project_details_model_json2 == project_details_model_json -class TestProjectListDetails(): +class TestModel_ProjectListDetails(): """ Test Class for ProjectListDetails """ @@ -4236,7 +4310,7 @@ def 
test_project_list_details_serialization(self):
         project_list_details_model_json2 = project_list_details_model.to_dict()
         assert project_list_details_model_json2 == project_list_details_model_json

-class TestProjectListDetailsRelevancyTrainingStatus():
+class TestModel_ProjectListDetailsRelevancyTrainingStatus():
     """
     Test Class for ProjectListDetailsRelevancyTrainingStatus
     """
@@ -4273,7 +4347,7 @@ def test_project_list_details_relevancy_training_status_serialization(self):
         project_list_details_relevancy_training_status_model_json2 = project_list_details_relevancy_training_status_model.to_dict()
         assert project_list_details_relevancy_training_status_model_json2 == project_list_details_relevancy_training_status_model_json

-class TestQueryAggregation():
+class TestModel_QueryAggregation():
     """
     Test Class for QueryAggregation
     """
@@ -4302,7 +4376,7 @@ def test_query_aggregation_serialization(self):
         query_aggregation_model_json2 = query_aggregation_model.to_dict()
         assert query_aggregation_model_json2 == query_aggregation_model_json

-class TestQueryGroupByAggregationResult():
+class TestModel_QueryGroupByAggregationResult():
     """
     Test Class for QueryGroupByAggregationResult
     """
@@ -4343,7 +4417,7 @@ def test_query_group_by_aggregation_result_serialization(self):
         query_group_by_aggregation_result_model_json2 = query_group_by_aggregation_result_model.to_dict()
         assert query_group_by_aggregation_result_model_json2 == query_group_by_aggregation_result_model_json

-class TestQueryHistogramAggregationResult():
+class TestModel_QueryHistogramAggregationResult():
     """
     Test Class for QueryHistogramAggregationResult
     """
@@ -4381,7 +4455,7 @@ def test_query_histogram_aggregation_result_serialization(self):
         query_histogram_aggregation_result_model_json2 = query_histogram_aggregation_result_model.to_dict()
         assert query_histogram_aggregation_result_model_json2 == query_histogram_aggregation_result_model_json

-class TestQueryLargePassages():
+class TestModel_QueryLargePassages():
     """
     Test Class for QueryLargePassages
     """
@@ -4397,9 +4471,9 @@ def test_query_large_passages_serialization(self):
         query_large_passages_model_json['per_document'] = True
         query_large_passages_model_json['max_per_document'] = 38
         query_large_passages_model_json['fields'] = ['testString']
-        query_large_passages_model_json['count'] = 100
+        query_large_passages_model_json['count'] = 400
         query_large_passages_model_json['characters'] = 50
-        query_large_passages_model_json['find_answers'] = True
+        query_large_passages_model_json['find_answers'] = False
         query_large_passages_model_json['max_answers_per_passage'] = 38

         # Construct a model instance of QueryLargePassages by calling from_dict on the json representation
@@ -4417,7 +4491,7 @@ def test_query_large_passages_serialization(self):
         query_large_passages_model_json2 = query_large_passages_model.to_dict()
         assert query_large_passages_model_json2 == query_large_passages_model_json

-class TestQueryLargeSuggestedRefinements():
+class TestModel_QueryLargeSuggestedRefinements():
     """
     Test Class for QueryLargeSuggestedRefinements
     """
@@ -4447,7 +4521,7 @@ def test_query_large_suggested_refinements_serialization(self):
         query_large_suggested_refinements_model_json2 = query_large_suggested_refinements_model.to_dict()
         assert query_large_suggested_refinements_model_json2 == query_large_suggested_refinements_model_json

-class TestQueryLargeTableResults():
+class TestModel_QueryLargeTableResults():
     """
     Test Class for QueryLargeTableResults
     """
@@ -4477,7 +4551,7 @@ def test_query_large_table_results_serialization(self):
         query_large_table_results_model_json2 = query_large_table_results_model.to_dict()
         assert query_large_table_results_model_json2 == query_large_table_results_model_json

-class TestQueryNoticesResponse():
+class TestModel_QueryNoticesResponse():
     """
     Test Class for QueryNoticesResponse
     """
@@ -4491,7 +4565,7 @@ def test_query_notices_response_serialization(self):

         notice_model = {} # Notice
         notice_model['notice_id'] = 'testString'
-        notice_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z"))
+        notice_model['created'] = "2019-01-01T12:00:00Z"
         notice_model['document_id'] = 'testString'
         notice_model['collection_id'] = 'testString'
         notice_model['query_id'] = 'testString'
@@ -4519,7 +4593,7 @@ def test_query_notices_response_serialization(self):
         query_notices_response_model_json2 = query_notices_response_model.to_dict()
         assert query_notices_response_model_json2 == query_notices_response_model_json

-class TestQueryResponse():
+class TestModel_QueryResponse():
     """
     Test Class for QueryResponse
     """
@@ -4555,7 +4629,7 @@ def test_query_response_serialization(self):
         query_result_model['metadata'] = {}
         query_result_model['result_metadata'] = query_result_metadata_model
         query_result_model['document_passages'] = [query_result_passage_model]
-        query_result_model['foo'] = { 'foo': 'bar' }
+        query_result_model['id'] = { 'foo': 'bar' }

         query_aggregation_model = {} # QueryFilterAggregation
         query_aggregation_model['type'] = 'filter'
@@ -4715,7 +4789,7 @@ def test_query_response_serialization(self):
         query_response_model_json2 = query_response_model.to_dict()
         assert query_response_model_json2 == query_response_model_json

-class TestQueryResponsePassage():
+class TestModel_QueryResponsePassage():
     """
     Test Class for QueryResponsePassage
     """
@@ -4760,7 +4834,7 @@ def test_query_response_passage_serialization(self):
         query_response_passage_model_json2 = query_response_passage_model.to_dict()
         assert query_response_passage_model_json2 == query_response_passage_model_json

-class TestQueryResult():
+class TestModel_QueryResult():
     """
     Test Class for QueryResult
     """
@@ -4814,7 +4888,17 @@ def test_query_result_serialization(self):
         query_result_model_json2 = query_result_model.to_dict()
         assert query_result_model_json2 == query_result_model_json

-class TestQueryResultMetadata():
+        # Test get_properties and set_properties methods.
+        query_result_model.set_properties({})
+        actual_dict = query_result_model.get_properties()
+        assert actual_dict == {}
+
+        expected_dict = {'foo': { 'foo': 'bar' }}
+        query_result_model.set_properties(expected_dict)
+        actual_dict = query_result_model.get_properties()
+        assert actual_dict == expected_dict
+
+class TestModel_QueryResultMetadata():
     """
     Test Class for QueryResultMetadata
     """
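The new assertions above exercise the get_properties/set_properties accessors that generated models with arbitrary additional fields expose. Roughly, a model such as QueryResult keeps unknown keys as per-instance attributes alongside its declared schema. The sketch below is a standalone illustration of that contract, not the SDK code; DynamicModel and _schema_fields are illustrative names only.

    # Sketch of the dynamic-property contract the new assertions rely on.
    class DynamicModel:
        _schema_fields = {'document_id', 'metadata'}

        def get_properties(self):
            # Everything outside the declared schema counts as an arbitrary property.
            return {k: v for k, v in vars(self).items() if k not in self._schema_fields}

        def set_properties(self, props):
            # Replace, not merge: drop the current arbitrary attributes first.
            for k in [k for k in vars(self) if k not in self._schema_fields]:
                delattr(self, k)
            for k, v in props.items():
                setattr(self, k, v)

    m = DynamicModel()
    m.set_properties({'foo': {'foo': 'bar'}})
    assert m.get_properties() == {'foo': {'foo': 'bar'}}
    m.set_properties({})
    assert m.get_properties() == {}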
@@ -4845,7 +4929,7 @@ def test_query_result_metadata_serialization(self):
         query_result_metadata_model_json2 = query_result_metadata_model.to_dict()
         assert query_result_metadata_model_json2 == query_result_metadata_model_json

-class TestQueryResultPassage():
+class TestModel_QueryResultPassage():
     """
     Test Class for QueryResultPassage
     """
@@ -4887,7 +4971,7 @@ def test_query_result_passage_serialization(self):
         query_result_passage_model_json2 = query_result_passage_model.to_dict()
         assert query_result_passage_model_json2 == query_result_passage_model_json

-class TestQuerySuggestedRefinement():
+class TestModel_QuerySuggestedRefinement():
     """
     Test Class for QuerySuggestedRefinement
     """
@@ -4916,7 +5000,7 @@ def test_query_suggested_refinement_serialization(self):
         query_suggested_refinement_model_json2 = query_suggested_refinement_model.to_dict()
         assert query_suggested_refinement_model_json2 == query_suggested_refinement_model_json

-class TestQueryTableResult():
+class TestModel_QueryTableResult():
     """
     Test Class for QueryTableResult
     """
@@ -5054,7 +5138,7 @@ def test_query_table_result_serialization(self):
         query_table_result_model_json2 = query_table_result_model.to_dict()
         assert query_table_result_model_json2 == query_table_result_model_json

-class TestQueryTermAggregationResult():
+class TestModel_QueryTermAggregationResult():
     """
     Test Class for QueryTermAggregationResult
     """
@@ -5095,7 +5179,7 @@ def test_query_term_aggregation_result_serialization(self):
         query_term_aggregation_result_model_json2 = query_term_aggregation_result_model.to_dict()
         assert query_term_aggregation_result_model_json2 == query_term_aggregation_result_model_json

-class TestQueryTimesliceAggregationResult():
+class TestModel_QueryTimesliceAggregationResult():
     """
     Test Class for QueryTimesliceAggregationResult
     """
@@ -5134,7 +5218,7 @@ def test_query_timeslice_aggregation_result_serialization(self):
         query_timeslice_aggregation_result_model_json2 = query_timeslice_aggregation_result_model.to_dict()
         assert query_timeslice_aggregation_result_model_json2 == query_timeslice_aggregation_result_model_json

-class TestQueryTopHitsAggregationResult():
+class TestModel_QueryTopHitsAggregationResult():
     """
     Test Class for QueryTopHitsAggregationResult
     """
@@ -5164,7 +5248,7 @@ def test_query_top_hits_aggregation_result_serialization(self):
         query_top_hits_aggregation_result_model_json2 = query_top_hits_aggregation_result_model.to_dict()
         assert query_top_hits_aggregation_result_model_json2 == query_top_hits_aggregation_result_model_json

-class TestResultPassageAnswer():
+class TestModel_ResultPassageAnswer():
     """
     Test Class for ResultPassageAnswer
     """
@@ -5196,7 +5280,7 @@ def test_result_passage_answer_serialization(self):
         result_passage_answer_model_json2 = result_passage_answer_model.to_dict()
         assert result_passage_answer_model_json2 == result_passage_answer_model_json

-class TestRetrievalDetails():
+class TestModel_RetrievalDetails():
     """
     Test Class for RetrievalDetails
     """
@@ -5225,7 +5309,7 @@ def test_retrieval_details_serialization(self):
         retrieval_details_model_json2 = retrieval_details_model.to_dict()
         assert retrieval_details_model_json2 == retrieval_details_model_json

-class TestTableBodyCells():
+class TestModel_TableBodyCells():
     """
     Test Class for TableBodyCells
     """
@@ -5296,7 +5380,7 @@ def test_table_body_cells_serialization(self):
         table_body_cells_model_json2 = table_body_cells_model.to_dict()
         assert table_body_cells_model_json2 == table_body_cells_model_json

-class TestTableCellKey():
+class TestModel_TableCellKey():
     """
     Test Class for TableCellKey
     """
@@ -5333,7 +5417,7 @@ def test_table_cell_key_serialization(self):
         table_cell_key_model_json2 = table_cell_key_model.to_dict()
         assert table_cell_key_model_json2 == table_cell_key_model_json

-class TestTableCellValues():
+class TestModel_TableCellValues():
     """
     Test Class for TableCellValues
     """
@@ -5370,7 +5454,7 @@ def test_table_cell_values_serialization(self):
         table_cell_values_model_json2 = table_cell_values_model.to_dict()
         assert table_cell_values_model_json2 == table_cell_values_model_json

-class TestTableColumnHeaderIds():
+class TestModel_TableColumnHeaderIds():
     """
     Test Class for TableColumnHeaderIds
     """
@@ -5399,7 +5483,7 @@ def test_table_column_header_ids_serialization(self):
         table_column_header_ids_model_json2 = table_column_header_ids_model.to_dict()
         assert table_column_header_ids_model_json2 == table_column_header_ids_model_json

-class TestTableColumnHeaderTexts():
+class TestModel_TableColumnHeaderTexts():
     """
     Test Class for TableColumnHeaderTexts
     """
@@ -5428,7 +5512,7 @@ def test_table_column_header_texts_serialization(self):
         table_column_header_texts_model_json2 = table_column_header_texts_model.to_dict()
         assert table_column_header_texts_model_json2 == table_column_header_texts_model_json

-class TestTableColumnHeaderTextsNormalized():
+class TestModel_TableColumnHeaderTextsNormalized():
     """
     Test Class for TableColumnHeaderTextsNormalized
     """
@@ -5457,7 +5541,7 @@ def test_table_column_header_texts_normalized_serialization(self):
         table_column_header_texts_normalized_model_json2 = table_column_header_texts_normalized_model.to_dict()
         assert table_column_header_texts_normalized_model_json2 == table_column_header_texts_normalized_model_json

-class TestTableColumnHeaders():
+class TestModel_TableColumnHeaders():
     """
     Test Class for TableColumnHeaders
     """
@@ -5493,7 +5577,7 @@ def test_table_column_headers_serialization(self):
         table_column_headers_model_json2 = table_column_headers_model.to_dict()
         assert table_column_headers_model_json2 == table_column_headers_model_json

-class TestTableElementLocation():
+class TestModel_TableElementLocation():
     """
     Test Class for TableElementLocation
     """
@@ -5523,7 +5607,7 @@ def test_table_element_location_serialization(self):
         table_element_location_model_json2 = table_element_location_model.to_dict()
         assert table_element_location_model_json2 == table_element_location_model_json

-class TestTableHeaders():
+class TestModel_TableHeaders():
     """
     Test Class for TableHeaders
     """
@@ -5558,7 +5642,7 @@ def test_table_headers_serialization(self):
         table_headers_model_json2 = table_headers_model.to_dict()
         assert table_headers_model_json2 == table_headers_model_json

-class TestTableKeyValuePairs():
+class TestModel_TableKeyValuePairs():
     """
     Test Class for TableKeyValuePairs
     """
@@ -5604,7 +5688,7 @@ def test_table_key_value_pairs_serialization(self):
         table_key_value_pairs_model_json2 = table_key_value_pairs_model.to_dict()
         assert table_key_value_pairs_model_json2 == table_key_value_pairs_model_json

-class TestTableResultTable():
+class TestModel_TableResultTable():
     """
     Test Class for TableResultTable
     """
@@ -5734,7 +5818,7 @@ def test_table_result_table_serialization(self):
         table_result_table_model_json2 = table_result_table_model.to_dict()
         assert table_result_table_model_json2 == table_result_table_model_json

-class TestTableRowHeaderIds():
+class TestModel_TableRowHeaderIds():
     """
     Test Class for TableRowHeaderIds
     """
@@ -5763,7 +5847,7 @@ def test_table_row_header_ids_serialization(self):
         table_row_header_ids_model_json2 = table_row_header_ids_model.to_dict()
         assert table_row_header_ids_model_json2 == table_row_header_ids_model_json

-class TestTableRowHeaderTexts():
+class TestModel_TableRowHeaderTexts():
     """
     Test Class for TableRowHeaderTexts
     """
@@ -5792,7 +5876,7 @@ def test_table_row_header_texts_serialization(self):
         table_row_header_texts_model_json2 = table_row_header_texts_model.to_dict()
         assert table_row_header_texts_model_json2 == table_row_header_texts_model_json

-class TestTableRowHeaderTextsNormalized():
+class TestModel_TableRowHeaderTextsNormalized():
     """
     Test Class for TableRowHeaderTextsNormalized
     """
@@ -5821,7 +5905,7 @@ def test_table_row_header_texts_normalized_serialization(self):
         table_row_header_texts_normalized_model_json2 = table_row_header_texts_normalized_model.to_dict()
         assert table_row_header_texts_normalized_model_json2 == table_row_header_texts_normalized_model_json

-class TestTableRowHeaders():
+class TestModel_TableRowHeaders():
     """
     Test Class for TableRowHeaders
     """
@@ -5863,7 +5947,7 @@ def test_table_row_headers_serialization(self):
         table_row_headers_model_json2 = table_row_headers_model.to_dict()
         assert table_row_headers_model_json2 == table_row_headers_model_json

-class TestTableTextLocation():
+class TestModel_TableTextLocation():
     """
     Test Class for TableTextLocation
     """
@@ -5899,7 +5983,7 @@ def test_table_text_location_serialization(self):
         table_text_location_model_json2 = table_text_location_model.to_dict()
         assert table_text_location_model_json2 == table_text_location_model_json

-class TestTrainingExample():
+class TestModel_TrainingExample():
     """
     Test Class for TrainingExample
     """
@@ -5914,8 +5998,8 @@ def test_training_example_serialization(self):
         training_example_model_json['document_id'] = 'testString'
         training_example_model_json['collection_id'] = 'testString'
         training_example_model_json['relevance'] = 38
-        training_example_model_json['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z"))
-        training_example_model_json['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z"))
+        training_example_model_json['created'] = "2019-01-01T12:00:00Z"
+        training_example_model_json['updated'] = "2019-01-01T12:00:00Z"

         # Construct a model instance of TrainingExample by calling from_dict on the json representation
         training_example_model = TrainingExample.from_dict(training_example_model_json)
@@ -5932,7 +6016,7 @@ def test_training_example_serialization(self):
         training_example_model_json2 = training_example_model.to_dict()
         assert training_example_model_json2 == training_example_model_json

-class TestTrainingQuery():
+class TestModel_TrainingQuery():
     """
     Test Class for TrainingQuery
     """
@@ -5948,16 +6032,16 @@ def test_training_query_serialization(self):
         training_example_model['document_id'] = 'testString'
         training_example_model['collection_id'] = 'testString'
         training_example_model['relevance'] = 38
-        training_example_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z"))
-        training_example_model['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z"))
+        training_example_model['created'] = "2019-01-01T12:00:00Z"
+        training_example_model['updated'] = "2019-01-01T12:00:00Z"

         # Construct a json representation of a TrainingQuery model
         training_query_model_json = {}
         training_query_model_json['query_id'] = 'testString'
         training_query_model_json['natural_language_query'] = 'testString'
         training_query_model_json['filter'] = 'testString'
-        training_query_model_json['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z"))
-        training_query_model_json['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z"))
+        training_query_model_json['created'] = "2019-01-01T12:00:00Z"
+        training_query_model_json['updated'] = "2019-01-01T12:00:00Z"
         training_query_model_json['examples'] = [training_example_model]

         # Construct a model instance of TrainingQuery by calling from_dict on the json representation
@@ -5975,7 +6059,7 @@ def test_training_query_serialization(self):
         training_query_model_json2 = training_query_model.to_dict()
         assert training_query_model_json2 == training_query_model_json

-class TestTrainingQuerySet():
+class TestModel_TrainingQuerySet():
     """
     Test Class for TrainingQuerySet
     """
@@ -5991,15 +6075,15 @@ def test_training_query_set_serialization(self):
         training_example_model['document_id'] = 'testString'
         training_example_model['collection_id'] = 'testString'
         training_example_model['relevance'] = 38
-        training_example_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z"))
-        training_example_model['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z"))
+        training_example_model['created'] = "2019-01-01T12:00:00Z"
+        training_example_model['updated'] = "2019-01-01T12:00:00Z"

         training_query_model = {} # TrainingQuery
         training_query_model['query_id'] = 'testString'
         training_query_model['natural_language_query'] = 'testString'
         training_query_model['filter'] = 'testString'
-        training_query_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z"))
-        training_query_model['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z"))
+        training_query_model['created'] = "2019-01-01T12:00:00Z"
+        training_query_model['updated'] = "2019-01-01T12:00:00Z"
         training_query_model['examples'] = [training_example_model]

         # Construct a json representation of a TrainingQuerySet model
@@ -6021,7 +6105,7 @@ def test_training_query_set_serialization(self):
         training_query_set_model_json2 = training_query_set_model.to_dict()
         assert training_query_set_model_json2 == training_query_set_model_json

-class TestQueryCalculationAggregation():
+class TestModel_QueryCalculationAggregation():
     """
     Test Class for QueryCalculationAggregation
     """
@@ -6052,7 +6136,7 @@ def test_query_calculation_aggregation_serialization(self):
         query_calculation_aggregation_model_json2 = query_calculation_aggregation_model.to_dict()
         assert query_calculation_aggregation_model_json2 == query_calculation_aggregation_model_json

-class TestQueryFilterAggregation():
+class TestModel_QueryFilterAggregation():
     """
     Test Class for QueryFilterAggregation
     """
@@ -6083,7 +6167,7 @@ def test_query_filter_aggregation_serialization(self):
         query_filter_aggregation_model_json2 = query_filter_aggregation_model.to_dict()
         assert query_filter_aggregation_model_json2 == query_filter_aggregation_model_json

-class TestQueryGroupByAggregation():
+class TestModel_QueryGroupByAggregation():
     """
     Test Class for QueryGroupByAggregation
     """
@@ -6112,7 +6196,7 @@ def test_query_group_by_aggregation_serialization(self):
         query_group_by_aggregation_model_json2 = query_group_by_aggregation_model.to_dict()
         assert query_group_by_aggregation_model_json2 == query_group_by_aggregation_model_json

-class TestQueryHistogramAggregation():
+class TestModel_QueryHistogramAggregation():
     """
     Test Class for QueryHistogramAggregation
     """
@@ -6144,7 +6228,7 @@ def test_query_histogram_aggregation_serialization(self):
         query_histogram_aggregation_model_json2 = query_histogram_aggregation_model.to_dict()
         assert query_histogram_aggregation_model_json2 == query_histogram_aggregation_model_json

-class TestQueryNestedAggregation():
+class TestModel_QueryNestedAggregation():
     """
     Test Class for QueryNestedAggregation
     """
@@ -6175,7 +6259,7 @@ def test_query_nested_aggregation_serialization(self):
         query_nested_aggregation_model_json2 = query_nested_aggregation_model.to_dict()
         assert query_nested_aggregation_model_json2 == query_nested_aggregation_model_json

-class TestQueryTermAggregation():
+class TestModel_QueryTermAggregation():
     """
     Test Class for QueryTermAggregation
     """
@@ -6207,7 +6291,7 @@ def test_query_term_aggregation_serialization(self):
         query_term_aggregation_model_json2 = query_term_aggregation_model.to_dict()
         assert query_term_aggregation_model_json2 == query_term_aggregation_model_json

-class TestQueryTimesliceAggregation():
+class TestModel_QueryTimesliceAggregation():
     """
     Test Class for QueryTimesliceAggregation
     """
@@ -6239,7 +6323,7 @@ def test_query_timeslice_aggregation_serialization(self):
         query_timeslice_aggregation_model_json2 = query_timeslice_aggregation_model.to_dict()
         assert query_timeslice_aggregation_model_json2 == query_timeslice_aggregation_model_json

-class TestQueryTopHitsAggregation():
+class TestModel_QueryTopHitsAggregation():
     """
     Test Class for QueryTopHitsAggregation
     """
diff --git a/test/unit/test_language_translator_v3.py b/test/unit/test_language_translator_v3.py
index 6b198cb1..2f39f513 100644
--- a/test/unit/test_language_translator_v3.py
+++ b/test/unit/test_language_translator_v3.py
@@ -31,7 +31,7 @@ import urllib

 from ibm_watson.language_translator_v3 import *

-version = 'testString'
+version = '2018-05-01'
 _service = LanguageTranslatorV3(
     authenticator=NoAuthAuthenticator(),
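Pinning version to a real API date ('2018-05-01' instead of 'testString') keeps the ?version= query parameter these tests send plausible. For orientation, this is roughly how the file stands up a client against mocked HTTP with no credentials; the service URL value below is illustrative, not taken from the file:

    from ibm_cloud_sdk_core.authenticators import NoAuthAuthenticator
    from ibm_watson.language_translator_v3 import LanguageTranslatorV3

    version = '2018-05-01'
    _service = LanguageTranslatorV3(
        authenticator=NoAuthAuthenticator(),  # no IAM exchange; requests hit the mock
        version=version)
    _service.set_service_url('http://localhost:9999')  # illustrative base URL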
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -364,6 +372,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -463,6 +473,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -576,6 +588,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -646,6 +660,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -726,6 +742,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -787,6 +805,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -900,6 +920,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -970,6 +992,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1034,6 +1058,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1135,7 +1161,7 @@ def test_get_translated_document_value_error(self): # Start of Model Tests ############################################################################## # region -class TestDeleteModelResult(): +class TestModel_DeleteModelResult(): """ Test Class for DeleteModelResult """ @@ -1164,7 +1190,7 @@ def test_delete_model_result_serialization(self): delete_model_result_model_json2 = delete_model_result_model.to_dict() assert delete_model_result_model_json2 == delete_model_result_model_json -class TestDocumentList(): +class TestModel_DocumentList(): """ Test Class for DocumentList """ @@ -1185,8 +1211,8 @@ def test_document_list_serialization(self): document_status_model['source'] = 'testString' document_status_model['detected_language_confidence'] = 0 document_status_model['target'] = 'testString' - document_status_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - document_status_model['completed'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + document_status_model['created'] = "2019-01-01T12:00:00Z" + document_status_model['completed'] = "2019-01-01T12:00:00Z" document_status_model['word_count'] = 38 document_status_model['character_count'] = 38 @@ -1209,7 +1235,7 @@ def test_document_list_serialization(self): document_list_model_json2 = document_list_model.to_dict() assert document_list_model_json2 == document_list_model_json -class TestDocumentStatus(): +class TestModel_DocumentStatus(): """ Test Class for DocumentStatus """ @@ -1229,8 +1255,8 @@ def test_document_status_serialization(self): document_status_model_json['source'] = 'testString' document_status_model_json['detected_language_confidence'] = 0 document_status_model_json['target'] = 'testString' - document_status_model_json['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - document_status_model_json['completed'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + document_status_model_json['created'] = "2019-01-01T12:00:00Z" + document_status_model_json['completed'] = "2019-01-01T12:00:00Z" document_status_model_json['word_count'] = 38 document_status_model_json['character_count'] = 38 @@ -1249,7 +1275,7 @@ def test_document_status_serialization(self): document_status_model_json2 = document_status_model.to_dict() assert document_status_model_json2 == document_status_model_json -class TestIdentifiableLanguage(): +class TestModel_IdentifiableLanguage(): """ Test Class for IdentifiableLanguage """ @@ -1279,7 +1305,7 @@ def test_identifiable_language_serialization(self): identifiable_language_model_json2 = identifiable_language_model.to_dict() assert identifiable_language_model_json2 == identifiable_language_model_json -class TestIdentifiableLanguages(): +class TestModel_IdentifiableLanguages(): """ Test Class for IdentifiableLanguages """ @@ -1314,7 +1340,7 @@ def test_identifiable_languages_serialization(self): identifiable_languages_model_json2 = identifiable_languages_model.to_dict() assert identifiable_languages_model_json2 == identifiable_languages_model_json -class TestIdentifiedLanguage(): +class TestModel_IdentifiedLanguage(): """ Test Class for IdentifiedLanguage """ @@ -1344,7 +1370,7 @@ def test_identified_language_serialization(self): identified_language_model_json2 = 
@@ -1135,7 +1161,7 @@ def test_get_translated_document_value_error(self):
 # Start of Model Tests
 ##############################################################################
 # region
-class TestDeleteModelResult():
+class TestModel_DeleteModelResult():
     """
     Test Class for DeleteModelResult
     """
@@ -1164,7 +1190,7 @@ def test_delete_model_result_serialization(self):
         delete_model_result_model_json2 = delete_model_result_model.to_dict()
         assert delete_model_result_model_json2 == delete_model_result_model_json

-class TestDocumentList():
+class TestModel_DocumentList():
     """
     Test Class for DocumentList
     """
@@ -1185,8 +1211,8 @@ def test_document_list_serialization(self):
         document_status_model['source'] = 'testString'
         document_status_model['detected_language_confidence'] = 0
         document_status_model['target'] = 'testString'
-        document_status_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z"))
-        document_status_model['completed'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z"))
+        document_status_model['created'] = "2019-01-01T12:00:00Z"
+        document_status_model['completed'] = "2019-01-01T12:00:00Z"
         document_status_model['word_count'] = 38
         document_status_model['character_count'] = 38
@@ -1209,7 +1235,7 @@ def test_document_list_serialization(self):
         document_list_model_json2 = document_list_model.to_dict()
         assert document_list_model_json2 == document_list_model_json

-class TestDocumentStatus():
+class TestModel_DocumentStatus():
     """
     Test Class for DocumentStatus
     """
@@ -1229,8 +1255,8 @@ def test_document_status_serialization(self):
         document_status_model_json['source'] = 'testString'
         document_status_model_json['detected_language_confidence'] = 0
         document_status_model_json['target'] = 'testString'
-        document_status_model_json['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z"))
-        document_status_model_json['completed'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z"))
+        document_status_model_json['created'] = "2019-01-01T12:00:00Z"
+        document_status_model_json['completed'] = "2019-01-01T12:00:00Z"
         document_status_model_json['word_count'] = 38
         document_status_model_json['character_count'] = 38
@@ -1249,7 +1275,7 @@ def test_document_status_serialization(self):
         document_status_model_json2 = document_status_model.to_dict()
         assert document_status_model_json2 == document_status_model_json

-class TestIdentifiableLanguage():
+class TestModel_IdentifiableLanguage():
     """
     Test Class for IdentifiableLanguage
     """
@@ -1279,7 +1305,7 @@ def test_identifiable_language_serialization(self):
         identifiable_language_model_json2 = identifiable_language_model.to_dict()
         assert identifiable_language_model_json2 == identifiable_language_model_json

-class TestIdentifiableLanguages():
+class TestModel_IdentifiableLanguages():
     """
     Test Class for IdentifiableLanguages
     """
@@ -1314,7 +1340,7 @@ def test_identifiable_languages_serialization(self):
         identifiable_languages_model_json2 = identifiable_languages_model.to_dict()
         assert identifiable_languages_model_json2 == identifiable_languages_model_json

-class TestIdentifiedLanguage():
+class TestModel_IdentifiedLanguage():
     """
     Test Class for IdentifiedLanguage
     """
@@ -1344,7 +1370,7 @@ def test_identified_language_serialization(self):
         identified_language_model_json2 = identified_language_model.to_dict()
         assert identified_language_model_json2 == identified_language_model_json

-class TestIdentifiedLanguages():
+class TestModel_IdentifiedLanguages():
     """
     Test Class for IdentifiedLanguages
     """
@@ -1379,7 +1405,7 @@ def test_identified_languages_serialization(self):
         identified_languages_model_json2 = identified_languages_model.to_dict()
         assert identified_languages_model_json2 == identified_languages_model_json

-class TestLanguage():
+class TestModel_Language():
     """
     Test Class for Language
     """
@@ -1416,7 +1442,7 @@ def test_language_serialization(self):
         language_model_json2 = language_model.to_dict()
         assert language_model_json2 == language_model_json

-class TestLanguages():
+class TestModel_Languages():
     """
     Test Class for Languages
     """
@@ -1458,7 +1484,7 @@ def test_languages_serialization(self):
         languages_model_json2 = languages_model.to_dict()
         assert languages_model_json2 == languages_model_json

-class TestTranslation():
+class TestModel_Translation():
     """
     Test Class for Translation
     """
@@ -1487,7 +1513,7 @@ def test_translation_serialization(self):
         translation_model_json2 = translation_model.to_dict()
         assert translation_model_json2 == translation_model_json

-class TestTranslationModel():
+class TestModel_TranslationModel():
     """
     Test Class for TranslationModel
     """
@@ -1525,7 +1551,7 @@ def test_translation_model_serialization(self):
         translation_model_model_json2 = translation_model_model.to_dict()
         assert translation_model_model_json2 == translation_model_model_json

-class TestTranslationModels():
+class TestModel_TranslationModels():
     """
     Test Class for TranslationModels
     """
@@ -1568,7 +1594,7 @@ def test_translation_models_serialization(self):
         translation_models_model_json2 = translation_models_model.to_dict()
         assert translation_models_model_json2 == translation_models_model_json

-class TestTranslationResult():
+class TestModel_TranslationResult():
     """
     Test Class for TranslationResult
     """
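All the TestModel_* cases renamed above share one body shape, visible in the fragments: build a dict fixture, hydrate it with from_dict, convert back with to_dict, and require exact equality with the fixture. Reduced to a skeleton for one real model from this file (the 'translation' fixture key is an assumption based on the model's name, hedged accordingly):

    from ibm_watson.language_translator_v3 import Translation

    translation_model_json = {'translation': 'testString'}

    # dict -> model; constructing twice guards against from_dict mutating shared state
    translation_model = Translation.from_dict(translation_model_json)
    translation_model2 = Translation.from_dict(translation_model_json)
    assert translation_model == translation_model2

    # model -> dict must reproduce the fixture exactly
    assert translation_model.to_dict() == translation_model_json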
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -335,6 +343,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -405,6 +415,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -470,7 +482,7 @@ def test_delete_classifier_value_error(self): # Start of Model Tests ############################################################################## # region -class TestClassification(): +class TestModel_Classification(): """ Test Class for Classification """ @@ -509,7 +521,7 @@ def test_classification_serialization(self): classification_model_json2 = classification_model.to_dict() assert classification_model_json2 == classification_model_json -class TestClassificationCollection(): +class TestModel_ClassificationCollection(): """ Test Class for ClassificationCollection """ @@ -551,7 +563,7 @@ def test_classification_collection_serialization(self): classification_collection_model_json2 = classification_collection_model.to_dict() assert classification_collection_model_json2 == classification_collection_model_json -class TestClassifiedClass(): +class TestModel_ClassifiedClass(): """ Test Class for ClassifiedClass """ @@ -581,7 +593,7 @@ def test_classified_class_serialization(self): classified_class_model_json2 = classified_class_model.to_dict() assert classified_class_model_json2 == classified_class_model_json -class TestClassifier(): +class TestModel_Classifier(): """ Test Class for Classifier """ @@ -597,7 +609,7 @@ def test_classifier_serialization(self): classifier_model_json['url'] = 'testString' classifier_model_json['status'] = 'Non Existent' classifier_model_json['classifier_id'] = 'testString' - classifier_model_json['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + classifier_model_json['created'] = "2019-01-01T12:00:00Z" classifier_model_json['status_description'] = 'testString' classifier_model_json['language'] = 'testString' @@ -616,7 +628,7 @@ def test_classifier_serialization(self): classifier_model_json2 = classifier_model.to_dict() assert classifier_model_json2 == classifier_model_json -class TestClassifierList(): +class TestModel_ClassifierList(): """ Test Class for ClassifierList """ @@ -633,7 +645,7 @@ def test_classifier_list_serialization(self): classifier_model['url'] = 'testString' classifier_model['status'] = 'Non Existent' classifier_model['classifier_id'] = 'testString' - classifier_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + classifier_model['created'] = "2019-01-01T12:00:00Z" classifier_model['status_description'] = 'testString' classifier_model['language'] = 'testString' @@ -656,7 +668,7 @@ def test_classifier_list_serialization(self): classifier_list_model_json2 = classifier_list_model.to_dict() assert classifier_list_model_json2 == 
classifier_list_model_json -class TestClassifyInput(): +class TestModel_ClassifyInput(): """ Test Class for ClassifyInput """ @@ -685,7 +697,7 @@ def test_classify_input_serialization(self): classify_input_model_json2 = classify_input_model.to_dict() assert classify_input_model_json2 == classify_input_model_json -class TestCollectionItem(): +class TestModel_CollectionItem(): """ Test Class for CollectionItem """ diff --git a/test/unit/test_natural_language_understanding_v1.py b/test/unit/test_natural_language_understanding_v1.py index cfc05517..d5bb4e7d 100644 --- a/test/unit/test_natural_language_understanding_v1.py +++ b/test/unit/test_natural_language_understanding_v1.py @@ -55,6 +55,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -90,16 +92,16 @@ def test_analyze_all_params(self): # Construct a dict representation of a EntitiesOptions model entities_options_model = {} entities_options_model['limit'] = 250 - entities_options_model['mentions'] = True + entities_options_model['mentions'] = False entities_options_model['model'] = 'testString' - entities_options_model['sentiment'] = True - entities_options_model['emotion'] = True + entities_options_model['sentiment'] = False + entities_options_model['emotion'] = False # Construct a dict representation of a KeywordsOptions model keywords_options_model = {} keywords_options_model['limit'] = 250 - keywords_options_model['sentiment'] = True - keywords_options_model['emotion'] = True + keywords_options_model['sentiment'] = False + keywords_options_model['emotion'] = False # Construct a dict representation of a MetadataOptions model metadata_options_model = {} @@ -111,8 +113,8 @@ def test_analyze_all_params(self): # Construct a dict representation of a SemanticRolesOptions model semantic_roles_options_model = {} semantic_roles_options_model['limit'] = 38 - semantic_roles_options_model['keywords'] = True - semantic_roles_options_model['entities'] = True + semantic_roles_options_model['keywords'] = False + semantic_roles_options_model['entities'] = False # Construct a dict representation of a SentimentOptions model sentiment_options_model = {} @@ -126,7 +128,7 @@ def test_analyze_all_params(self): # Construct a dict representation of a CategoriesOptions model categories_options_model = {} - categories_options_model['explanation'] = True + categories_options_model['explanation'] = False categories_options_model['limit'] = 10 categories_options_model['model'] = 'testString' @@ -163,7 +165,7 @@ def test_analyze_all_params(self): clean = True xpath = 'testString' fallback_to_raw = True - return_analyzed_text = True + return_analyzed_text = False language = 'testString' limit_text_characters = 38 @@ -194,7 +196,7 @@ def test_analyze_all_params(self): assert req_body['clean'] == True assert req_body['xpath'] == 'testString' assert req_body['fallback_to_raw'] == True - assert req_body['return_analyzed_text'] == True + assert req_body['return_analyzed_text'] == False assert req_body['language'] == 'testString' assert req_body['limit_text_characters'] == 38 @@ -229,16 +231,16 @@ def test_analyze_value_error(self): # Construct a dict representation of a EntitiesOptions model entities_options_model = {} entities_options_model['limit'] = 250 - 
entities_options_model['mentions'] = True + entities_options_model['mentions'] = False entities_options_model['model'] = 'testString' - entities_options_model['sentiment'] = True - entities_options_model['emotion'] = True + entities_options_model['sentiment'] = False + entities_options_model['emotion'] = False # Construct a dict representation of a KeywordsOptions model keywords_options_model = {} keywords_options_model['limit'] = 250 - keywords_options_model['sentiment'] = True - keywords_options_model['emotion'] = True + keywords_options_model['sentiment'] = False + keywords_options_model['emotion'] = False # Construct a dict representation of a MetadataOptions model metadata_options_model = {} @@ -250,8 +252,8 @@ def test_analyze_value_error(self): # Construct a dict representation of a SemanticRolesOptions model semantic_roles_options_model = {} semantic_roles_options_model['limit'] = 38 - semantic_roles_options_model['keywords'] = True - semantic_roles_options_model['entities'] = True + semantic_roles_options_model['keywords'] = False + semantic_roles_options_model['entities'] = False # Construct a dict representation of a SentimentOptions model sentiment_options_model = {} @@ -265,7 +267,7 @@ def test_analyze_value_error(self): # Construct a dict representation of a CategoriesOptions model categories_options_model = {} - categories_options_model['explanation'] = True + categories_options_model['explanation'] = False categories_options_model['limit'] = 10 categories_options_model['model'] = 'testString' @@ -302,7 +304,7 @@ def test_analyze_value_error(self): clean = True xpath = 'testString' fallback_to_raw = True - return_analyzed_text = True + return_analyzed_text = False language = 'testString' limit_text_characters = 38 @@ -336,6 +338,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -397,6 +401,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -477,6 +483,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -591,6 +599,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -652,6 +662,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -722,6 +734,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -842,6 +856,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -922,6 +938,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1038,6 +1056,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1099,6 +1119,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1169,6 +1191,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1291,6 +1315,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1371,6 +1397,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1487,6 +1515,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1499,7 +1529,7 @@ def test_list_classifications_models_all_params(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/models/classifications') - mock_response = '{"models": [{"models": [{"name": "name", "user_metadata": {"mapKey": {"anyKey": "anyValue"}}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}]}]}' + mock_response = '{"models": [{"name": "name", "user_metadata": {"mapKey": {"anyKey": "anyValue"}}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}]}' responses.add(responses.GET, url, body=mock_response, @@ -1522,7 +1552,7 @@ def test_list_classifications_models_value_error(self): """ # Set up mock url = self.preprocess_url(_base_url + '/v1/models/classifications') - mock_response = '{"models": [{"models": [{"name": "name", "user_metadata": {"mapKey": {"anyKey": "anyValue"}}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}]}]}' + mock_response = '{"models": [{"name": "name", "user_metadata": {"mapKey": {"anyKey": "anyValue"}}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}]}' responses.add(responses.GET, url, body=mock_response, @@ -1548,6 +1578,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1618,6 +1650,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1740,6 +1774,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1811,7 +1847,7 @@ def test_delete_classifications_model_value_error(self): # Start of Model Tests ############################################################################## # region -class TestAnalysisResults(): +class TestModel_AnalysisResults(): """ Test Class for AnalysisResults """ @@ -1878,7 +1914,7 @@ def test_analysis_results_serialization(self): categories_result_explanation_model['relevant_text'] = [categories_relevant_text_model] categories_result_model = {} # CategoriesResult - categories_result_model['label'] = '/technology and computing/software' + categories_result_model['label'] = '/technology and computing/computing/computer software and applications' categories_result_model['score'] = 0.594296 categories_result_model['explanation'] = categories_result_explanation_model @@ -2015,7 +2051,7 @@ def test_analysis_results_serialization(self): analysis_results_model_json2 = analysis_results_model.to_dict() assert analysis_results_model_json2 == analysis_results_model_json -class TestAnalysisResultsUsage(): +class TestModel_AnalysisResultsUsage(): """ Test Class for AnalysisResultsUsage """ @@ -2046,7 +2082,7 @@ def test_analysis_results_usage_serialization(self): analysis_results_usage_model_json2 = analysis_results_usage_model.to_dict() assert analysis_results_usage_model_json2 == analysis_results_usage_model_json -class TestAuthor(): +class TestModel_Author(): """ Test Class for Author """ @@ -2075,7 +2111,7 @@ def test_author_serialization(self): author_model_json2 = author_model.to_dict() assert author_model_json2 == author_model_json -class TestCategoriesModel(): +class TestModel_CategoriesModel(): """ Test Class for CategoriesModel """ @@ -2088,7 +2124,7 @@ def test_categories_model_serialization(self): # Construct dict forms of any model objects needed in order to build this model. notice_model = {} # Notice - notice_model['message'] = 'Not enough examples for class \'foo\'. 4 were given but 5 are required.' + notice_model['message'] = 'Training data validation failed: Too few examples for label insufficient_examples. 
Minimum of 5 required' # Construct a json representation of a CategoriesModel model categories_model_model_json = {} @@ -2102,10 +2138,10 @@ def test_categories_model_serialization(self): categories_model_model_json['features'] = ['testString'] categories_model_model_json['status'] = 'starting' categories_model_model_json['model_id'] = 'testString' - categories_model_model_json['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + categories_model_model_json['created'] = "2019-01-01T12:00:00Z" categories_model_model_json['notices'] = [notice_model] - categories_model_model_json['last_trained'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - categories_model_model_json['last_deployed'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + categories_model_model_json['last_trained'] = "2019-01-01T12:00:00Z" + categories_model_model_json['last_deployed'] = "2019-01-01T12:00:00Z" # Construct a model instance of CategoriesModel by calling from_dict on the json representation categories_model_model = CategoriesModel.from_dict(categories_model_model_json) @@ -2122,7 +2158,7 @@ def test_categories_model_serialization(self): categories_model_model_json2 = categories_model_model.to_dict() assert categories_model_model_json2 == categories_model_model_json -class TestCategoriesModelList(): +class TestModel_CategoriesModelList(): """ Test Class for CategoriesModelList """ @@ -2135,7 +2171,7 @@ def test_categories_model_list_serialization(self): # Construct dict forms of any model objects needed in order to build this model. notice_model = {} # Notice - notice_model['message'] = 'Not enough examples for class \'foo\'. 4 were given but 5 are required.' + notice_model['message'] = 'Training data validation failed: Too few examples for label insufficient_examples. 
Minimum of 5 required' categories_model_model = {} # CategoriesModel categories_model_model['name'] = 'testString' @@ -2148,10 +2184,10 @@ def test_categories_model_list_serialization(self): categories_model_model['features'] = ['testString'] categories_model_model['status'] = 'starting' categories_model_model['model_id'] = 'testString' - categories_model_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + categories_model_model['created'] = "2019-01-01T12:00:00Z" categories_model_model['notices'] = [notice_model] - categories_model_model['last_trained'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - categories_model_model['last_deployed'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + categories_model_model['last_trained'] = "2019-01-01T12:00:00Z" + categories_model_model['last_deployed'] = "2019-01-01T12:00:00Z" # Construct a json representation of a CategoriesModelList model categories_model_list_model_json = {} @@ -2172,7 +2208,7 @@ def test_categories_model_list_serialization(self): categories_model_list_model_json2 = categories_model_list_model.to_dict() assert categories_model_list_model_json2 == categories_model_list_model_json -class TestCategoriesOptions(): +class TestModel_CategoriesOptions(): """ Test Class for CategoriesOptions """ @@ -2184,7 +2220,7 @@ def test_categories_options_serialization(self): # Construct a json representation of a CategoriesOptions model categories_options_model_json = {} - categories_options_model_json['explanation'] = True + categories_options_model_json['explanation'] = False categories_options_model_json['limit'] = 10 categories_options_model_json['model'] = 'testString' @@ -2203,7 +2239,7 @@ def test_categories_options_serialization(self): categories_options_model_json2 = categories_options_model.to_dict() assert categories_options_model_json2 == categories_options_model_json -class TestCategoriesRelevantText(): +class TestModel_CategoriesRelevantText(): """ Test Class for CategoriesRelevantText """ @@ -2232,7 +2268,7 @@ def test_categories_relevant_text_serialization(self): categories_relevant_text_model_json2 = categories_relevant_text_model.to_dict() assert categories_relevant_text_model_json2 == categories_relevant_text_model_json -class TestCategoriesResult(): +class TestModel_CategoriesResult(): """ Test Class for CategoriesResult """ @@ -2271,7 +2307,7 @@ def test_categories_result_serialization(self): categories_result_model_json2 = categories_result_model.to_dict() assert categories_result_model_json2 == categories_result_model_json -class TestCategoriesResultExplanation(): +class TestModel_CategoriesResultExplanation(): """ Test Class for CategoriesResultExplanation """ @@ -2305,7 +2341,7 @@ def test_categories_result_explanation_serialization(self): categories_result_explanation_model_json2 = categories_result_explanation_model.to_dict() assert categories_result_explanation_model_json2 == categories_result_explanation_model_json -class TestClassificationsModel(): +class TestModel_ClassificationsModel(): """ Test Class for ClassificationsModel """ @@ -2318,7 +2354,7 @@ def test_classifications_model_serialization(self): # Construct dict forms of any model objects needed in order to build this model. notice_model = {} # Notice - notice_model['message'] = 'Not enough examples for class \'foo\'. 4 were given but 5 are required.' + notice_model['message'] = 'Training data validation failed: Too few examples for label insufficient_examples. 
Minimum of 5 required' # Construct a json representation of a ClassificationsModel model classifications_model_model_json = {} @@ -2332,10 +2368,10 @@ def test_classifications_model_serialization(self): classifications_model_model_json['features'] = ['testString'] classifications_model_model_json['status'] = 'starting' classifications_model_model_json['model_id'] = 'testString' - classifications_model_model_json['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + classifications_model_model_json['created'] = "2019-01-01T12:00:00Z" classifications_model_model_json['notices'] = [notice_model] - classifications_model_model_json['last_trained'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - classifications_model_model_json['last_deployed'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + classifications_model_model_json['last_trained'] = "2019-01-01T12:00:00Z" + classifications_model_model_json['last_deployed'] = "2019-01-01T12:00:00Z" # Construct a model instance of ClassificationsModel by calling from_dict on the json representation classifications_model_model = ClassificationsModel.from_dict(classifications_model_model_json) @@ -2352,7 +2388,7 @@ def test_classifications_model_serialization(self): classifications_model_model_json2 = classifications_model_model.to_dict() assert classifications_model_model_json2 == classifications_model_model_json -class TestClassificationsModelList(): +class TestModel_ClassificationsModelList(): """ Test Class for ClassificationsModelList """ @@ -2365,7 +2401,7 @@ def test_classifications_model_list_serialization(self): # Construct dict forms of any model objects needed in order to build this model. notice_model = {} # Notice - notice_model['message'] = 'Not enough examples for class \'foo\'. 4 were given but 5 are required.' + notice_model['message'] = 'Training data validation failed: Too few examples for label insufficient_examples. 
Minimum of 5 required' classifications_model_model = {} # ClassificationsModel classifications_model_model['name'] = 'testString' @@ -2378,10 +2414,10 @@ def test_classifications_model_list_serialization(self): classifications_model_model['features'] = ['testString'] classifications_model_model['status'] = 'starting' classifications_model_model['model_id'] = 'testString' - classifications_model_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + classifications_model_model['created'] = "2019-01-01T12:00:00Z" classifications_model_model['notices'] = [notice_model] - classifications_model_model['last_trained'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - classifications_model_model['last_deployed'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + classifications_model_model['last_trained'] = "2019-01-01T12:00:00Z" + classifications_model_model['last_deployed'] = "2019-01-01T12:00:00Z" # Construct a json representation of a ClassificationsModelList model classifications_model_list_model_json = {} @@ -2402,7 +2438,7 @@ def test_classifications_model_list_serialization(self): classifications_model_list_model_json2 = classifications_model_list_model.to_dict() assert classifications_model_list_model_json2 == classifications_model_list_model_json -class TestClassificationsOptions(): +class TestModel_ClassificationsOptions(): """ Test Class for ClassificationsOptions """ @@ -2431,7 +2467,7 @@ def test_classifications_options_serialization(self): classifications_options_model_json2 = classifications_options_model.to_dict() assert classifications_options_model_json2 == classifications_options_model_json -class TestClassificationsResult(): +class TestModel_ClassificationsResult(): """ Test Class for ClassificationsResult """ @@ -2461,7 +2497,7 @@ def test_classifications_result_serialization(self): classifications_result_model_json2 = classifications_result_model.to_dict() assert classifications_result_model_json2 == classifications_result_model_json -class TestConceptsOptions(): +class TestModel_ConceptsOptions(): """ Test Class for ConceptsOptions """ @@ -2490,7 +2526,7 @@ def test_concepts_options_serialization(self): concepts_options_model_json2 = concepts_options_model.to_dict() assert concepts_options_model_json2 == concepts_options_model_json -class TestConceptsResult(): +class TestModel_ConceptsResult(): """ Test Class for ConceptsResult """ @@ -2521,7 +2557,7 @@ def test_concepts_result_serialization(self): concepts_result_model_json2 = concepts_result_model.to_dict() assert concepts_result_model_json2 == concepts_result_model_json -class TestDeleteModelResults(): +class TestModel_DeleteModelResults(): """ Test Class for DeleteModelResults """ @@ -2550,7 +2586,7 @@ def test_delete_model_results_serialization(self): delete_model_results_model_json2 = delete_model_results_model.to_dict() assert delete_model_results_model_json2 == delete_model_results_model_json -class TestDisambiguationResult(): +class TestModel_DisambiguationResult(): """ Test Class for DisambiguationResult """ @@ -2581,7 +2617,7 @@ def test_disambiguation_result_serialization(self): disambiguation_result_model_json2 = disambiguation_result_model.to_dict() assert disambiguation_result_model_json2 == disambiguation_result_model_json -class TestDocumentEmotionResults(): +class TestModel_DocumentEmotionResults(): """ Test Class for DocumentEmotionResults """ @@ -2619,7 +2655,7 @@ def test_document_emotion_results_serialization(self): 
@@ -2619,7 +2655,7 @@ def test_document_emotion_results_serialization(self):
         document_emotion_results_model_json2 = document_emotion_results_model.to_dict()
         assert document_emotion_results_model_json2 == document_emotion_results_model_json
 
-class TestDocumentSentimentResults():
+class TestModel_DocumentSentimentResults():
     """
     Test Class for DocumentSentimentResults
     """
@@ -2649,7 +2685,7 @@ def test_document_sentiment_results_serialization(self):
         document_sentiment_results_model_json2 = document_sentiment_results_model.to_dict()
         assert document_sentiment_results_model_json2 == document_sentiment_results_model_json
 
-class TestEmotionOptions():
+class TestModel_EmotionOptions():
     """
     Test Class for EmotionOptions
     """
@@ -2679,7 +2715,7 @@ def test_emotion_options_serialization(self):
         emotion_options_model_json2 = emotion_options_model.to_dict()
         assert emotion_options_model_json2 == emotion_options_model_json
 
-class TestEmotionResult():
+class TestModel_EmotionResult():
     """
     Test Class for EmotionResult
     """
@@ -2725,7 +2761,7 @@ def test_emotion_result_serialization(self):
         emotion_result_model_json2 = emotion_result_model.to_dict()
         assert emotion_result_model_json2 == emotion_result_model_json
 
-class TestEmotionScores():
+class TestModel_EmotionScores():
     """
     Test Class for EmotionScores
     """
@@ -2758,7 +2794,7 @@ def test_emotion_scores_serialization(self):
         emotion_scores_model_json2 = emotion_scores_model.to_dict()
         assert emotion_scores_model_json2 == emotion_scores_model_json
 
-class TestEntitiesOptions():
+class TestModel_EntitiesOptions():
     """
     Test Class for EntitiesOptions
     """
@@ -2771,10 +2807,10 @@ def test_entities_options_serialization(self):
         # Construct a json representation of a EntitiesOptions model
         entities_options_model_json = {}
         entities_options_model_json['limit'] = 250
-        entities_options_model_json['mentions'] = True
+        entities_options_model_json['mentions'] = False
         entities_options_model_json['model'] = 'testString'
-        entities_options_model_json['sentiment'] = True
-        entities_options_model_json['emotion'] = True
+        entities_options_model_json['sentiment'] = False
+        entities_options_model_json['emotion'] = False
 
         # Construct a model instance of EntitiesOptions by calling from_dict on the json representation
         entities_options_model = EntitiesOptions.from_dict(entities_options_model_json)
@@ -2791,7 +2827,7 @@ def test_entities_options_serialization(self):
         entities_options_model_json2 = entities_options_model.to_dict()
         assert entities_options_model_json2 == entities_options_model_json
 
-class TestEntitiesResult():
+class TestModel_EntitiesResult():
     """
     Test Class for EntitiesResult
     """
@@ -2850,7 +2886,7 @@ def test_entities_result_serialization(self):
         entities_result_model_json2 = entities_result_model.to_dict()
         assert entities_result_model_json2 == entities_result_model_json
 
-class TestEntityMention():
+class TestModel_EntityMention():
     """
     Test Class for EntityMention
     """
@@ -2881,7 +2917,7 @@ def test_entity_mention_serialization(self):
         entity_mention_model_json2 = entity_mention_model.to_dict()
         assert entity_mention_model_json2 == entity_mention_model_json
 
-class TestFeatureSentimentResults():
+class TestModel_FeatureSentimentResults():
     """
     Test Class for FeatureSentimentResults
     """
@@ -2910,7 +2946,7 @@ def test_feature_sentiment_results_serialization(self):
         feature_sentiment_results_model_json2 = feature_sentiment_results_model.to_dict()
         assert feature_sentiment_results_model_json2 == feature_sentiment_results_model_json
 
-class TestFeatures():
+class TestModel_Features():
     """
     Test Class for Features
     """
@@ -2934,15 +2970,15 @@ def test_features_serialization(self):
         entities_options_model = {} # EntitiesOptions
         entities_options_model['limit'] = 250
-        entities_options_model['mentions'] = True
+        entities_options_model['mentions'] = False
         entities_options_model['model'] = 'testString'
-        entities_options_model['sentiment'] = True
-        entities_options_model['emotion'] = True
+        entities_options_model['sentiment'] = False
+        entities_options_model['emotion'] = False
 
         keywords_options_model = {} # KeywordsOptions
         keywords_options_model['limit'] = 250
-        keywords_options_model['sentiment'] = True
-        keywords_options_model['emotion'] = True
+        keywords_options_model['sentiment'] = False
+        keywords_options_model['emotion'] = False
 
         metadata_options_model = {} # MetadataOptions
@@ -2951,8 +2987,8 @@ def test_features_serialization(self):
         semantic_roles_options_model = {} # SemanticRolesOptions
         semantic_roles_options_model['limit'] = 38
-        semantic_roles_options_model['keywords'] = True
-        semantic_roles_options_model['entities'] = True
+        semantic_roles_options_model['keywords'] = False
+        semantic_roles_options_model['entities'] = False
 
         sentiment_options_model = {} # SentimentOptions
         sentiment_options_model['document'] = True
@@ -2963,7 +2999,7 @@ def test_features_serialization(self):
         summarization_options_model['limit'] = 10
 
         categories_options_model = {} # CategoriesOptions
-        categories_options_model['explanation'] = True
+        categories_options_model['explanation'] = False
         categories_options_model['limit'] = 10
         categories_options_model['model'] = 'testString'
@@ -3005,7 +3041,7 @@ def test_features_serialization(self):
         features_model_json2 = features_model.to_dict()
         assert features_model_json2 == features_model_json
 
-class TestFeaturesResultsMetadata():
+class TestModel_FeaturesResultsMetadata():
     """
     Test Class for FeaturesResultsMetadata
     """
@@ -3046,7 +3082,7 @@ def test_features_results_metadata_serialization(self):
         features_results_metadata_model_json2 = features_results_metadata_model.to_dict()
         assert features_results_metadata_model_json2 == features_results_metadata_model_json
 
-class TestFeed():
+class TestModel_Feed():
     """
     Test Class for Feed
     """
@@ -3075,7 +3111,7 @@ def test_feed_serialization(self):
         feed_model_json2 = feed_model.to_dict()
         assert feed_model_json2 == feed_model_json
 
-class TestKeywordsOptions():
+class TestModel_KeywordsOptions():
     """
     Test Class for KeywordsOptions
     """
@@ -3088,8 +3124,8 @@ def test_keywords_options_serialization(self):
         # Construct a json representation of a KeywordsOptions model
         keywords_options_model_json = {}
         keywords_options_model_json['limit'] = 250
-        keywords_options_model_json['sentiment'] = True
-        keywords_options_model_json['emotion'] = True
+        keywords_options_model_json['sentiment'] = False
+        keywords_options_model_json['emotion'] = False
 
         # Construct a model instance of KeywordsOptions by calling from_dict on the json representation
         keywords_options_model = KeywordsOptions.from_dict(keywords_options_model_json)
@@ -3106,7 +3142,7 @@ def test_keywords_options_serialization(self):
         keywords_options_model_json2 = keywords_options_model.to_dict()
         assert keywords_options_model_json2 == keywords_options_model_json
 
-class TestKeywordsResult():
+class TestModel_KeywordsResult():
     """
     Test Class for KeywordsResult
     """
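The wholesale TestX -> TestModel_X rename keeps pytest's default Test* collection while separating the model round-trip tests from the request tests, and it removes the ambiguity of the old TestModel class (which, as a later hunk shows, tested the NLU model literally named Model). A side benefit is group selection, sketched here with pytest's public API:

    import pytest

    # Run only the model serialization tests via the shared prefix
    # (path shown for illustration).
    pytest.main(['test/unit/test_natural_language_understanding_v1.py', '-k', 'TestModel_'])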
@@ -3151,60 +3187,7 @@ def test_keywords_result_serialization(self):
         keywords_result_model_json2 = keywords_result_model.to_dict()
         assert keywords_result_model_json2 == keywords_result_model_json
 
-class TestListClassificationsModelsResponse():
-    """
-    Test Class for ListClassificationsModelsResponse
-    """
-
-    def test_list_classifications_models_response_serialization(self):
-        """
-        Test serialization/deserialization for ListClassificationsModelsResponse
-        """
-
-        # Construct dict forms of any model objects needed in order to build this model.
-
-        notice_model = {} # Notice
-        notice_model['message'] = 'Not enough examples for class \'foo\'. 4 were given but 5 are required.'
-
-        classifications_model_model = {} # ClassificationsModel
-        classifications_model_model['name'] = 'testString'
-        classifications_model_model['user_metadata'] = {}
-        classifications_model_model['language'] = 'testString'
-        classifications_model_model['description'] = 'testString'
-        classifications_model_model['model_version'] = 'testString'
-        classifications_model_model['workspace_id'] = 'testString'
-        classifications_model_model['version_description'] = 'testString'
-        classifications_model_model['features'] = ['testString']
-        classifications_model_model['status'] = 'starting'
-        classifications_model_model['model_id'] = 'testString'
-        classifications_model_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z"))
-        classifications_model_model['notices'] = [notice_model]
-        classifications_model_model['last_trained'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z"))
-        classifications_model_model['last_deployed'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z"))
-
-        classifications_model_list_model = {} # ClassificationsModelList
-        classifications_model_list_model['models'] = [classifications_model_model]
-
-        # Construct a json representation of a ListClassificationsModelsResponse model
-        list_classifications_models_response_model_json = {}
-        list_classifications_models_response_model_json['models'] = [classifications_model_list_model]
-
-        # Construct a model instance of ListClassificationsModelsResponse by calling from_dict on the json representation
-        list_classifications_models_response_model = ListClassificationsModelsResponse.from_dict(list_classifications_models_response_model_json)
-        assert list_classifications_models_response_model != False
-
-        # Construct a model instance of ListClassificationsModelsResponse by calling from_dict on the json representation
-        list_classifications_models_response_model_dict = ListClassificationsModelsResponse.from_dict(list_classifications_models_response_model_json).__dict__
-        list_classifications_models_response_model2 = ListClassificationsModelsResponse(**list_classifications_models_response_model_dict)
-
-        # Verify the model instances are equivalent
-        assert list_classifications_models_response_model == list_classifications_models_response_model2
-
-        # Convert model instance back to dict and verify no loss of data
-        list_classifications_models_response_model_json2 = list_classifications_models_response_model.to_dict()
-        assert list_classifications_models_response_model_json2 == list_classifications_models_response_model_json
-
-class TestListModelsResults():
+class TestModel_ListModelsResults():
     """
     Test Class for ListModelsResults
     """
@@ -3225,7 +3208,7 @@ def test_list_models_results_serialization(self):
         model_model['model_version'] = 'testString'
         model_model['version'] = 'testString'
         model_model['version_description'] = 'testString'
-        model_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z"))
+        model_model['created'] = "2019-01-01T12:00:00Z"
 
         # Construct a json representation of a ListModelsResults model
         list_models_results_model_json = {}
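The block removed just above tracks an API-definition change: the list-classifications-models response is no longer modeled by a separate ListClassificationsModelsResponse wrapper, leaving ClassificationsModelList as the list shape. A hedged usage sketch of the call these fixtures exercise; the version date and result shape are assumptions inferred from the surviving models:

    from ibm_watson import NaturalLanguageUnderstandingV1
    from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

    service = NaturalLanguageUnderstandingV1(
        version='2021-08-01',  # illustrative version date
        authenticator=IAMAuthenticator('your_api_key'))
    service.set_service_url('https://api.us-south.natural-language-understanding.watson.cloud.ibm.com')

    # Assumed result shape, matching ClassificationsModelList: {"models": [...]}
    response = service.list_classifications_models().get_result()
    for model in response.get('models', []):
        print(model.get('model_id'), model.get('status'))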
@@ -3246,7 +3229,7 @@ def test_list_models_results_serialization(self): list_models_results_model_json2 = list_models_results_model.to_dict() assert list_models_results_model_json2 == list_models_results_model_json -class TestListSentimentModelsResponse(): +class TestModel_ListSentimentModelsResponse(): """ Test Class for ListSentimentModelsResponse """ @@ -3259,15 +3242,15 @@ def test_list_sentiment_models_response_serialization(self): # Construct dict forms of any model objects needed in order to build this model. notice_model = {} # Notice - notice_model['message'] = 'Not enough examples for class \'foo\'. 4 were given but 5 are required.' + notice_model['message'] = 'Training data validation failed: Too few examples for label insufficient_examples. Minimum of 5 required' sentiment_model_model = {} # SentimentModel sentiment_model_model['features'] = ['testString'] sentiment_model_model['status'] = 'starting' sentiment_model_model['model_id'] = 'testString' - sentiment_model_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - sentiment_model_model['last_trained'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - sentiment_model_model['last_deployed'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + sentiment_model_model['created'] = "2019-01-01T12:00:00Z" + sentiment_model_model['last_trained'] = "2019-01-01T12:00:00Z" + sentiment_model_model['last_deployed'] = "2019-01-01T12:00:00Z" sentiment_model_model['name'] = 'testString' sentiment_model_model['user_metadata'] = {} sentiment_model_model['language'] = 'testString' @@ -3296,7 +3279,7 @@ def test_list_sentiment_models_response_serialization(self): list_sentiment_models_response_model_json2 = list_sentiment_models_response_model.to_dict() assert list_sentiment_models_response_model_json2 == list_sentiment_models_response_model_json -class TestMetadataOptions(): +class TestModel_MetadataOptions(): """ Test Class for MetadataOptions """ @@ -3324,7 +3307,7 @@ def test_metadata_options_serialization(self): metadata_options_model_json2 = metadata_options_model.to_dict() assert metadata_options_model_json2 == metadata_options_model_json -class TestModel(): +class TestModel_Model(): """ Test Class for Model """ @@ -3344,7 +3327,7 @@ def test_model_serialization(self): model_model_json['model_version'] = 'testString' model_model_json['version'] = 'testString' model_model_json['version_description'] = 'testString' - model_model_json['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + model_model_json['created'] = "2019-01-01T12:00:00Z" # Construct a model instance of Model by calling from_dict on the json representation model_model = Model.from_dict(model_model_json) @@ -3361,7 +3344,7 @@ def test_model_serialization(self): model_model_json2 = model_model.to_dict() assert model_model_json2 == model_model_json -class TestNotice(): +class TestModel_Notice(): """ Test Class for Notice """ @@ -3390,7 +3373,7 @@ def test_notice_serialization(self): notice_model_json2 = notice_model.to_dict() assert notice_model_json2 == notice_model_json -class TestRelationArgument(): +class TestModel_RelationArgument(): """ Test Class for RelationArgument """ @@ -3427,7 +3410,7 @@ def test_relation_argument_serialization(self): relation_argument_model_json2 = relation_argument_model.to_dict() assert relation_argument_model_json2 == relation_argument_model_json -class TestRelationEntity(): +class TestModel_RelationEntity(): """ Test Class for RelationEntity 
""" @@ -3457,7 +3440,7 @@ def test_relation_entity_serialization(self): relation_entity_model_json2 = relation_entity_model.to_dict() assert relation_entity_model_json2 == relation_entity_model_json -class TestRelationsOptions(): +class TestModel_RelationsOptions(): """ Test Class for RelationsOptions """ @@ -3486,7 +3469,7 @@ def test_relations_options_serialization(self): relations_options_model_json2 = relations_options_model.to_dict() assert relations_options_model_json2 == relations_options_model_json -class TestRelationsResult(): +class TestModel_RelationsResult(): """ Test Class for RelationsResult """ @@ -3529,7 +3512,7 @@ def test_relations_result_serialization(self): relations_result_model_json2 = relations_result_model.to_dict() assert relations_result_model_json2 == relations_result_model_json -class TestSemanticRolesEntity(): +class TestModel_SemanticRolesEntity(): """ Test Class for SemanticRolesEntity """ @@ -3559,7 +3542,7 @@ def test_semantic_roles_entity_serialization(self): semantic_roles_entity_model_json2 = semantic_roles_entity_model.to_dict() assert semantic_roles_entity_model_json2 == semantic_roles_entity_model_json -class TestSemanticRolesKeyword(): +class TestModel_SemanticRolesKeyword(): """ Test Class for SemanticRolesKeyword """ @@ -3588,7 +3571,7 @@ def test_semantic_roles_keyword_serialization(self): semantic_roles_keyword_model_json2 = semantic_roles_keyword_model.to_dict() assert semantic_roles_keyword_model_json2 == semantic_roles_keyword_model_json -class TestSemanticRolesOptions(): +class TestModel_SemanticRolesOptions(): """ Test Class for SemanticRolesOptions """ @@ -3601,8 +3584,8 @@ def test_semantic_roles_options_serialization(self): # Construct a json representation of a SemanticRolesOptions model semantic_roles_options_model_json = {} semantic_roles_options_model_json['limit'] = 38 - semantic_roles_options_model_json['keywords'] = True - semantic_roles_options_model_json['entities'] = True + semantic_roles_options_model_json['keywords'] = False + semantic_roles_options_model_json['entities'] = False # Construct a model instance of SemanticRolesOptions by calling from_dict on the json representation semantic_roles_options_model = SemanticRolesOptions.from_dict(semantic_roles_options_model_json) @@ -3619,7 +3602,7 @@ def test_semantic_roles_options_serialization(self): semantic_roles_options_model_json2 = semantic_roles_options_model.to_dict() assert semantic_roles_options_model_json2 == semantic_roles_options_model_json -class TestSemanticRolesResult(): +class TestModel_SemanticRolesResult(): """ Test Class for SemanticRolesResult """ @@ -3678,7 +3661,7 @@ def test_semantic_roles_result_serialization(self): semantic_roles_result_model_json2 = semantic_roles_result_model.to_dict() assert semantic_roles_result_model_json2 == semantic_roles_result_model_json -class TestSemanticRolesResultAction(): +class TestModel_SemanticRolesResultAction(): """ Test Class for SemanticRolesResultAction """ @@ -3715,7 +3698,7 @@ def test_semantic_roles_result_action_serialization(self): semantic_roles_result_action_model_json2 = semantic_roles_result_action_model.to_dict() assert semantic_roles_result_action_model_json2 == semantic_roles_result_action_model_json -class TestSemanticRolesResultObject(): +class TestModel_SemanticRolesResultObject(): """ Test Class for SemanticRolesResultObject """ @@ -3750,7 +3733,7 @@ def test_semantic_roles_result_object_serialization(self): semantic_roles_result_object_model_json2 = semantic_roles_result_object_model.to_dict() assert 
semantic_roles_result_object_model_json2 == semantic_roles_result_object_model_json -class TestSemanticRolesResultSubject(): +class TestModel_SemanticRolesResultSubject(): """ Test Class for SemanticRolesResultSubject """ @@ -3790,7 +3773,7 @@ def test_semantic_roles_result_subject_serialization(self): semantic_roles_result_subject_model_json2 = semantic_roles_result_subject_model.to_dict() assert semantic_roles_result_subject_model_json2 == semantic_roles_result_subject_model_json -class TestSemanticRolesVerb(): +class TestModel_SemanticRolesVerb(): """ Test Class for SemanticRolesVerb """ @@ -3820,7 +3803,7 @@ def test_semantic_roles_verb_serialization(self): semantic_roles_verb_model_json2 = semantic_roles_verb_model.to_dict() assert semantic_roles_verb_model_json2 == semantic_roles_verb_model_json -class TestSentenceResult(): +class TestModel_SentenceResult(): """ Test Class for SentenceResult """ @@ -3850,7 +3833,7 @@ def test_sentence_result_serialization(self): sentence_result_model_json2 = sentence_result_model.to_dict() assert sentence_result_model_json2 == sentence_result_model_json -class TestSentimentModel(): +class TestModel_SentimentModel(): """ Test Class for SentimentModel """ @@ -3863,16 +3846,16 @@ def test_sentiment_model_serialization(self): # Construct dict forms of any model objects needed in order to build this model. notice_model = {} # Notice - notice_model['message'] = 'Not enough examples for class \'foo\'. 4 were given but 5 are required.' + notice_model['message'] = 'Training data validation failed: Too few examples for label insufficient_examples. Minimum of 5 required' # Construct a json representation of a SentimentModel model sentiment_model_model_json = {} sentiment_model_model_json['features'] = ['testString'] sentiment_model_model_json['status'] = 'starting' sentiment_model_model_json['model_id'] = 'testString' - sentiment_model_model_json['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - sentiment_model_model_json['last_trained'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - sentiment_model_model_json['last_deployed'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + sentiment_model_model_json['created'] = "2019-01-01T12:00:00Z" + sentiment_model_model_json['last_trained'] = "2019-01-01T12:00:00Z" + sentiment_model_model_json['last_deployed'] = "2019-01-01T12:00:00Z" sentiment_model_model_json['name'] = 'testString' sentiment_model_model_json['user_metadata'] = {} sentiment_model_model_json['language'] = 'testString' @@ -3897,7 +3880,7 @@ def test_sentiment_model_serialization(self): sentiment_model_model_json2 = sentiment_model_model.to_dict() assert sentiment_model_model_json2 == sentiment_model_model_json -class TestSentimentOptions(): +class TestModel_SentimentOptions(): """ Test Class for SentimentOptions """ @@ -3928,7 +3911,7 @@ def test_sentiment_options_serialization(self): sentiment_options_model_json2 = sentiment_options_model.to_dict() assert sentiment_options_model_json2 == sentiment_options_model_json -class TestSentimentResult(): +class TestModel_SentimentResult(): """ Test Class for SentimentResult """ @@ -3968,7 +3951,7 @@ def test_sentiment_result_serialization(self): sentiment_result_model_json2 = sentiment_result_model.to_dict() assert sentiment_result_model_json2 == sentiment_result_model_json -class TestSummarizationOptions(): +class TestModel_SummarizationOptions(): """ Test Class for SummarizationOptions """ @@ -3997,7 +3980,7 @@ def 
test_summarization_options_serialization(self): summarization_options_model_json2 = summarization_options_model.to_dict() assert summarization_options_model_json2 == summarization_options_model_json -class TestSyntaxOptions(): +class TestModel_SyntaxOptions(): """ Test Class for SyntaxOptions """ @@ -4033,7 +4016,7 @@ def test_syntax_options_serialization(self): syntax_options_model_json2 = syntax_options_model.to_dict() assert syntax_options_model_json2 == syntax_options_model_json -class TestSyntaxOptionsTokens(): +class TestModel_SyntaxOptionsTokens(): """ Test Class for SyntaxOptionsTokens """ @@ -4063,7 +4046,7 @@ def test_syntax_options_tokens_serialization(self): syntax_options_tokens_model_json2 = syntax_options_tokens_model.to_dict() assert syntax_options_tokens_model_json2 == syntax_options_tokens_model_json -class TestSyntaxResult(): +class TestModel_SyntaxResult(): """ Test Class for SyntaxResult """ @@ -4105,7 +4088,7 @@ def test_syntax_result_serialization(self): syntax_result_model_json2 = syntax_result_model.to_dict() assert syntax_result_model_json2 == syntax_result_model_json -class TestTargetedEmotionResults(): +class TestModel_TargetedEmotionResults(): """ Test Class for TargetedEmotionResults """ @@ -4144,7 +4127,7 @@ def test_targeted_emotion_results_serialization(self): targeted_emotion_results_model_json2 = targeted_emotion_results_model.to_dict() assert targeted_emotion_results_model_json2 == targeted_emotion_results_model_json -class TestTargetedSentimentResults(): +class TestModel_TargetedSentimentResults(): """ Test Class for TargetedSentimentResults """ @@ -4174,7 +4157,7 @@ def test_targeted_sentiment_results_serialization(self): targeted_sentiment_results_model_json2 = targeted_sentiment_results_model.to_dict() assert targeted_sentiment_results_model_json2 == targeted_sentiment_results_model_json -class TestTokenResult(): +class TestModel_TokenResult(): """ Test Class for TokenResult """ diff --git a/test/unit/test_personality_insights_v3.py b/test/unit/test_personality_insights_v3.py index d005d1fc..787ea51e 100755 --- a/test/unit/test_personality_insights_v3.py +++ b/test/unit/test_personality_insights_v3.py @@ -51,6 +51,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -77,10 +79,10 @@ def test_profile_all_params(self): content_item_model['created'] = 26 content_item_model['updated'] = 26 content_item_model['contenttype'] = 'text/plain' - content_item_model['language'] = 'ar' + content_item_model['language'] = 'en' content_item_model['parentid'] = 'testString' - content_item_model['reply'] = True - content_item_model['forward'] = True + content_item_model['reply'] = False + content_item_model['forward'] = False # Construct a dict representation of a Content model content_model = {} @@ -89,12 +91,12 @@ def test_profile_all_params(self): # Set up parameter values content = content_model accept = 'application/json' - content_type = 'application/json' - content_language = 'ar' - accept_language = 'ar' - raw_scores = True - csv_headers = True - consumption_preferences = True + content_type = 'text/plain' + content_language = 'en' + accept_language = 'en' + raw_scores = False + csv_headers = False + consumption_preferences = False # Invoke method response = _service.profile( @@ -142,10 +144,10 @@ def test_profile_required_params(self): content_item_model['created'] = 26 content_item_model['updated'] = 26 content_item_model['contenttype'] = 'text/plain' - content_item_model['language'] = 'ar' + content_item_model['language'] = 'en' content_item_model['parentid'] = 'testString' - content_item_model['reply'] = True - content_item_model['forward'] = True + content_item_model['reply'] = False + content_item_model['forward'] = False # Construct a dict representation of a Content model content_model = {} @@ -189,10 +191,10 @@ def test_profile_value_error(self): content_item_model['created'] = 26 content_item_model['updated'] = 26 content_item_model['contenttype'] = 'text/plain' - content_item_model['language'] = 'ar' + content_item_model['language'] = 'en' content_item_model['parentid'] = 'testString' - content_item_model['reply'] = True - content_item_model['forward'] = True + content_item_model['reply'] = False + content_item_model['forward'] = False # Construct a dict representation of a Content model content_model = {} @@ -224,7 +226,7 @@ def test_profile_value_error(self): # Start of Model Tests ############################################################################## # region -class TestBehavior(): +class TestModel_Behavior(): """ Test Class for Behavior """ @@ -256,7 +258,7 @@ def test_behavior_serialization(self): behavior_model_json2 = behavior_model.to_dict() assert behavior_model_json2 == behavior_model_json -class TestConsumptionPreferences(): +class TestModel_ConsumptionPreferences(): """ Test Class for ConsumptionPreferences """ @@ -287,7 +289,7 @@ def test_consumption_preferences_serialization(self): consumption_preferences_model_json2 = consumption_preferences_model.to_dict() assert consumption_preferences_model_json2 == consumption_preferences_model_json -class TestConsumptionPreferencesCategory(): +class TestModel_ConsumptionPreferencesCategory(): """ Test Class for ConsumptionPreferencesCategory """ @@ -325,7 +327,7 @@ def test_consumption_preferences_category_serialization(self): consumption_preferences_category_model_json2 = consumption_preferences_category_model.to_dict() assert consumption_preferences_category_model_json2 == consumption_preferences_category_model_json -class TestContent(): +class 
@@ -325,7 +327,7 @@ def test_consumption_preferences_category_serialization(self):
         consumption_preferences_category_model_json2 = consumption_preferences_category_model.to_dict()
         assert consumption_preferences_category_model_json2 == consumption_preferences_category_model_json
 
-class TestContent():
+class TestModel_Content():
     """
     Test Class for Content
     """
@@ -343,10 +345,10 @@ def test_content_serialization(self):
         content_item_model['created'] = 26
         content_item_model['updated'] = 26
         content_item_model['contenttype'] = 'text/plain'
-        content_item_model['language'] = 'ar'
+        content_item_model['language'] = 'en'
         content_item_model['parentid'] = 'testString'
-        content_item_model['reply'] = True
-        content_item_model['forward'] = True
+        content_item_model['reply'] = False
+        content_item_model['forward'] = False
 
         # Construct a json representation of a Content model
         content_model_json = {}
@@ -367,7 +369,7 @@ def test_content_serialization(self):
         content_model_json2 = content_model.to_dict()
         assert content_model_json2 == content_model_json
 
-class TestContentItem():
+class TestModel_ContentItem():
     """
     Test Class for ContentItem
     """
@@ -384,10 +386,10 @@ def test_content_item_serialization(self):
         content_item_model_json['created'] = 26
         content_item_model_json['updated'] = 26
         content_item_model_json['contenttype'] = 'text/plain'
-        content_item_model_json['language'] = 'ar'
+        content_item_model_json['language'] = 'en'
         content_item_model_json['parentid'] = 'testString'
-        content_item_model_json['reply'] = True
-        content_item_model_json['forward'] = True
+        content_item_model_json['reply'] = False
+        content_item_model_json['forward'] = False
 
         # Construct a model instance of ContentItem by calling from_dict on the json representation
         content_item_model = ContentItem.from_dict(content_item_model_json)
@@ -404,7 +406,7 @@ def test_content_item_serialization(self):
         content_item_model_json2 = content_item_model.to_dict()
         assert content_item_model_json2 == content_item_model_json
 
-class TestProfile():
+class TestModel_Profile():
     """
     Test Class for Profile
     """
@@ -471,7 +473,7 @@ def test_profile_serialization(self):
         profile_model_json2 = profile_model.to_dict()
         assert profile_model_json2 == profile_model_json
 
-class TestTrait():
+class TestModel_Trait():
     """
     Test Class for Trait
     """
@@ -505,7 +507,7 @@ def test_trait_serialization(self):
         trait_model_json2 = trait_model.to_dict()
         assert trait_model_json2 == trait_model_json
 
-class TestWarning():
+class TestModel_Warning():
     """
     Test Class for Warning
     """
diff --git a/test/unit/test_speech_to_text_v1.py b/test/unit/test_speech_to_text_v1.py
index 986c01e6..18934165 100755
--- a/test/unit/test_speech_to_text_v1.py
+++ b/test/unit/test_speech_to_text_v1.py
@@ -51,6 +51,8 @@ def preprocess_url(self, request_url: str):
         """
         Preprocess the request URL to ensure the mock response will be found.
         """
+        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
+        request_url = urllib.parse.quote(request_url, safe=':/')
         if re.fullmatch('.*/+', request_url) is None:
             return request_url
         else:
@@ -88,6 +90,8 @@ def preprocess_url(self, request_url: str):
         """
         Preprocess the request URL to ensure the mock response will be found.
         """
+        request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded
+        request_url = urllib.parse.quote(request_url, safe=':/')
         if re.fullmatch('.*/+', request_url) is None:
             return request_url
         else:
@@ -168,6 +172,8 @@ def preprocess_url(self, request_url: str):
         """
         Preprocess the request URL to ensure the mock response will be found.
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -190,7 +196,7 @@ def test_recognize_all_params(self): # Set up parameter values audio = io.BytesIO(b'This is a mock file.').getvalue() content_type = 'application/octet-stream' - model = 'ar-AR_BroadbandModel' + model = 'en-US_BroadbandModel' language_customization_id = 'testString' acoustic_customization_id = 'testString' base_model_version = 'testString' @@ -200,20 +206,20 @@ def test_recognize_all_params(self): keywords_threshold = 72.5 max_alternatives = 38 word_alternatives_threshold = 72.5 - word_confidence = True - timestamps = True + word_confidence = False + timestamps = False profanity_filter = True - smart_formatting = True - speaker_labels = True + smart_formatting = False + speaker_labels = False customization_id = 'testString' grammar_name = 'testString' - redaction = True - audio_metrics = True + redaction = False + audio_metrics = False end_of_phrase_silence_time = 72.5 - split_transcript_at_phrase_end = True + split_transcript_at_phrase_end = False speech_detector_sensitivity = 72.5 background_audio_suppression = 72.5 - low_latency = True + low_latency = False # Invoke method response = _service.recognize( @@ -355,6 +361,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -464,6 +472,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -532,6 +542,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -554,7 +566,7 @@ def test_create_job_all_params(self): # Set up parameter values audio = io.BytesIO(b'This is a mock file.').getvalue() content_type = 'application/octet-stream' - model = 'ar-AR_BroadbandModel' + model = 'en-US_BroadbandModel' callback_url = 'testString' events = 'recognitions.started' user_token = 'testString' @@ -568,22 +580,22 @@ def test_create_job_all_params(self): keywords_threshold = 72.5 max_alternatives = 38 word_alternatives_threshold = 72.5 - word_confidence = True - timestamps = True + word_confidence = False + timestamps = False profanity_filter = True - smart_formatting = True - speaker_labels = True + smart_formatting = False + speaker_labels = False customization_id = 'testString' grammar_name = 'testString' - redaction = True - processing_metrics = True + redaction = False + processing_metrics = False processing_metrics_interval = 72.5 - audio_metrics = True + audio_metrics = False end_of_phrase_silence_time = 72.5 - split_transcript_at_phrase_end = True + split_transcript_at_phrase_end = False speech_detector_sensitivity = 72.5 background_audio_suppression = 72.5 - low_latency = True + low_latency = False # Invoke method response = _service.create_job( @@ -727,6 +739,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -764,6 +778,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -834,6 +850,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -908,6 +926,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -929,7 +949,7 @@ def test_create_language_model_all_params(self): # Set up parameter values name = 'testString' - base_model_name = 'de-DE_BroadbandModel' + base_model_name = 'ar-MS_Telephony' dialect = 'testString' description = 'testString' @@ -948,7 +968,7 @@ def test_create_language_model_all_params(self): # Validate body params req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) assert req_body['name'] == 'testString' - assert req_body['base_model_name'] == 'de-DE_BroadbandModel' + assert req_body['base_model_name'] == 'ar-MS_Telephony' assert req_body['dialect'] == 'testString' assert req_body['description'] == 'testString' @@ -969,7 +989,7 @@ def test_create_language_model_value_error(self): # Set up parameter values name = 'testString' - base_model_name = 'de-DE_BroadbandModel' + base_model_name = 'ar-MS_Telephony' dialect = 'testString' description = 'testString' @@ -994,6 +1014,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1063,6 +1085,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1133,6 +1157,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1197,6 +1223,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1304,6 +1332,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1368,6 +1398,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1442,6 +1474,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1512,6 +1546,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1532,7 +1568,7 @@ def test_add_corpus_all_params(self): customization_id = 'testString' corpus_name = 'testString' corpus_file = io.BytesIO(b'This is a mock file.').getvalue() - allow_overwrite = True + allow_overwrite = False # Invoke method response = _service.add_corpus( @@ -1619,6 +1655,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1693,6 +1731,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1771,6 +1811,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1878,6 +1920,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1961,6 +2005,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2043,6 +2089,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2117,6 +2165,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2195,6 +2245,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2265,6 +2317,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2286,7 +2340,7 @@ def test_add_grammar_all_params(self): grammar_name = 'testString' grammar_file = 'testString' content_type = 'application/srgs' - allow_overwrite = True + allow_overwrite = False # Invoke method response = _service.add_grammar( @@ -2380,6 +2434,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2454,6 +2510,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2532,6 +2590,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2614,6 +2674,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2683,6 +2745,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2753,6 +2817,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2817,6 +2883,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2921,6 +2989,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -2985,6 +3055,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -3004,7 +3076,7 @@ def test_upgrade_acoustic_model_all_params(self): # Set up parameter values customization_id = 'testString' custom_language_model_id = 'testString' - force = True + force = False # Invoke method response = _service.upgrade_acoustic_model( @@ -3093,6 +3165,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -3163,6 +3237,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -3185,7 +3261,7 @@ def test_add_audio_all_params(self): audio_resource = io.BytesIO(b'This is a mock file.').getvalue() content_type = 'application/zip' contained_content_type = 'audio/alaw' - allow_overwrite = True + allow_overwrite = False # Invoke method response = _service.add_audio( @@ -3276,6 +3352,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -3350,6 +3428,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -3428,6 +3508,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -3497,7 +3579,7 @@ def test_delete_user_data_value_error(self): # Start of Model Tests ############################################################################## # region -class TestAcousticModel(): +class TestModel_AcousticModel(): """ Test Class for AcousticModel """ @@ -3537,7 +3619,7 @@ def test_acoustic_model_serialization(self): acoustic_model_model_json2 = acoustic_model_model.to_dict() assert acoustic_model_model_json2 == acoustic_model_model_json -class TestAcousticModels(): +class TestModel_AcousticModels(): """ Test Class for AcousticModels """ @@ -3582,7 +3664,7 @@ def test_acoustic_models_serialization(self): acoustic_models_model_json2 = acoustic_models_model.to_dict() assert acoustic_models_model_json2 == acoustic_models_model_json -class TestAudioDetails(): +class TestModel_AudioDetails(): """ Test Class for AudioDetails """ @@ -3614,7 +3696,7 @@ def test_audio_details_serialization(self): audio_details_model_json2 = audio_details_model.to_dict() assert audio_details_model_json2 == audio_details_model_json -class TestAudioListing(): +class TestModel_AudioListing(): """ Test Class for AudioListing """ @@ -3662,7 +3744,7 @@ def test_audio_listing_serialization(self): audio_listing_model_json2 = audio_listing_model.to_dict() assert audio_listing_model_json2 == audio_listing_model_json -class TestAudioMetrics(): +class TestModel_AudioMetrics(): """ Test Class for AudioMetrics """ @@ -3710,7 +3792,7 @@ def test_audio_metrics_serialization(self): audio_metrics_model_json2 = audio_metrics_model.to_dict() assert audio_metrics_model_json2 == audio_metrics_model_json -class TestAudioMetricsDetails(): +class TestModel_AudioMetricsDetails(): """ Test Class for AudioMetricsDetails """ @@ -3754,7 +3836,7 @@ def test_audio_metrics_details_serialization(self): audio_metrics_details_model_json2 = audio_metrics_details_model.to_dict() assert audio_metrics_details_model_json2 == audio_metrics_details_model_json -class TestAudioMetricsHistogramBin(): +class TestModel_AudioMetricsHistogramBin(): """ Test Class for AudioMetricsHistogramBin """ @@ -3785,7 +3867,7 @@ def test_audio_metrics_histogram_bin_serialization(self): audio_metrics_histogram_bin_model_json2 = audio_metrics_histogram_bin_model.to_dict() assert audio_metrics_histogram_bin_model_json2 == audio_metrics_histogram_bin_model_json -class TestAudioResource(): +class TestModel_AudioResource(): """ Test Class for AudioResource """ @@ -3825,7 +3907,7 @@ def test_audio_resource_serialization(self): audio_resource_model_json2 = audio_resource_model.to_dict() assert audio_resource_model_json2 == audio_resource_model_json -class TestAudioResources(): +class TestModel_AudioResources(): """ Test Class for AudioResources """ @@ -3869,7 +3951,7 @@ def test_audio_resources_serialization(self): audio_resources_model_json2 = audio_resources_model.to_dict() assert audio_resources_model_json2 == audio_resources_model_json -class TestCorpora(): +class TestModel_Corpora(): """ Test Class for Corpora """ @@ -3907,7 +3989,7 @@ def test_corpora_serialization(self): corpora_model_json2 = corpora_model.to_dict() assert corpora_model_json2 == corpora_model_json -class TestCorpus(): +class TestModel_Corpus(): """ Test Class for Corpus """ @@ -3940,7 +4022,7 @@ def test_corpus_serialization(self): corpus_model_json2 = 
corpus_model.to_dict() assert corpus_model_json2 == corpus_model_json -class TestCustomWord(): +class TestModel_CustomWord(): """ Test Class for CustomWord """ @@ -3971,7 +4053,7 @@ def test_custom_word_serialization(self): custom_word_model_json2 = custom_word_model.to_dict() assert custom_word_model_json2 == custom_word_model_json -class TestGrammar(): +class TestModel_Grammar(): """ Test Class for Grammar """ @@ -4003,7 +4085,7 @@ def test_grammar_serialization(self): grammar_model_json2 = grammar_model.to_dict() assert grammar_model_json2 == grammar_model_json -class TestGrammars(): +class TestModel_Grammars(): """ Test Class for Grammars """ @@ -4040,7 +4122,7 @@ def test_grammars_serialization(self): grammars_model_json2 = grammars_model.to_dict() assert grammars_model_json2 == grammars_model_json -class TestKeywordResult(): +class TestModel_KeywordResult(): """ Test Class for KeywordResult """ @@ -4072,7 +4154,7 @@ def test_keyword_result_serialization(self): keyword_result_model_json2 = keyword_result_model.to_dict() assert keyword_result_model_json2 == keyword_result_model_json -class TestLanguageModel(): +class TestModel_LanguageModel(): """ Test Class for LanguageModel """ @@ -4114,7 +4196,7 @@ def test_language_model_serialization(self): language_model_model_json2 = language_model_model.to_dict() assert language_model_model_json2 == language_model_model_json -class TestLanguageModels(): +class TestModel_LanguageModels(): """ Test Class for LanguageModels """ @@ -4161,7 +4243,7 @@ def test_language_models_serialization(self): language_models_model_json2 = language_models_model.to_dict() assert language_models_model_json2 == language_models_model_json -class TestProcessedAudio(): +class TestModel_ProcessedAudio(): """ Test Class for ProcessedAudio """ @@ -4193,7 +4275,7 @@ def test_processed_audio_serialization(self): processed_audio_model_json2 = processed_audio_model.to_dict() assert processed_audio_model_json2 == processed_audio_model_json -class TestProcessingMetrics(): +class TestModel_ProcessingMetrics(): """ Test Class for ProcessingMetrics """ @@ -4232,7 +4314,7 @@ def test_processing_metrics_serialization(self): processing_metrics_model_json2 = processing_metrics_model.to_dict() assert processing_metrics_model_json2 == processing_metrics_model_json -class TestRecognitionJob(): +class TestModel_RecognitionJob(): """ Test Class for RecognitionJob """ @@ -4344,7 +4426,7 @@ def test_recognition_job_serialization(self): recognition_job_model_json2 = recognition_job_model.to_dict() assert recognition_job_model_json2 == recognition_job_model_json -class TestRecognitionJobs(): +class TestModel_RecognitionJobs(): """ Test Class for RecognitionJobs """ @@ -4459,7 +4541,7 @@ def test_recognition_jobs_serialization(self): recognition_jobs_model_json2 = recognition_jobs_model.to_dict() assert recognition_jobs_model_json2 == recognition_jobs_model_json -class TestRegisterStatus(): +class TestModel_RegisterStatus(): """ Test Class for RegisterStatus """ @@ -4489,7 +4571,7 @@ def test_register_status_serialization(self): register_status_model_json2 = register_status_model.to_dict() assert register_status_model_json2 == register_status_model_json -class TestSpeakerLabelsResult(): +class TestModel_SpeakerLabelsResult(): """ Test Class for SpeakerLabelsResult """ @@ -4522,7 +4604,7 @@ def test_speaker_labels_result_serialization(self): speaker_labels_result_model_json2 = speaker_labels_result_model.to_dict() assert speaker_labels_result_model_json2 == speaker_labels_result_model_json 
-class TestSpeechModel(): +class TestModel_SpeechModel(): """ Test Class for SpeechModel """ @@ -4563,7 +4645,7 @@ def test_speech_model_serialization(self): speech_model_model_json2 = speech_model_model.to_dict() assert speech_model_model_json2 == speech_model_model_json -class TestSpeechModels(): +class TestModel_SpeechModels(): """ Test Class for SpeechModels """ @@ -4607,7 +4689,7 @@ def test_speech_models_serialization(self): speech_models_model_json2 = speech_models_model.to_dict() assert speech_models_model_json2 == speech_models_model_json -class TestSpeechRecognitionAlternative(): +class TestModel_SpeechRecognitionAlternative(): """ Test Class for SpeechRecognitionAlternative """ @@ -4639,7 +4721,7 @@ def test_speech_recognition_alternative_serialization(self): speech_recognition_alternative_model_json2 = speech_recognition_alternative_model.to_dict() assert speech_recognition_alternative_model_json2 == speech_recognition_alternative_model_json -class TestSpeechRecognitionResult(): +class TestModel_SpeechRecognitionResult(): """ Test Class for SpeechRecognitionResult """ @@ -4695,7 +4777,7 @@ def test_speech_recognition_result_serialization(self): speech_recognition_result_model_json2 = speech_recognition_result_model.to_dict() assert speech_recognition_result_model_json2 == speech_recognition_result_model_json -class TestSpeechRecognitionResults(): +class TestModel_SpeechRecognitionResults(): """ Test Class for SpeechRecognitionResults """ @@ -4797,7 +4879,7 @@ def test_speech_recognition_results_serialization(self): speech_recognition_results_model_json2 = speech_recognition_results_model.to_dict() assert speech_recognition_results_model_json2 == speech_recognition_results_model_json -class TestSupportedFeatures(): +class TestModel_SupportedFeatures(): """ Test Class for SupportedFeatures """ @@ -4828,7 +4910,7 @@ def test_supported_features_serialization(self): supported_features_model_json2 = supported_features_model.to_dict() assert supported_features_model_json2 == supported_features_model_json -class TestTrainingResponse(): +class TestModel_TrainingResponse(): """ Test Class for TrainingResponse """ @@ -4863,7 +4945,7 @@ def test_training_response_serialization(self): training_response_model_json2 = training_response_model.to_dict() assert training_response_model_json2 == training_response_model_json -class TestTrainingWarning(): +class TestModel_TrainingWarning(): """ Test Class for TrainingWarning """ @@ -4893,7 +4975,7 @@ def test_training_warning_serialization(self): training_warning_model_json2 = training_warning_model.to_dict() assert training_warning_model_json2 == training_warning_model_json -class TestWord(): +class TestModel_Word(): """ Test Class for Word """ @@ -4932,7 +5014,7 @@ def test_word_serialization(self): word_model_json2 = word_model.to_dict() assert word_model_json2 == word_model_json -class TestWordAlternativeResult(): +class TestModel_WordAlternativeResult(): """ Test Class for WordAlternativeResult """ @@ -4962,7 +5044,7 @@ def test_word_alternative_result_serialization(self): word_alternative_result_model_json2 = word_alternative_result_model.to_dict() assert word_alternative_result_model_json2 == word_alternative_result_model_json -class TestWordAlternativeResults(): +class TestModel_WordAlternativeResults(): """ Test Class for WordAlternativeResults """ @@ -4999,7 +5081,7 @@ def test_word_alternative_results_serialization(self): word_alternative_results_model_json2 = word_alternative_results_model.to_dict() assert 
word_alternative_results_model_json2 == word_alternative_results_model_json -class TestWordError(): +class TestModel_WordError(): """ Test Class for WordError """ @@ -5028,7 +5110,7 @@ def test_word_error_serialization(self): word_error_model_json2 = word_error_model.to_dict() assert word_error_model_json2 == word_error_model_json -class TestWords(): +class TestModel_Words(): """ Test Class for Words """ diff --git a/test/unit/test_text_to_speech_v1.py b/test/unit/test_text_to_speech_v1.py index 7414852a..c8119130 100644 --- a/test/unit/test_text_to_speech_v1.py +++ b/test/unit/test_text_to_speech_v1.py @@ -51,6 +51,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -88,6 +90,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -202,6 +206,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -223,8 +229,8 @@ def test_synthesize_all_params(self): # Set up parameter values text = 'testString' - accept = 'audio/basic' - voice = 'ar-AR_OmarVoice' + accept = 'audio/ogg;codecs=opus' + voice = 'en-US_MichaelV3Voice' customization_id = 'testString' # Invoke method @@ -327,6 +333,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -348,8 +356,8 @@ def test_get_pronunciation_all_params(self): # Set up parameter values text = 'testString' - voice = 'ar-AR_OmarVoice' - format = 'ibm' + voice = 'en-US_MichaelV3Voice' + format = 'ipa' customization_id = 'testString' # Invoke method @@ -452,6 +460,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -473,7 +483,7 @@ def test_create_custom_model_all_params(self): # Set up parameter values name = 'testString' - language = 'ar-MS' + language = 'en-US' description = 'testString' # Invoke method @@ -490,7 +500,7 @@ def test_create_custom_model_all_params(self): # Validate body params req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) assert req_body['name'] == 'testString' - assert req_body['language'] == 'ar-MS' + assert req_body['language'] == 'en-US' assert req_body['description'] == 'testString' @@ -510,7 +520,7 @@ def test_create_custom_model_value_error(self): # Set up parameter values name = 'testString' - language = 'ar-MS' + language = 'en-US' description = 'testString' # Pass in all but one required param and check for a ValueError @@ -533,6 +543,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -602,6 +614,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -692,6 +706,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -762,6 +778,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -836,6 +854,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -919,6 +939,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -989,6 +1011,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1068,6 +1092,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1142,6 +1168,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1220,6 +1248,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1290,6 +1320,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1382,6 +1414,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1456,6 +1490,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1534,6 +1570,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1571,6 +1609,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1651,6 +1691,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1721,6 +1763,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1795,6 +1839,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1864,7 +1910,7 @@ def test_delete_user_data_value_error(self): # Start of Model Tests ############################################################################## # region -class TestCustomModel(): +class TestModel_CustomModel(): """ Test Class for CustomModel """ @@ -1915,7 +1961,7 @@ def test_custom_model_serialization(self): custom_model_model_json2 = custom_model_model.to_dict() assert custom_model_model_json2 == custom_model_model_json -class TestCustomModels(): +class TestModel_CustomModels(): """ Test Class for CustomModels """ @@ -1969,7 +2015,7 @@ def test_custom_models_serialization(self): custom_models_model_json2 = custom_models_model.to_dict() assert custom_models_model_json2 == custom_models_model_json -class TestPrompt(): +class TestModel_Prompt(): """ Test Class for Prompt """ @@ -2002,7 +2048,7 @@ def test_prompt_serialization(self): prompt_model_json2 = prompt_model.to_dict() assert prompt_model_json2 == prompt_model_json -class TestPromptMetadata(): +class TestModel_PromptMetadata(): """ Test Class for PromptMetadata """ @@ -2032,7 +2078,7 @@ def test_prompt_metadata_serialization(self): prompt_metadata_model_json2 = prompt_metadata_model.to_dict() assert prompt_metadata_model_json2 == prompt_metadata_model_json -class TestPrompts(): +class TestModel_Prompts(): """ Test Class for Prompts """ @@ -2070,7 +2116,7 @@ def test_prompts_serialization(self): prompts_model_json2 = prompts_model.to_dict() assert prompts_model_json2 == prompts_model_json -class TestPronunciation(): +class TestModel_Pronunciation(): """ Test Class for Pronunciation """ @@ -2099,7 +2145,7 @@ def test_pronunciation_serialization(self): pronunciation_model_json2 = pronunciation_model.to_dict() assert pronunciation_model_json2 == pronunciation_model_json -class TestSpeaker(): +class TestModel_Speaker(): """ Test Class for Speaker """ @@ -2129,7 +2175,7 @@ def test_speaker_serialization(self): speaker_model_json2 = speaker_model.to_dict() assert speaker_model_json2 == speaker_model_json -class TestSpeakerCustomModel(): +class TestModel_SpeakerCustomModel(): """ Test Class for SpeakerCustomModel """ @@ -2167,7 +2213,7 @@ def test_speaker_custom_model_serialization(self): speaker_custom_model_model_json2 = speaker_custom_model_model.to_dict() assert speaker_custom_model_model_json2 == speaker_custom_model_model_json -class TestSpeakerCustomModels(): +class TestModel_SpeakerCustomModels(): """ Test Class for SpeakerCustomModels """ @@ -2208,7 +2254,7 @@ def test_speaker_custom_models_serialization(self): speaker_custom_models_model_json2 = 
speaker_custom_models_model.to_dict() assert speaker_custom_models_model_json2 == speaker_custom_models_model_json -class TestSpeakerModel(): +class TestModel_SpeakerModel(): """ Test Class for SpeakerModel """ @@ -2237,7 +2283,7 @@ def test_speaker_model_serialization(self): speaker_model_model_json2 = speaker_model_model.to_dict() assert speaker_model_model_json2 == speaker_model_model_json -class TestSpeakerPrompt(): +class TestModel_SpeakerPrompt(): """ Test Class for SpeakerPrompt """ @@ -2269,7 +2315,7 @@ def test_speaker_prompt_serialization(self): speaker_prompt_model_json2 = speaker_prompt_model.to_dict() assert speaker_prompt_model_json2 == speaker_prompt_model_json -class TestSpeakers(): +class TestModel_Speakers(): """ Test Class for Speakers """ @@ -2304,7 +2350,7 @@ def test_speakers_serialization(self): speakers_model_json2 = speakers_model.to_dict() assert speakers_model_json2 == speakers_model_json -class TestSupportedFeatures(): +class TestModel_SupportedFeatures(): """ Test Class for SupportedFeatures """ @@ -2334,7 +2380,7 @@ def test_supported_features_serialization(self): supported_features_model_json2 = supported_features_model.to_dict() assert supported_features_model_json2 == supported_features_model_json -class TestTranslation(): +class TestModel_Translation(): """ Test Class for Translation """ @@ -2364,7 +2410,7 @@ def test_translation_serialization(self): translation_model_json2 = translation_model.to_dict() assert translation_model_json2 == translation_model_json -class TestVoice(): +class TestModel_Voice(): """ Test Class for Voice """ @@ -2429,7 +2475,7 @@ def test_voice_serialization(self): voice_model_json2 = voice_model.to_dict() assert voice_model_json2 == voice_model_json -class TestVoices(): +class TestModel_Voices(): """ Test Class for Voices """ @@ -2497,7 +2543,7 @@ def test_voices_serialization(self): voices_model_json2 = voices_model.to_dict() assert voices_model_json2 == voices_model_json -class TestWord(): +class TestModel_Word(): """ Test Class for Word """ @@ -2528,7 +2574,7 @@ def test_word_serialization(self): word_model_json2 = word_model.to_dict() assert word_model_json2 == word_model_json -class TestWords(): +class TestModel_Words(): """ Test Class for Words """ diff --git a/test/unit/test_tone_analyzer_v3.py b/test/unit/test_tone_analyzer_v3.py index cb398a57..1a97b544 100755 --- a/test/unit/test_tone_analyzer_v3.py +++ b/test/unit/test_tone_analyzer_v3.py @@ -51,6 +51,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -80,7 +82,7 @@ def test_tone_all_params(self): sentences = True tones = ['emotion'] content_language = 'en' - accept_language = 'ar' + accept_language = 'en' # Invoke method response = _service.tone( @@ -178,6 +180,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -205,7 +209,7 @@ def test_tone_chat_all_params(self): # Set up parameter values utterances = [utterance_model] content_language = 'en' - accept_language = 'ar' + accept_language = 'en' # Invoke method response = _service.tone_chat( @@ -302,7 +306,7 @@ def test_tone_chat_value_error(self): # Start of Model Tests ############################################################################## # region -class TestDocumentAnalysis(): +class TestModel_DocumentAnalysis(): """ Test Class for DocumentAnalysis """ @@ -345,7 +349,7 @@ def test_document_analysis_serialization(self): document_analysis_model_json2 = document_analysis_model.to_dict() assert document_analysis_model_json2 == document_analysis_model_json -class TestSentenceAnalysis(): +class TestModel_SentenceAnalysis(): """ Test Class for SentenceAnalysis """ @@ -391,7 +395,7 @@ def test_sentence_analysis_serialization(self): sentence_analysis_model_json2 = sentence_analysis_model.to_dict() assert sentence_analysis_model_json2 == sentence_analysis_model_json -class TestToneAnalysis(): +class TestModel_ToneAnalysis(): """ Test Class for ToneAnalysis """ @@ -446,7 +450,7 @@ def test_tone_analysis_serialization(self): tone_analysis_model_json2 = tone_analysis_model.to_dict() assert tone_analysis_model_json2 == tone_analysis_model_json -class TestToneCategory(): +class TestModel_ToneCategory(): """ Test Class for ToneCategory """ @@ -484,7 +488,7 @@ def test_tone_category_serialization(self): tone_category_model_json2 = tone_category_model.to_dict() assert tone_category_model_json2 == tone_category_model_json -class TestToneChatScore(): +class TestModel_ToneChatScore(): """ Test Class for ToneChatScore """ @@ -515,7 +519,7 @@ def test_tone_chat_score_serialization(self): tone_chat_score_model_json2 = tone_chat_score_model.to_dict() assert tone_chat_score_model_json2 == tone_chat_score_model_json -class TestToneInput(): +class TestModel_ToneInput(): """ Test Class for ToneInput """ @@ -544,7 +548,7 @@ def test_tone_input_serialization(self): tone_input_model_json2 = tone_input_model.to_dict() assert tone_input_model_json2 == tone_input_model_json -class TestToneScore(): +class TestModel_ToneScore(): """ Test Class for ToneScore """ @@ -575,7 +579,7 @@ def test_tone_score_serialization(self): tone_score_model_json2 = tone_score_model.to_dict() assert tone_score_model_json2 == tone_score_model_json -class TestUtterance(): +class TestModel_Utterance(): """ Test Class for Utterance """ @@ -605,7 +609,7 @@ def test_utterance_serialization(self): utterance_model_json2 = utterance_model.to_dict() assert utterance_model_json2 == utterance_model_json -class TestUtteranceAnalyses(): +class TestModel_UtteranceAnalyses(): """ Test Class for UtteranceAnalyses """ @@ -648,7 +652,7 @@ def test_utterance_analyses_serialization(self): utterance_analyses_model_json2 = utterance_analyses_model.to_dict() assert utterance_analyses_model_json2 == utterance_analyses_model_json -class TestUtteranceAnalysis(): +class TestModel_UtteranceAnalysis(): """ Test Class for UtteranceAnalysis """ diff --git a/test/unit/test_visual_recognition_v3.py b/test/unit/test_visual_recognition_v3.py index ae9e5f13..327eb930 100644 --- a/test/unit/test_visual_recognition_v3.py +++ b/test/unit/test_visual_recognition_v3.py @@ -55,6 +55,8 @@ def preprocess_url(self, 
request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -168,6 +170,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -276,6 +280,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -369,6 +375,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -439,6 +447,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -543,6 +553,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -617,6 +629,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -697,6 +711,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -766,7 +782,7 @@ def test_delete_user_data_value_error(self): # Start of Model Tests ############################################################################## # region -class TestClass(): +class TestModel_Class(): """ Test Class for Class """ @@ -795,7 +811,7 @@ def test_class_serialization(self): class_model_json2 = class_model.to_dict() assert class_model_json2 == class_model_json -class TestClassResult(): +class TestModel_ClassResult(): """ Test Class for ClassResult """ @@ -826,7 +842,7 @@ def test_class_result_serialization(self): class_result_model_json2 = class_result_model.to_dict() assert class_result_model_json2 == class_result_model_json -class TestClassifiedImage(): +class TestModel_ClassifiedImage(): """ Test Class for ClassifiedImage """ @@ -876,7 +892,7 @@ def test_classified_image_serialization(self): classified_image_model_json2 = classified_image_model.to_dict() assert classified_image_model_json2 == classified_image_model_json -class TestClassifiedImages(): +class TestModel_ClassifiedImages(): """ Test Class for ClassifiedImages """ @@ -936,7 +952,7 @@ def test_classified_images_serialization(self): classified_images_model_json2 = classified_images_model.to_dict() assert classified_images_model_json2 == classified_images_model_json -class TestClassifier(): +class TestModel_Classifier(): """ Test Class for Classifier """ @@ -959,10 +975,10 @@ def test_classifier_serialization(self): classifier_model_json['status'] = 'ready' classifier_model_json['core_ml_enabled'] = True classifier_model_json['explanation'] = 'testString' - classifier_model_json['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + classifier_model_json['created'] = "2019-01-01T12:00:00Z" classifier_model_json['classes'] = [class_model] - classifier_model_json['retrained'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - classifier_model_json['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + classifier_model_json['retrained'] = "2019-01-01T12:00:00Z" + classifier_model_json['updated'] = "2019-01-01T12:00:00Z" # Construct a model instance of Classifier by calling from_dict on the json representation classifier_model = Classifier.from_dict(classifier_model_json) @@ -979,7 +995,7 @@ def test_classifier_serialization(self): classifier_model_json2 = classifier_model.to_dict() assert classifier_model_json2 == classifier_model_json -class TestClassifierResult(): +class TestModel_ClassifierResult(): """ Test Class for ClassifierResult """ @@ -1017,7 +1033,7 @@ def test_classifier_result_serialization(self): classifier_result_model_json2 = classifier_result_model.to_dict() assert classifier_result_model_json2 == classifier_result_model_json -class TestClassifiers(): +class TestModel_Classifiers(): """ Test Class for Classifiers """ @@ -1039,10 +1055,10 @@ def test_classifiers_serialization(self): classifier_model['status'] = 'ready' classifier_model['core_ml_enabled'] = True classifier_model['explanation'] = 'testString' - classifier_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + classifier_model['created'] = "2019-01-01T12:00:00Z" classifier_model['classes'] = [class_model] - classifier_model['retrained'] = 
datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - classifier_model['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + classifier_model['retrained'] = "2019-01-01T12:00:00Z" + classifier_model['updated'] = "2019-01-01T12:00:00Z" # Construct a json representation of a Classifiers model classifiers_model_json = {} @@ -1063,7 +1079,7 @@ def test_classifiers_serialization(self): classifiers_model_json2 = classifiers_model.to_dict() assert classifiers_model_json2 == classifiers_model_json -class TestErrorInfo(): +class TestModel_ErrorInfo(): """ Test Class for ErrorInfo """ @@ -1094,7 +1110,7 @@ def test_error_info_serialization(self): error_info_model_json2 = error_info_model.to_dict() assert error_info_model_json2 == error_info_model_json -class TestWarningInfo(): +class TestModel_WarningInfo(): """ Test Class for WarningInfo """ diff --git a/test/unit/test_visual_recognition_v4.py b/test/unit/test_visual_recognition_v4.py index f6d177cc..d1875fdc 100644 --- a/test/unit/test_visual_recognition_v4.py +++ b/test/unit/test_visual_recognition_v4.py @@ -56,6 +56,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -182,6 +184,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -288,6 +292,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -349,6 +355,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -419,6 +427,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -541,6 +551,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -605,6 +617,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -698,6 +712,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -808,6 +824,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -878,6 +896,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -952,6 +972,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1020,6 +1042,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1140,6 +1164,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1210,6 +1236,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1291,6 +1319,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1365,6 +1395,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. 
""" + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1443,6 +1475,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1513,6 +1547,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1617,6 +1653,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1723,6 +1761,8 @@ def preprocess_url(self, request_url: str): """ Preprocess the request URL to ensure the mock response will be found. """ + request_url = urllib.parse.unquote(request_url) # don't double-encode if already encoded + request_url = urllib.parse.quote(request_url, safe=':/') if re.fullmatch('.*/+', request_url) is None: return request_url else: @@ -1792,7 +1832,7 @@ def test_delete_user_data_value_error(self): # Start of Model Tests ############################################################################## # region -class TestAnalyzeResponse(): +class TestModel_AnalyzeResponse(): """ Test Class for AnalyzeResponse """ @@ -1875,7 +1915,7 @@ def test_analyze_response_serialization(self): analyze_response_model_json2 = analyze_response_model.to_dict() assert analyze_response_model_json2 == analyze_response_model_json -class TestCollection(): +class TestModel_Collection(): """ Test Class for Collection """ @@ -1903,8 +1943,8 @@ def test_collection_serialization(self): collection_model_json['collection_id'] = 'testString' collection_model_json['name'] = 'testString' collection_model_json['description'] = 'testString' - collection_model_json['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - collection_model_json['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + collection_model_json['created'] = "2019-01-01T12:00:00Z" + collection_model_json['updated'] = "2019-01-01T12:00:00Z" collection_model_json['image_count'] = 38 collection_model_json['training_status'] = collection_training_status_model @@ -1923,7 +1963,7 @@ def test_collection_serialization(self): collection_model_json2 = collection_model.to_dict() assert collection_model_json2 == collection_model_json -class TestCollectionObjects(): +class TestModel_CollectionObjects(): """ Test Class for CollectionObjects """ @@ -1966,7 +2006,7 @@ def test_collection_objects_serialization(self): collection_objects_model_json2 = collection_objects_model.to_dict() assert collection_objects_model_json2 == collection_objects_model_json -class TestCollectionTrainingStatus(): +class TestModel_CollectionTrainingStatus(): """ Test Class for CollectionTrainingStatus """ @@ -2005,7 
+2045,7 @@ def test_collection_training_status_serialization(self): collection_training_status_model_json2 = collection_training_status_model.to_dict() assert collection_training_status_model_json2 == collection_training_status_model_json -class TestCollectionsList(): +class TestModel_CollectionsList(): """ Test Class for CollectionsList """ @@ -2032,8 +2072,8 @@ def test_collections_list_serialization(self): collection_model['collection_id'] = 'testString' collection_model['name'] = 'testString' collection_model['description'] = 'testString' - collection_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - collection_model['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + collection_model['created'] = "2019-01-01T12:00:00Z" + collection_model['updated'] = "2019-01-01T12:00:00Z" collection_model['image_count'] = 38 collection_model['training_status'] = collection_training_status_model @@ -2056,7 +2096,7 @@ def test_collections_list_serialization(self): collections_list_model_json2 = collections_list_model.to_dict() assert collections_list_model_json2 == collections_list_model_json -class TestDetectedObjects(): +class TestModel_DetectedObjects(): """ Test Class for DetectedObjects """ @@ -2102,7 +2142,7 @@ def test_detected_objects_serialization(self): detected_objects_model_json2 = detected_objects_model.to_dict() assert detected_objects_model_json2 == detected_objects_model_json -class TestError(): +class TestModel_Error(): """ Test Class for Error """ @@ -2140,7 +2180,7 @@ def test_error_serialization(self): error_model_json2 = error_model.to_dict() assert error_model_json2 == error_model_json -class TestErrorTarget(): +class TestModel_ErrorTarget(): """ Test Class for ErrorTarget """ @@ -2170,7 +2210,7 @@ def test_error_target_serialization(self): error_target_model_json2 = error_target_model.to_dict() assert error_target_model_json2 == error_target_model_json -class TestImage(): +class TestModel_Image(): """ Test Class for Image """ @@ -2243,7 +2283,7 @@ def test_image_serialization(self): image_model_json2 = image_model.to_dict() assert image_model_json2 == image_model_json -class TestImageDetails(): +class TestModel_ImageDetails(): """ Test Class for ImageDetails """ @@ -2292,8 +2332,8 @@ def test_image_details_serialization(self): # Construct a json representation of a ImageDetails model image_details_model_json = {} image_details_model_json['image_id'] = 'testString' - image_details_model_json['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - image_details_model_json['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + image_details_model_json['updated'] = "2019-01-01T12:00:00Z" + image_details_model_json['created'] = "2019-01-01T12:00:00Z" image_details_model_json['source'] = image_source_model image_details_model_json['dimensions'] = image_dimensions_model image_details_model_json['errors'] = [error_model] @@ -2314,7 +2354,7 @@ def test_image_details_serialization(self): image_details_model_json2 = image_details_model.to_dict() assert image_details_model_json2 == image_details_model_json -class TestImageDetailsList(): +class TestModel_ImageDetailsList(): """ Test Class for ImageDetailsList """ @@ -2362,8 +2402,8 @@ def test_image_details_list_serialization(self): image_details_model = {} # ImageDetails image_details_model['image_id'] = 'testString' - image_details_model['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - 
image_details_model['created'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + image_details_model['updated'] = "2019-01-01T12:00:00Z" + image_details_model['created'] = "2019-01-01T12:00:00Z" image_details_model['source'] = image_source_model image_details_model['dimensions'] = image_dimensions_model image_details_model['errors'] = [error_model] @@ -2395,7 +2435,7 @@ def test_image_details_list_serialization(self): image_details_list_model_json2 = image_details_list_model.to_dict() assert image_details_list_model_json2 == image_details_list_model_json -class TestImageDimensions(): +class TestModel_ImageDimensions(): """ Test Class for ImageDimensions """ @@ -2425,7 +2465,7 @@ def test_image_dimensions_serialization(self): image_dimensions_model_json2 = image_dimensions_model.to_dict() assert image_dimensions_model_json2 == image_dimensions_model_json -class TestImageSource(): +class TestModel_ImageSource(): """ Test Class for ImageSource """ @@ -2458,7 +2498,7 @@ def test_image_source_serialization(self): image_source_model_json2 = image_source_model.to_dict() assert image_source_model_json2 == image_source_model_json -class TestImageSummary(): +class TestModel_ImageSummary(): """ Test Class for ImageSummary """ @@ -2471,7 +2511,7 @@ def test_image_summary_serialization(self): # Construct a json representation of a ImageSummary model image_summary_model_json = {} image_summary_model_json['image_id'] = 'testString' - image_summary_model_json['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + image_summary_model_json['updated'] = "2019-01-01T12:00:00Z" # Construct a model instance of ImageSummary by calling from_dict on the json representation image_summary_model = ImageSummary.from_dict(image_summary_model_json) @@ -2488,7 +2528,7 @@ def test_image_summary_serialization(self): image_summary_model_json2 = image_summary_model.to_dict() assert image_summary_model_json2 == image_summary_model_json -class TestImageSummaryList(): +class TestModel_ImageSummaryList(): """ Test Class for ImageSummaryList """ @@ -2502,7 +2542,7 @@ def test_image_summary_list_serialization(self): image_summary_model = {} # ImageSummary image_summary_model['image_id'] = 'testString' - image_summary_model['updated'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + image_summary_model['updated'] = "2019-01-01T12:00:00Z" # Construct a json representation of a ImageSummaryList model image_summary_list_model_json = {} @@ -2523,7 +2563,7 @@ def test_image_summary_list_serialization(self): image_summary_list_model_json2 = image_summary_list_model.to_dict() assert image_summary_list_model_json2 == image_summary_list_model_json -class TestLocation(): +class TestModel_Location(): """ Test Class for Location """ @@ -2555,7 +2595,7 @@ def test_location_serialization(self): location_model_json2 = location_model.to_dict() assert location_model_json2 == location_model_json -class TestObjectDetail(): +class TestModel_ObjectDetail(): """ Test Class for ObjectDetail """ @@ -2594,7 +2634,7 @@ def test_object_detail_serialization(self): object_detail_model_json2 = object_detail_model.to_dict() assert object_detail_model_json2 == object_detail_model_json -class TestObjectDetailLocation(): +class TestModel_ObjectDetailLocation(): """ Test Class for ObjectDetailLocation """ @@ -2626,7 +2666,7 @@ def test_object_detail_location_serialization(self): object_detail_location_model_json2 = object_detail_location_model.to_dict() assert object_detail_location_model_json2 == 
object_detail_location_model_json -class TestObjectMetadata(): +class TestModel_ObjectMetadata(): """ Test Class for ObjectMetadata """ @@ -2656,7 +2696,7 @@ def test_object_metadata_serialization(self): object_metadata_model_json2 = object_metadata_model.to_dict() assert object_metadata_model_json2 == object_metadata_model_json -class TestObjectMetadataList(): +class TestModel_ObjectMetadataList(): """ Test Class for ObjectMetadataList """ @@ -2692,7 +2732,7 @@ def test_object_metadata_list_serialization(self): object_metadata_list_model_json2 = object_metadata_list_model.to_dict() assert object_metadata_list_model_json2 == object_metadata_list_model_json -class TestObjectTrainingStatus(): +class TestModel_ObjectTrainingStatus(): """ Test Class for ObjectTrainingStatus """ @@ -2726,7 +2766,7 @@ def test_object_training_status_serialization(self): object_training_status_model_json2 = object_training_status_model.to_dict() assert object_training_status_model_json2 == object_training_status_model_json -class TestTrainingDataObject(): +class TestModel_TrainingDataObject(): """ Test Class for TrainingDataObject """ @@ -2764,7 +2804,7 @@ def test_training_data_object_serialization(self): training_data_object_model_json2 = training_data_object_model.to_dict() assert training_data_object_model_json2 == training_data_object_model_json -class TestTrainingDataObjects(): +class TestModel_TrainingDataObjects(): """ Test Class for TrainingDataObjects """ @@ -2805,7 +2845,7 @@ def test_training_data_objects_serialization(self): training_data_objects_model_json2 = training_data_objects_model.to_dict() assert training_data_objects_model_json2 == training_data_objects_model_json -class TestTrainingEvent(): +class TestModel_TrainingEvent(): """ Test Class for TrainingEvent """ @@ -2819,7 +2859,7 @@ def test_training_event_serialization(self): training_event_model_json = {} training_event_model_json['type'] = 'objects' training_event_model_json['collection_id'] = 'testString' - training_event_model_json['completion_time'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + training_event_model_json['completion_time'] = "2019-01-01T12:00:00Z" training_event_model_json['status'] = 'failed' training_event_model_json['image_count'] = 38 @@ -2838,7 +2878,7 @@ def test_training_event_serialization(self): training_event_model_json2 = training_event_model.to_dict() assert training_event_model_json2 == training_event_model_json -class TestTrainingEvents(): +class TestModel_TrainingEvents(): """ Test Class for TrainingEvents """ @@ -2853,14 +2893,14 @@ def test_training_events_serialization(self): training_event_model = {} # TrainingEvent training_event_model['type'] = 'objects' training_event_model['collection_id'] = 'testString' - training_event_model['completion_time'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + training_event_model['completion_time'] = "2019-01-01T12:00:00Z" training_event_model['status'] = 'failed' training_event_model['image_count'] = 38 # Construct a json representation of a TrainingEvents model training_events_model_json = {} - training_events_model_json['start_time'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) - training_events_model_json['end_time'] = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z")) + training_events_model_json['start_time'] = "2019-01-01T12:00:00Z" + training_events_model_json['end_time'] = "2019-01-01T12:00:00Z" training_events_model_json['completed_events'] = 38 
training_events_model_json['trained_images'] = 38 training_events_model_json['events'] = [training_event_model] @@ -2880,7 +2920,7 @@ def test_training_events_serialization(self): training_events_model_json2 = training_events_model.to_dict() assert training_events_model_json2 == training_events_model_json -class TestTrainingStatus(): +class TestModel_TrainingStatus(): """ Test Class for TrainingStatus """ @@ -2919,7 +2959,7 @@ def test_training_status_serialization(self): training_status_model_json2 = training_status_model.to_dict() assert training_status_model_json2 == training_status_model_json -class TestUpdateObjectMetadata(): +class TestModel_UpdateObjectMetadata(): """ Test Class for UpdateObjectMetadata """ @@ -2949,7 +2989,7 @@ def test_update_object_metadata_serialization(self): update_object_metadata_model_json2 = update_object_metadata_model.to_dict() assert update_object_metadata_model_json2 == update_object_metadata_model_json -class TestWarning(): +class TestModel_Warning(): """ Test Class for Warning """ @@ -2980,7 +3020,7 @@ def test_warning_serialization(self): warning_model_json2 = warning_model.to_dict() assert warning_model_json2 == warning_model_json -class TestFileWithMetadata(): +class TestModel_FileWithMetadata(): """ Test Class for FileWithMetadata """
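The datetime edits in the hunks above replace a parse-and-re-render round trip with a fixed RFC 3339 literal. With the old form, the expected value was whatever datetime_to_string chose to emit for the parsed timestamp, so the literal written in the test (with its ".000" fractional seconds) was not necessarily the literal that to_dict produced, which made the dict comparisons depend on helper formatting. A minimal sketch of the two styles, assuming the ibm_cloud_sdk_core helpers these tests already import:

    from ibm_cloud_sdk_core.utils import datetime_to_string, string_to_datetime

    # Old style: the expected value passes through parse + re-render, so it is
    # whatever datetime_to_string emits for the parsed timestamp.
    old_value = datetime_to_string(string_to_datetime("2019-01-01T12:00:00.000Z"))

    # New style: a fixed literal; the from_dict/to_dict round trip is compared
    # against exactly this string, independent of helper formatting.
    new_value = "2019-01-01T12:00:00Z"

    # The two only coincide if the helpers normalize ".000" away; pinning the
    # literal removes that dependency.
    print(old_value, new_value)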
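The preprocess_url additions repeated across every suite make URL encoding idempotent before each test registers its mock: decode any percent-escapes already present, then encode exactly once, with safe=':/' keeping the scheme separator and path slashes literal so 'https://' survives intact. Without the leading unquote, a URL that arrives already encoded would be encoded a second time ('%20' becoming '%2520') and the mocked request would never match. A self-contained sketch of just that normalization step; the helper name and host are illustrative, not from the SDK:

    import urllib.parse

    def normalize_url(request_url: str) -> str:
        # Decode first so an already-encoded URL is not encoded twice...
        request_url = urllib.parse.unquote(request_url)
        # ...then encode exactly once, leaving ':' and '/' literal.
        return urllib.parse.quote(request_url, safe=':/')

    # Idempotent: applying the pair twice yields the same string.
    url = 'https://example.test/v1/voices/en-US_Michael%20V3'
    once = normalize_url(url)
    twice = normalize_url(once)
    assert once == twice == 'https://example.test/v1/voices/en-US_Michael%20V3'

The remaining value changes in these hunks swap placeholder parameters for ones that match currently documented service values, presumably so the requests the tests build are realistic: 'ar-AR_OmarVoice' and 'ar-MS' give way to 'en-US_MichaelV3Voice' and 'en-US', the pronunciation format moves from 'ibm' to 'ipa', and the synthesize accept header becomes 'audio/ogg;codecs=opus'.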