From 7c0ef4aa53d00d735e33c2a5baa2601ab964c70a Mon Sep 17 00:00:00 2001 From: azure-sdk Date: Mon, 18 Aug 2025 23:21:10 +0000 Subject: [PATCH 1/5] Configurations: 'specification/cognitiveservices/Language.AnalyzeText/tspconfig.yaml', API Version: 2025-05-15-preview, SDK Release Type: beta, and CommitSHA: '7c640375cebac863ef3430e8ac9aa8cae4843677' in SpecRepo: 'https://github.com/Azure/azure-rest-api-specs' Pipeline run: https://dev.azure.com/azure-sdk/internal/_build/results?buildId=5232274 Refer to https://eng.ms/docs/products/azure-developer-experience/develop/sdk-release/sdk-release-prerequisites to prepare for SDK release. --- .../azure-ai-textanalytics/CHANGELOG.md | 7 + .../azure-ai-textanalytics/LICENSE | 21 + .../azure-ai-textanalytics/MANIFEST.in | 8 + .../azure-ai-textanalytics/README.md | 43 + .../azure-ai-textanalytics/_metadata.json | 7 + .../apiview-properties.json | 200 + .../azure-ai-textanalytics/azure/__init__.py | 1 + .../azure/ai/__init__.py | 1 + .../azure/ai/language/__init__.py | 1 + .../azure/ai/language/text/__init__.py | 32 + .../azure/ai/language/text/_client.py | 111 + .../azure/ai/language/text/_configuration.py | 73 + .../ai/language/text/_operations/__init__.py | 23 + .../language/text/_operations/_operations.py | 711 ++ .../ai/language/text/_operations/_patch.py | 21 + .../azure/ai/language/text/_patch.py | 21 + .../azure/ai/language/text/_utils/__init__.py | 6 + .../ai/language/text/_utils/model_base.py | 1233 +++ .../ai/language/text/_utils/serialization.py | 2032 +++++ .../azure/ai/language/text/_utils/utils.py | 25 + .../azure/ai/language/text/_version.py | 9 + .../azure/ai/language/text/aio/__init__.py | 29 + .../azure/ai/language/text/aio/_client.py | 115 + .../ai/language/text/aio/_configuration.py | 75 + .../language/text/aio/_operations/__init__.py | 23 + .../text/aio/_operations/_operations.py | 618 ++ .../language/text/aio/_operations/_patch.py | 21 + .../azure/ai/language/text/aio/_patch.py | 21 + .../azure/ai/language/text/models/__init__.py | 402 + .../azure/ai/language/text/models/_enums.py | 1979 +++++ .../azure/ai/language/text/models/_models.py | 6624 +++++++++++++++++ .../azure/ai/language/text/models/_patch.py | 21 + .../azure/ai/language/text/py.typed | 1 + .../dev_requirements.txt | 3 + ...ation_summary_length_prompt_task_result.py | 33 + ...l_abstractive_summarization_task_result.py | 33 + ...essful_analyze_text_jobs_cancel_request.py | 32 + ..._text_jobs_multiple_task_status_request.py | 33 + .../successful_entity_linking_request.py | 42 + ...ul_entity_recognition_exclusion_request.py | 51 + ...ul_entity_recognition_inclusion_request.py | 47 + ...y_recognition_inference_options_request.py | 41 + ...ssful_entity_recognition_overlap_policy.py | 45 + .../successful_entity_recognition_request.py | 53 + ...hcare_document_type_task_status_request.py | 33 + ...ccessful_healthcare_task_status_request.py | 33 + ...uccessful_key_phrase_extraction_request.py | 43 + .../successful_language_detection_request.py | 44 + ...ii_entity_recognition_exclusion_request.py | 48 + ..._pii_entity_recognition_masked_entities.py | 41 + ...ty_recognition_redaction_policy_request.py | 51 + ...ccessful_pii_entity_recognition_request.py | 48 + .../successful_sentiment_analysis_request.py | 46 + .../generated_tests/conftest.py | 35 + .../generated_tests/test_text.py | 69 + .../generated_tests/test_text_async.py | 74 + .../generated_tests/testpreparer.py | 24 + .../generated_tests/testpreparer_async.py | 20 + .../azure-ai-textanalytics/pyproject.toml | 2 + 
.../azure-ai-textanalytics/setup.py | 73 + .../azure-ai-textanalytics/tsp-location.yaml | 4 + sdk/cognitivelanguage/ci.yml | 2 + 62 files changed, 15618 insertions(+) create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/CHANGELOG.md create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/LICENSE create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/MANIFEST.in create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/README.md create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/_metadata.json create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/apiview-properties.json create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/azure/__init__.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/__init__.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/__init__.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/__init__.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_client.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_configuration.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_operations/__init__.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_operations/_operations.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_operations/_patch.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_patch.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_utils/__init__.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_utils/model_base.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_utils/serialization.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_utils/utils.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_version.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/aio/__init__.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/aio/_client.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/aio/_configuration.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/aio/_operations/__init__.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/aio/_operations/_operations.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/aio/_operations/_patch.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/aio/_patch.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/models/__init__.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/models/_enums.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/models/_models.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/models/_patch.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/py.typed create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/dev_requirements.txt create mode 100644 
sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_abstractive_summarization_summary_length_prompt_task_result.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_abstractive_summarization_task_result.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_analyze_text_jobs_cancel_request.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_analyze_text_jobs_multiple_task_status_request.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_linking_request.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_exclusion_request.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_inclusion_request.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_inference_options_request.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_overlap_policy.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_request.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_healthcare_document_type_task_status_request.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_healthcare_task_status_request.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_key_phrase_extraction_request.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_language_detection_request.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_pii_entity_recognition_exclusion_request.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_pii_entity_recognition_masked_entities.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_pii_entity_recognition_redaction_policy_request.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_pii_entity_recognition_request.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_sentiment_analysis_request.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/conftest.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/test_text.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/test_text_async.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/testpreparer.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/testpreparer_async.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/pyproject.toml create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/setup.py create mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/tsp-location.yaml diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/CHANGELOG.md b/sdk/cognitivelanguage/azure-ai-textanalytics/CHANGELOG.md new file mode 100644 index 000000000000..b957b2575b48 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/CHANGELOG.md @@ -0,0 +1,7 @@ +# Release History + +## 1.0.0b1 (1970-01-01) + +### 
Other Changes + + - Initial version \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/LICENSE b/sdk/cognitivelanguage/azure-ai-textanalytics/LICENSE new file mode 100644 index 000000000000..63447fd8bbbf --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/LICENSE @@ -0,0 +1,21 @@ +Copyright (c) Microsoft Corporation. + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/MANIFEST.in b/sdk/cognitivelanguage/azure-ai-textanalytics/MANIFEST.in new file mode 100644 index 000000000000..b90964c5d09b --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/MANIFEST.in @@ -0,0 +1,8 @@ +include *.md +include LICENSE +include azure/ai/language/text/py.typed +recursive-include tests *.py +recursive-include samples *.py *.md +include azure/__init__.py +include azure/ai/__init__.py +include azure/ai/language/__init__.py diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/README.md b/sdk/cognitivelanguage/azure-ai-textanalytics/README.md new file mode 100644 index 000000000000..dae8b84e3ac4 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/README.md @@ -0,0 +1,43 @@ +# Azure Ai Language Text client library for Python + + +## Getting started + +### Install the package + +```bash +python -m pip install azure-ai-language-text +``` + +#### Prequisites + +- Python 3.9 or later is required to use this package. +- You need an [Azure subscription][azure_sub] to use this package. +- An existing Azure Ai Language Text instance. + + +## Contributing + +This project welcomes contributions and suggestions. Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. +For details, visit https://cla.microsoft.com. + +When you submit a pull request, a CLA-bot will automatically determine whether +you need to provide a CLA and decorate the PR appropriately (e.g., label, +comment). Simply follow the instructions provided by the bot. You will only +need to do this once across all repos using our CLA. + +This project has adopted the +[Microsoft Open Source Code of Conduct][code_of_conduct]. For more information, +see the Code of Conduct FAQ or contact opencode@microsoft.com with any +additional questions or comments. 
+ + +[code_of_conduct]: https://opensource.microsoft.com/codeofconduct/ +[authenticate_with_token]: https://docs.microsoft.com/azure/cognitive-services/authentication?tabs=powershell#authenticate-with-an-authentication-token +[azure_identity_credentials]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#credentials +[azure_identity_pip]: https://pypi.org/project/azure-identity/ +[default_azure_credential]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#defaultazurecredential +[pip]: https://pypi.org/project/pip/ +[azure_sub]: https://azure.microsoft.com/free/ diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/_metadata.json b/sdk/cognitivelanguage/azure-ai-textanalytics/_metadata.json new file mode 100644 index 000000000000..fc20001b7df0 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/_metadata.json @@ -0,0 +1,7 @@ +{ + "apiVersion": "2025-05-15-preview", + "commit": "7c640375cebac863ef3430e8ac9aa8cae4843677", + "repository_url": "https://github.com/Azure/azure-rest-api-specs", + "typespec_src": "specification/cognitiveservices/Language.AnalyzeText", + "emitterVersion": "0.48.2" +} \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/apiview-properties.json b/sdk/cognitivelanguage/azure-ai-textanalytics/apiview-properties.json new file mode 100644 index 000000000000..9721c6dfa4e4 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/apiview-properties.json @@ -0,0 +1,200 @@ +{ + "CrossLanguagePackageId": "Language.Text", + "CrossLanguageDefinitionId": { + "azure.ai.language.text.models.AnalyzeTextLROResult": "Language.Text.AnalyzeTextLROResult", + "azure.ai.language.text.models.AbstractiveSummarizationLROResult": "Language.Text.AbstractiveSummarizationLROResult", + "azure.ai.language.text.models.AnalyzeTextLROTask": "Language.Text.AnalyzeTextLROTask", + "azure.ai.language.text.models.AbstractiveSummarizationLROTask": "Language.Text.AbstractiveSummarizationLROTask", + "azure.ai.language.text.models.AbstractiveSummarizationResult": "Language.Text.AbstractiveSummarizationResult", + "azure.ai.language.text.models.AbstractiveSummarizationTaskParameters": "Language.Text.AbstractiveSummarizationTaskParameters", + "azure.ai.language.text.models.AbstractiveSummary": "Language.Text.AbstractiveSummary", + "azure.ai.language.text.models.AbstractiveSummaryDocumentResultWithDetectedLanguage": "Language.Text.AbstractiveSummaryDocumentResultWithDetectedLanguage", + "azure.ai.language.text.models.BaseMetadata": "Language.Text.BaseMetadata", + "azure.ai.language.text.models.AgeMetadata": "Language.Text.AgeMetadata", + "azure.ai.language.text.models.BaseEntityOverlapPolicy": "Language.Text.BaseEntityOverlapPolicy", + "azure.ai.language.text.models.AllowOverlapEntityPolicyType": "Language.Text.AllowOverlapEntityPolicyType", + "azure.ai.language.text.models.AnalyzeTextTask": "Language.Text.AnalyzeTextTask", + "azure.ai.language.text.models.AnalyzeTextEntityLinkingInput": "Language.Text.AnalyzeTextEntityLinkingInput", + "azure.ai.language.text.models.AnalyzeTextEntityRecognitionInput": "Language.Text.AnalyzeTextEntityRecognitionInput", + "azure.ai.language.text.models.AnalyzeTextJobState": "Language.Text.AnalyzeTextJobState", + "azure.ai.language.text.models.AnalyzeTextKeyPhraseExtractionInput": "Language.Text.AnalyzeTextKeyPhraseExtractionInput", + "azure.ai.language.text.models.AnalyzeTextLanguageDetectionInput": "Language.Text.AnalyzeTextLanguageDetectionInput", + 
"azure.ai.language.text.models.AnalyzeTextPiiEntitiesRecognitionInput": "Language.Text.AnalyzeTextPiiEntitiesRecognitionInput", + "azure.ai.language.text.models.AnalyzeTextSentimentAnalysisInput": "Language.Text.AnalyzeTextSentimentAnalysisInput", + "azure.ai.language.text.models.AnalyzeTextTaskResult": "Language.Text.AnalyzeTextTaskResult", + "azure.ai.language.text.models.AreaMetadata": "Language.Text.AreaMetadata", + "azure.ai.language.text.models.BaseRedactionPolicy": "Language.Text.BaseRedactionPolicy", + "azure.ai.language.text.models.CharacterMaskPolicyType": "Language.Text.CharacterMaskPolicyType", + "azure.ai.language.text.models.ClassificationDocumentResultWithDetectedLanguage": "Language.Text.ClassificationDocumentResultWithDetectedLanguage", + "azure.ai.language.text.models.ClassificationResult": "Language.Text.ClassificationResult", + "azure.ai.language.text.models.CurrencyMetadata": "Language.Text.CurrencyMetadata", + "azure.ai.language.text.models.CustomEntitiesLROTask": "Language.Text.CustomEntitiesLROTask", + "azure.ai.language.text.models.CustomEntitiesResult": "Language.Text.CustomEntitiesResult", + "azure.ai.language.text.models.CustomEntitiesTaskParameters": "Language.Text.CustomEntitiesTaskParameters", + "azure.ai.language.text.models.CustomEntityRecognitionLROResult": "Language.Text.CustomEntityRecognitionLROResult", + "azure.ai.language.text.models.CustomLabelClassificationResult": "Language.Text.CustomLabelClassificationResult", + "azure.ai.language.text.models.CustomMultiLabelClassificationLROResult": "Language.Text.CustomMultiLabelClassificationLROResult", + "azure.ai.language.text.models.CustomMultiLabelClassificationLROTask": "Language.Text.CustomMultiLabelClassificationLROTask", + "azure.ai.language.text.models.CustomMultiLabelClassificationTaskParameters": "Language.Text.CustomMultiLabelClassificationTaskParameters", + "azure.ai.language.text.models.CustomSingleLabelClassificationLROResult": "Language.Text.CustomSingleLabelClassificationLROResult", + "azure.ai.language.text.models.CustomSingleLabelClassificationLROTask": "Language.Text.CustomSingleLabelClassificationLROTask", + "azure.ai.language.text.models.CustomSingleLabelClassificationTaskParameters": "Language.Text.CustomSingleLabelClassificationTaskParameters", + "azure.ai.language.text.models.DateMetadata": "Language.Text.DateMetadata", + "azure.ai.language.text.models.DateTimeMetadata": "Language.Text.DateTimeMetadata", + "azure.ai.language.text.models.DateValue": "Language.Text.DateValue", + "azure.ai.language.text.models.DetectedLanguage": "Language.Text.DetectedLanguage", + "azure.ai.language.text.models.DocumentError": "Language.Text.DocumentError", + "azure.ai.language.text.models.DocumentStatistics": "Language.Text.DocumentStatistics", + "azure.ai.language.text.models.DocumentWarning": "Language.Text.DocumentWarning", + "azure.ai.language.text.models.EntitiesDocumentResultWithDetectedLanguage": "Language.Text.EntitiesDocumentResultWithDetectedLanguage", + "azure.ai.language.text.models.EntitiesDocumentResultWithMetadata": "Language.Text.EntitiesDocumentResultWithMetadata", + "azure.ai.language.text.models.EntitiesDocumentResultWithMetadataDetectedLanguage": "Language.Text.EntitiesDocumentResultWithMetadataDetectedLanguage", + "azure.ai.language.text.models.EntitiesLROTask": "Language.Text.EntitiesLROTask", + "azure.ai.language.text.models.EntitiesResult": "Language.Text.EntitiesResult", + "azure.ai.language.text.models.EntitiesTaskParameters": "Language.Text.EntitiesTaskParameters", + 
"azure.ai.language.text.models.EntitiesTaskResult": "Language.Text.EntitiesTaskResult", + "azure.ai.language.text.models.EntitiesWithMetadataAutoResult": "Language.Text.EntitiesWithMetadataAutoResult", + "azure.ai.language.text.models.Entity": "Language.Text.Entity", + "azure.ai.language.text.models.EntityInferenceOptions": "Language.Text.EntityInferenceOptions", + "azure.ai.language.text.models.EntityLinkingLROResult": "Language.Text.EntityLinkingLROResult", + "azure.ai.language.text.models.EntityLinkingLROTask": "Language.Text.EntityLinkingLROTask", + "azure.ai.language.text.models.EntityLinkingResult": "Language.Text.EntityLinkingResult", + "azure.ai.language.text.models.EntityLinkingResultWithDetectedLanguage": "Language.Text.EntityLinkingResultWithDetectedLanguage", + "azure.ai.language.text.models.EntityLinkingTaskParameters": "Language.Text.EntityLinkingTaskParameters", + "azure.ai.language.text.models.EntityLinkingTaskResult": "Language.Text.EntityLinkingTaskResult", + "azure.ai.language.text.models.EntityMaskPolicyType": "Language.Text.EntityMaskPolicyType", + "azure.ai.language.text.models.EntityRecognitionLROResult": "Language.Text.EntityRecognitionLROResult", + "azure.ai.language.text.models.EntitySynonym": "Language.Text.EntitySynonym", + "azure.ai.language.text.models.EntitySynonyms": "Language.Text.EntitySynonyms", + "azure.ai.language.text.models.EntityTag": "Language.Text.EntityTag", + "azure.ai.language.text.models.EntityWithMetadata": "Language.Text.EntityWithMetadata", + "azure.ai.language.text.models.Error": "Language.Text.Error", + "azure.ai.language.text.models.ErrorResponse": "Language.Text.ErrorResponse", + "azure.ai.language.text.models.ExtractedSummaryDocumentResultWithDetectedLanguage": "Language.Text.ExtractedSummaryDocumentResultWithDetectedLanguage", + "azure.ai.language.text.models.ExtractedSummarySentence": "Language.Text.ExtractedSummarySentence", + "azure.ai.language.text.models.ExtractiveSummarizationLROResult": "Language.Text.ExtractiveSummarizationLROResult", + "azure.ai.language.text.models.ExtractiveSummarizationLROTask": "Language.Text.ExtractiveSummarizationLROTask", + "azure.ai.language.text.models.ExtractiveSummarizationResult": "Language.Text.ExtractiveSummarizationResult", + "azure.ai.language.text.models.ExtractiveSummarizationTaskParameters": "Language.Text.ExtractiveSummarizationTaskParameters", + "azure.ai.language.text.models.FhirBundle": "Language.Text.FhirBundle", + "azure.ai.language.text.models.HealthcareAssertion": "Language.Text.HealthcareAssertion", + "azure.ai.language.text.models.HealthcareEntitiesDocumentResultWithDocumentDetectedLanguage": "Language.Text.HealthcareEntitiesDocumentResultWithDocumentDetectedLanguage", + "azure.ai.language.text.models.HealthcareEntity": "Language.Text.HealthcareEntity", + "azure.ai.language.text.models.HealthcareEntityLink": "Language.Text.HealthcareEntityLink", + "azure.ai.language.text.models.HealthcareLROResult": "Language.Text.HealthcareLROResult", + "azure.ai.language.text.models.HealthcareLROTask": "Language.Text.HealthcareLROTask", + "azure.ai.language.text.models.HealthcareRelation": "Language.Text.HealthcareRelation", + "azure.ai.language.text.models.HealthcareRelationEntity": "Language.Text.HealthcareRelationEntity", + "azure.ai.language.text.models.HealthcareResult": "Language.Text.HealthcareResult", + "azure.ai.language.text.models.HealthcareTaskParameters": "Language.Text.HealthcareTaskParameters", + "azure.ai.language.text.models.InformationMetadata": 
"Language.Text.InformationMetadata", + "azure.ai.language.text.models.InnerErrorModel": "Language.Text.InnerErrorModel", + "azure.ai.language.text.models.KeyPhraseExtractionLROResult": "Language.Text.KeyPhraseExtractionLROResult", + "azure.ai.language.text.models.KeyPhraseLROTask": "Language.Text.KeyPhraseLROTask", + "azure.ai.language.text.models.KeyPhraseResult": "Language.Text.KeyPhraseResult", + "azure.ai.language.text.models.KeyPhrasesDocumentResultWithDetectedLanguage": "Language.Text.KeyPhrasesDocumentResultWithDetectedLanguage", + "azure.ai.language.text.models.KeyPhraseTaskParameters": "Language.Text.KeyPhraseTaskParameters", + "azure.ai.language.text.models.KeyPhraseTaskResult": "Language.Text.KeyPhraseTaskResult", + "azure.ai.language.text.models.LanguageDetectionAnalysisInput": "Language.Text.LanguageDetectionAnalysisInput", + "azure.ai.language.text.models.LanguageDetectionDocumentResult": "Language.Text.LanguageDetectionDocumentResult", + "azure.ai.language.text.models.LanguageDetectionResult": "Language.Text.LanguageDetectionResult", + "azure.ai.language.text.models.LanguageDetectionTaskParameters": "Language.Text.LanguageDetectionTaskParameters", + "azure.ai.language.text.models.LanguageDetectionTaskResult": "Language.Text.LanguageDetectionTaskResult", + "azure.ai.language.text.models.LanguageInput": "Language.Text.LanguageInput", + "azure.ai.language.text.models.LengthMetadata": "Language.Text.LengthMetadata", + "azure.ai.language.text.models.LinkedEntity": "Language.Text.LinkedEntity", + "azure.ai.language.text.models.Match": "Language.Text.Match", + "azure.ai.language.text.models.MatchLongestEntityPolicyType": "Language.Text.MatchLongestEntityPolicyType", + "azure.ai.language.text.models.MultiLanguageAnalysisInput": "Language.Text.MultiLanguageAnalysisInput", + "azure.ai.language.text.models.MultiLanguageInput": "Language.Text.MultiLanguageInput", + "azure.ai.language.text.models.NoMaskPolicyType": "Language.Text.NoMaskPolicyType", + "azure.ai.language.text.models.NumberMetadata": "Language.Text.NumberMetadata", + "azure.ai.language.text.models.NumericRangeMetadata": "Language.Text.NumericRangeMetadata", + "azure.ai.language.text.models.OrdinalMetadata": "Language.Text.OrdinalMetadata", + "azure.ai.language.text.models.PiiEntityRecognitionLROResult": "Language.Text.PiiEntityRecognitionLROResult", + "azure.ai.language.text.models.PiiEntityWithTags": "Language.Text.PiiEntityWithTags", + "azure.ai.language.text.models.PiiLROTask": "Language.Text.PiiLROTask", + "azure.ai.language.text.models.PiiResult": "Language.Text.PiiResult", + "azure.ai.language.text.models.PiiResultWithDetectedLanguage": "Language.Text.PiiResultWithDetectedLanguage", + "azure.ai.language.text.models.PiiTaskParameters": "Language.Text.PiiTaskParameters", + "azure.ai.language.text.models.PiiTaskResult": "Language.Text.PiiTaskResult", + "azure.ai.language.text.models.RequestStatistics": "Language.Text.RequestStatistics", + "azure.ai.language.text.models.SentenceAssessment": "Language.Text.SentenceAssessment", + "azure.ai.language.text.models.SentenceSentiment": "Language.Text.SentenceSentiment", + "azure.ai.language.text.models.SentenceTarget": "Language.Text.SentenceTarget", + "azure.ai.language.text.models.SentimentAnalysisLROTask": "Language.Text.SentimentAnalysisLROTask", + "azure.ai.language.text.models.SentimentAnalysisTaskParameters": "Language.Text.SentimentAnalysisTaskParameters", + "azure.ai.language.text.models.SentimentConfidenceScores": "Language.Text.SentimentConfidenceScores", + 
"azure.ai.language.text.models.SentimentDocumentResultWithDetectedLanguage": "Language.Text.SentimentDocumentResultWithDetectedLanguage", + "azure.ai.language.text.models.SentimentLROResult": "Language.Text.SentimentLROResult", + "azure.ai.language.text.models.SentimentResponse": "Language.Text.SentimentResponse", + "azure.ai.language.text.models.SentimentTaskResult": "Language.Text.SentimentTaskResult", + "azure.ai.language.text.models.SpeedMetadata": "Language.Text.SpeedMetadata", + "azure.ai.language.text.models.SummaryContext": "Language.Text.SummaryContext", + "azure.ai.language.text.models.TargetConfidenceScoreLabel": "Language.Text.TargetConfidenceScoreLabel", + "azure.ai.language.text.models.TargetRelation": "Language.Text.TargetRelation", + "azure.ai.language.text.models.Tasks": "Language.Text.Tasks", + "azure.ai.language.text.models.TemperatureMetadata": "Language.Text.TemperatureMetadata", + "azure.ai.language.text.models.TemporalSetMetadata": "Language.Text.TemporalSetMetadata", + "azure.ai.language.text.models.TemporalSpanMetadata": "Language.Text.TemporalSpanMetadata", + "azure.ai.language.text.models.TemporalSpanValues": "Language.Text.TemporalSpanValues", + "azure.ai.language.text.models.TimeMetadata": "Language.Text.TimeMetadata", + "azure.ai.language.text.models.ValueExclusionPolicy": "Language.Text.ValueExclusionPolicy", + "azure.ai.language.text.models.VolumeMetadata": "Language.Text.VolumeMetadata", + "azure.ai.language.text.models.WeightMetadata": "Language.Text.WeightMetadata", + "azure.ai.language.text.models.AnalyzeTextTaskResultsKind": "Language.Text.AnalyzeTextTaskResultsKind", + "azure.ai.language.text.models.ErrorCode": "Language.Text.ErrorCode", + "azure.ai.language.text.models.InnerErrorCode": "Language.Text.InnerErrorCode", + "azure.ai.language.text.models.WarningCodeValue": "Language.Text.WarningCodeValue", + "azure.ai.language.text.models.ScriptKind": "Language.Text.ScriptKind", + "azure.ai.language.text.models.ScriptCode": "Language.Text.ScriptCode", + "azure.ai.language.text.models.MetadataKind": "Language.Text.MetadataKind", + "azure.ai.language.text.models.AgeUnit": "Language.Text.AgeUnit", + "azure.ai.language.text.models.AreaUnit": "Language.Text.AreaUnit", + "azure.ai.language.text.models.TemporalModifier": "Language.Text.TemporalModifier", + "azure.ai.language.text.models.InformationUnit": "Language.Text.InformationUnit", + "azure.ai.language.text.models.LengthUnit": "Language.Text.LengthUnit", + "azure.ai.language.text.models.NumberKind": "Language.Text.NumberKind", + "azure.ai.language.text.models.RangeKind": "Language.Text.RangeKind", + "azure.ai.language.text.models.RangeInclusivity": "Language.Text.RangeInclusivity", + "azure.ai.language.text.models.RelativeTo": "Language.Text.RelativeTo", + "azure.ai.language.text.models.SpeedUnit": "Language.Text.SpeedUnit", + "azure.ai.language.text.models.TemperatureUnit": "Language.Text.TemperatureUnit", + "azure.ai.language.text.models.VolumeUnit": "Language.Text.VolumeUnit", + "azure.ai.language.text.models.WeightUnit": "Language.Text.WeightUnit", + "azure.ai.language.text.models.DocumentSentimentValue": "Language.Text.DocumentSentimentValue", + "azure.ai.language.text.models.SentenceSentimentValue": "Language.Text.SentenceSentimentValue", + "azure.ai.language.text.models.TokenSentimentValue": "Language.Text.TokenSentimentValue", + "azure.ai.language.text.models.TargetRelationType": "Language.Text.TargetRelationType", + "azure.ai.language.text.models.AnalyzeTextTaskKind": 
"Language.Text.AnalyzeTextTaskKind", + "azure.ai.language.text.models.StringIndexType": "Language.Text.StringIndexType", + "azure.ai.language.text.models.EntityCategory": "Language.Text.EntityCategory", + "azure.ai.language.text.models.PolicyKind": "Language.Text.policyKind", + "azure.ai.language.text.models.PiiDomain": "Language.Text.PiiDomain", + "azure.ai.language.text.models.PiiCategory": "Language.Text.PiiCategory", + "azure.ai.language.text.models.PiiCategoriesExclude": "Language.Text.PiiCategoriesExclude", + "azure.ai.language.text.models.RedactionPolicyKind": "Language.Text.RedactionPolicyKind", + "azure.ai.language.text.models.RedactionCharacter": "Language.Text.redactionCharacter", + "azure.ai.language.text.models.State": "Language.Text.State", + "azure.ai.language.text.models.AnalyzeTextLROResultsKind": "Language.Text.AnalyzeTextLROResultsKind", + "azure.ai.language.text.models.HealthcareEntityCategory": "Language.Text.healthcareEntityCategory", + "azure.ai.language.text.models.Conditionality": "Language.Text.Conditionality", + "azure.ai.language.text.models.Certainty": "Language.Text.Certainty", + "azure.ai.language.text.models.Association": "Language.Text.Association", + "azure.ai.language.text.models.Temporality": "Language.Text.Temporality", + "azure.ai.language.text.models.RelationType": "Language.Text.relationType", + "azure.ai.language.text.models.AnalyzeTextLROTaskKind": "Language.Text.AnalyzeTextLROTaskKind", + "azure.ai.language.text.models.SummaryLengthBucket": "Language.Text.SummaryLengthBucket", + "azure.ai.language.text.models.ExtractiveSummarizationSortingCriteria": "Language.Text.ExtractiveSummarizationSortingCriteria", + "azure.ai.language.text.models.FhirVersion": "Language.Text.fhirVersion", + "azure.ai.language.text.models.HealthcareDocumentType": "Language.Text.healthcareDocumentType", + "azure.ai.language.text.TextClient.analyze_text": "Language.Text.analyzeText", + "azure.ai.language.text.aio.TextClient.analyze_text": "Language.Text.analyzeText", + "azure.ai.language.text.TextClient.analyze_text_job_status": "Language.Text.analyzeTextJobStatus", + "azure.ai.language.text.aio.TextClient.analyze_text_job_status": "Language.Text.analyzeTextJobStatus", + "azure.ai.language.text.TextClient.begin_analyze_text_submit_job": "Language.Text.analyzeTextSubmitJob", + "azure.ai.language.text.aio.TextClient.begin_analyze_text_submit_job": "Language.Text.analyzeTextSubmitJob", + "azure.ai.language.text.TextClient.begin_analyze_text_cancel_job": "Language.Text.analyzeTextCancelJob", + "azure.ai.language.text.aio.TextClient.begin_analyze_text_cancel_job": "Language.Text.analyzeTextCancelJob" + } +} \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/azure/__init__.py b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/__init__.py new file mode 100644 index 000000000000..d55ccad1f573 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/__init__.py @@ -0,0 +1 @@ +__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/__init__.py b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/__init__.py new file mode 100644 index 000000000000..d55ccad1f573 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/__init__.py @@ -0,0 +1 @@ +__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/__init__.py 
b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/__init__.py new file mode 100644 index 000000000000..d55ccad1f573 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/__init__.py @@ -0,0 +1 @@ +__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/__init__.py b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/__init__.py new file mode 100644 index 000000000000..7273540b410f --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/__init__.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + +from ._client import TextClient # type: ignore +from ._version import VERSION + +__version__ = VERSION + +try: + from ._patch import __all__ as _patch_all + from ._patch import * +except ImportError: + _patch_all = [] +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "TextClient", +] +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore + +_patch_sdk() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_client.py b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_client.py new file mode 100644 index 000000000000..29017ae3dfab --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_client.py @@ -0,0 +1,111 @@ +# pylint: disable=line-too-long,useless-suppression +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from copy import deepcopy +from typing import Any, TYPE_CHECKING, Union +from typing_extensions import Self + +from azure.core import PipelineClient +from azure.core.credentials import AzureKeyCredential +from azure.core.pipeline import policies +from azure.core.rest import HttpRequest, HttpResponse + +from ._configuration import TextClientConfiguration +from ._operations import _TextClientOperationsMixin +from ._utils.serialization import Deserializer, Serializer + +if TYPE_CHECKING: + from azure.core.credentials import TokenCredential + + +class TextClient(_TextClientOperationsMixin): + """The language service API is a suite of natural language processing (NLP) skills built with + best-in-class Microsoft machine learning algorithms. The API can be used to analyze + unstructured text for tasks such as sentiment analysis, key phrase extraction, language + detection and question answering. 
Further documentation can be found in https://learn.microsoft.com/azure/cognitive-services/language-service/overview + https://learn.microsoft.com/azure/cognitive-services/language-service/overview>`_.0. + + :param endpoint: Supported Cognitive Services endpoint (e.g., + https://.api.cognitiveservices.azure.com). Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Is either a key + credential type or a token credential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials.TokenCredential + :keyword api_version: The API version to use for this operation. Default value is + "2025-05-15-preview". Note that overriding this default value may result in unsupported + behavior. + :paramtype api_version: str + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + """ + + def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: + _endpoint = "{Endpoint}/language" + self._config = TextClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) + + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: PipelineClient = PipelineClient(base_url=_endpoint, policies=_policies, **kwargs) + + self._serialize = Serializer() + self._deserialize = Deserializer() + self._serialize.client_side_validation = False + + def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> HttpResponse: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = client.send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. 
+ :rtype: ~azure.core.rest.HttpResponse + """ + + request_copy = deepcopy(request) + path_format_arguments = { + "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + + request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) + return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore + + def close(self) -> None: + self._client.close() + + def __enter__(self) -> Self: + self._client.__enter__() + return self + + def __exit__(self, *exc_details: Any) -> None: + self._client.__exit__(*exc_details) diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_configuration.py b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_configuration.py new file mode 100644 index 000000000000..17b99c1e53ca --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_configuration.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import Any, TYPE_CHECKING, Union + +from azure.core.credentials import AzureKeyCredential +from azure.core.pipeline import policies + +from ._version import VERSION + +if TYPE_CHECKING: + from azure.core.credentials import TokenCredential + + +class TextClientConfiguration: # pylint: disable=too-many-instance-attributes + """Configuration for TextClient. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param endpoint: Supported Cognitive Services endpoint (e.g., + https://.api.cognitiveservices.azure.com). Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Is either a key + credential type or a token credential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials.TokenCredential + :keyword api_version: The API version to use for this operation. Default value is + "2025-05-15-preview". Note that overriding this default value may result in unsupported + behavior. 
+ :paramtype api_version: str + """ + + def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: + api_version: str = kwargs.pop("api_version", "2025-05-15-preview") + + if endpoint is None: + raise ValueError("Parameter 'endpoint' must not be None.") + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + + self.endpoint = endpoint + self.credential = credential + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://cognitiveservices.azure.com/.default"]) + kwargs.setdefault("sdk_moniker", "ai-language-text/{}".format(VERSION)) + self.polling_interval = kwargs.get("polling_interval", 30) + self._configure(**kwargs) + + def _infer_policy(self, **kwargs): + if isinstance(self.credential, AzureKeyCredential): + return policies.AzureKeyCredentialPolicy(self.credential, "Ocp-Apim-Subscription-Key", **kwargs) + if hasattr(self.credential, "get_token"): + return policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs) + raise TypeError(f"Unsupported credential: {self.credential}") + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = self._infer_policy(**kwargs) diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_operations/__init__.py b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_operations/__init__.py new file mode 100644 index 000000000000..46ed8f84233c --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_operations/__init__.py @@ -0,0 +1,23 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + +from ._operations import _TextClientOperationsMixin # type: ignore # pylint: disable=unused-import + +from ._patch import __all__ as _patch_all +from ._patch import * +from ._patch import patch_sdk as _patch_sdk + +__all__ = [] +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore +_patch_sdk() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_operations/_operations.py b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_operations/_operations.py new file mode 100644 index 000000000000..04648958c800 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_operations/_operations.py @@ -0,0 +1,711 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from collections.abc import MutableMapping +from io import IOBase +import json +from typing import Any, Callable, Dict, IO, Iterator, List, Optional, TypeVar, Union, cast, overload + +from azure.core import PipelineClient +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + StreamClosedError, + StreamConsumedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.polling import LROPoller, NoPolling, PollingMethod +from azure.core.polling.base_polling import LROBasePolling +from azure.core.rest import HttpRequest, HttpResponse +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict + +from .. 
import models as _models +from .._configuration import TextClientConfiguration +from .._utils.model_base import SdkJSONEncoder, _deserialize, _failsafe_deserialize +from .._utils.serialization import Serializer +from .._utils.utils import ClientMixinABC + +JSON = MutableMapping[str, Any] +_Unset: Any = object() +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_text_analyze_text_request(*, show_stats: Optional[bool] = None, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/:analyze-text" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if show_stats is not None: + _params["showStats"] = _SERIALIZER.query("show_stats", show_stats, "bool") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_text_analyze_text_job_status_request( # pylint: disable=name-too-long + job_id: str, + *, + show_stats: Optional[bool] = None, + top: Optional[int] = None, + skip: Optional[int] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/analyze-text/jobs/{jobId}" + path_format_arguments = { + "jobId": _SERIALIZER.url("job_id", job_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if show_stats is not None: + _params["showStats"] = _SERIALIZER.query("show_stats", show_stats, "bool") + if top is not None: + _params["top"] = _SERIALIZER.query("top", top, "int") + if skip is not None: + _params["skip"] = _SERIALIZER.query("skip", skip, "int") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_text_analyze_text_submit_job_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + # Construct URL + _url = "/analyze-text/jobs" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = 
_SERIALIZER.header("content_type", content_type, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_text_analyze_text_cancel_job_request( # pylint: disable=name-too-long + job_id: str, **kwargs: Any +) -> HttpRequest: + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-05-15-preview")) + # Construct URL + _url = "/analyze-text/jobs/{jobId}:cancel" + path_format_arguments = { + "jobId": _SERIALIZER.url("job_id", job_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + return HttpRequest(method="POST", url=_url, params=_params, **kwargs) + + +class _TextClientOperationsMixin(ClientMixinABC[PipelineClient[HttpRequest, HttpResponse], TextClientConfiguration]): + + @overload + def analyze_text( + self, + body: _models.AnalyzeTextTask, + *, + show_stats: Optional[bool] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.AnalyzeTextTaskResult: + """Request text analysis over a collection of documents. + + :param body: The input documents to analyze. Required. + :type body: ~azure.ai.language.text.models.AnalyzeTextTask + :keyword show_stats: (Optional) if set to true, response will contain request and document + level statistics. Default value is None. + :paramtype show_stats: bool + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AnalyzeTextTaskResult. The AnalyzeTextTaskResult is compatible with MutableMapping + :rtype: ~azure.ai.language.text.models.AnalyzeTextTaskResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def analyze_text( + self, body: JSON, *, show_stats: Optional[bool] = None, content_type: str = "application/json", **kwargs: Any + ) -> _models.AnalyzeTextTaskResult: + """Request text analysis over a collection of documents. + + :param body: The input documents to analyze. Required. + :type body: JSON + :keyword show_stats: (Optional) if set to true, response will contain request and document + level statistics. Default value is None. + :paramtype show_stats: bool + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AnalyzeTextTaskResult. The AnalyzeTextTaskResult is compatible with MutableMapping + :rtype: ~azure.ai.language.text.models.AnalyzeTextTaskResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def analyze_text( + self, + body: IO[bytes], + *, + show_stats: Optional[bool] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.AnalyzeTextTaskResult: + """Request text analysis over a collection of documents. + + :param body: The input documents to analyze. Required. + :type body: IO[bytes] + :keyword show_stats: (Optional) if set to true, response will contain request and document + level statistics. Default value is None. + :paramtype show_stats: bool + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AnalyzeTextTaskResult. 
The AnalyzeTextTaskResult is compatible with MutableMapping + :rtype: ~azure.ai.language.text.models.AnalyzeTextTaskResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def analyze_text( + self, body: Union[_models.AnalyzeTextTask, JSON, IO[bytes]], *, show_stats: Optional[bool] = None, **kwargs: Any + ) -> _models.AnalyzeTextTaskResult: + """Request text analysis over a collection of documents. + + :param body: The input documents to analyze. Is one of the following types: AnalyzeTextTask, + JSON, IO[bytes] Required. + :type body: ~azure.ai.language.text.models.AnalyzeTextTask or JSON or IO[bytes] + :keyword show_stats: (Optional) if set to true, response will contain request and document + level statistics. Default value is None. + :paramtype show_stats: bool + :return: AnalyzeTextTaskResult. The AnalyzeTextTaskResult is compatible with MutableMapping + :rtype: ~azure.ai.language.text.models.AnalyzeTextTaskResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AnalyzeTextTaskResult] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_text_analyze_text_request( + show_stats=show_stats, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize(_models.ErrorResponse, response) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AnalyzeTextTaskResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def analyze_text_job_status( + self, + job_id: str, + *, + show_stats: Optional[bool] = None, + top: Optional[int] = None, + skip: Optional[int] = None, + **kwargs: Any + ) -> _models.AnalyzeTextJobState: + """Get analysis status and results. + + Get the status of an analysis job. A job can consist of one or more tasks. After all tasks + succeed, the job transitions to the succeeded state and results are available for each task. + + :param job_id: job ID. Required. 
+ :type job_id: str + :keyword show_stats: (Optional) if set to true, response will contain request and document + level statistics. Default value is None. + :paramtype show_stats: bool + :keyword top: The maximum number of resources to return from the collection. Default value is + None. + :paramtype top: int + :keyword skip: An offset into the collection of the first resource to be returned. Default + value is None. + :paramtype skip: int + :return: AnalyzeTextJobState. The AnalyzeTextJobState is compatible with MutableMapping + :rtype: ~azure.ai.language.text.models.AnalyzeTextJobState + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.AnalyzeTextJobState] = kwargs.pop("cls", None) + + _request = build_text_analyze_text_job_status_request( + job_id=job_id, + show_stats=show_stats, + top=top, + skip=skip, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize(_models.ErrorResponse, response) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AnalyzeTextJobState, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + def _analyze_text_submit_job_initial( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + analysis_input: _models.MultiLanguageAnalysisInput = _Unset, + tasks: List[_models.AnalyzeTextLROTask] = _Unset, + display_name: Optional[str] = None, + default_language: Optional[str] = None, + cancel_after: Optional[float] = None, + **kwargs: Any + ) -> Iterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + if body is _Unset: + if analysis_input is _Unset: + raise TypeError("missing required argument: analysis_input") + if tasks is _Unset: + raise TypeError("missing required argument: tasks") + body = { + "analysisInput": analysis_input, + "cancelAfter": cancel_after, + "defaultLanguage": default_language, + "displayName": display_name, + "tasks": tasks, + } + body = {k: v 
for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_text_analyze_text_submit_job_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize(_models.ErrorResponse, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Operation-Location"] = self._deserialize("str", response.headers.get("Operation-Location")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + def begin_analyze_text_submit_job( + self, + *, + analysis_input: _models.MultiLanguageAnalysisInput, + tasks: List[_models.AnalyzeTextLROTask], + content_type: str = "application/json", + display_name: Optional[str] = None, + default_language: Optional[str] = None, + cancel_after: Optional[float] = None, + **kwargs: Any + ) -> LROPoller[None]: + """Submit a collection of text documents for analysis. Specify one or more unique tasks to be + executed as a long-running operation. + + :keyword analysis_input: Contains the input to be analyzed. Required. + :paramtype analysis_input: ~azure.ai.language.text.models.MultiLanguageAnalysisInput + :keyword tasks: List of tasks to be performed as part of the LRO. Required. + :paramtype tasks: list[~azure.ai.language.text.models.AnalyzeTextLROTask] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword display_name: Name for the task. Default value is None. + :paramtype display_name: str + :keyword default_language: Default language to use for records requesting automatic language + detection. Default value is None. + :paramtype default_language: str + :keyword cancel_after: Optional duration in seconds after which the job will be canceled if not + completed. Default value is None. + :paramtype cancel_after: float + :return: An instance of LROPoller that returns None + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_analyze_text_submit_job( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> LROPoller[None]: + """Submit a collection of text documents for analysis. Specify one or more unique tasks to be + executed as a long-running operation. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. 
+ Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns None + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_analyze_text_submit_job( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> LROPoller[None]: + """Submit a collection of text documents for analysis. Specify one or more unique tasks to be + executed as a long-running operation. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of LROPoller that returns None + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_analyze_text_submit_job( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + analysis_input: _models.MultiLanguageAnalysisInput = _Unset, + tasks: List[_models.AnalyzeTextLROTask] = _Unset, + display_name: Optional[str] = None, + default_language: Optional[str] = None, + cancel_after: Optional[float] = None, + **kwargs: Any + ) -> LROPoller[None]: + """Submit a collection of text documents for analysis. Specify one or more unique tasks to be + executed as a long-running operation. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword analysis_input: Contains the input to be analyzed. Required. + :paramtype analysis_input: ~azure.ai.language.text.models.MultiLanguageAnalysisInput + :keyword tasks: List of tasks to be performed as part of the LRO. Required. + :paramtype tasks: list[~azure.ai.language.text.models.AnalyzeTextLROTask] + :keyword display_name: Name for the task. Default value is None. + :paramtype display_name: str + :keyword default_language: Default language to use for records requesting automatic language + detection. Default value is None. + :paramtype default_language: str + :keyword cancel_after: Optional duration in seconds after which the job will be canceled if not + completed. Default value is None. 
+ :paramtype cancel_after: float + :return: An instance of LROPoller that returns None + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._analyze_text_submit_job_initial( + body=body, + analysis_input=analysis_input, + tasks=tasks, + display_name=display_name, + default_language=default_language, + cancel_after=cancel_after, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + path_format_arguments = { + "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + def _analyze_text_cancel_job_initial(self, job_id: str, **kwargs: Any) -> Iterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + _request = build_text_analyze_text_cancel_job_request( + job_id=job_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize(_models.ErrorResponse, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Operation-Location"] = self._deserialize("str", response.headers.get("Operation-Location")) + + deserialized = 
response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def begin_analyze_text_cancel_job(self, job_id: str, **kwargs: Any) -> LROPoller[None]: + """Cancel a long-running Text Analysis job. + + Cancel a long-running Text Analysis job. + + :param job_id: The job ID to cancel. Required. + :type job_id: str + :return: An instance of LROPoller that returns None + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._analyze_text_cancel_job_initial( + job_id=job_id, cls=lambda x, y, z: x, headers=_headers, params=_params, **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + path_format_arguments = { + "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_operations/_patch.py b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_operations/_patch.py new file mode 100644 index 000000000000..8bcb627aa475 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_operations/_patch.py @@ -0,0 +1,21 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. 
+ + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_patch.py b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_patch.py new file mode 100644 index 000000000000..8bcb627aa475 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_patch.py @@ -0,0 +1,21 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_utils/__init__.py b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_utils/__init__.py new file mode 100644 index 000000000000..8026245c2abc --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_utils/__init__.py @@ -0,0 +1,6 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_utils/model_base.py b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_utils/model_base.py new file mode 100644 index 000000000000..c62e7e7784af --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_utils/model_base.py @@ -0,0 +1,1233 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# --------------------------------------------------------------------------
+# pylint: disable=protected-access, broad-except
+
+import copy
+import calendar
+import decimal
+import functools
+import sys
+import logging
+import base64
+import re
+import typing
+import enum
+import email.utils
+from datetime import datetime, date, time, timedelta, timezone
+from json import JSONEncoder
+import xml.etree.ElementTree as ET
+from collections.abc import MutableMapping
+from typing_extensions import Self
+import isodate
+from azure.core.exceptions import DeserializationError
+from azure.core import CaseInsensitiveEnumMeta
+from azure.core.pipeline import PipelineResponse
+from azure.core.serialization import _Null
+from azure.core.rest import HttpResponse
+
+_LOGGER = logging.getLogger(__name__)
+
+__all__ = ["SdkJSONEncoder", "Model", "rest_field", "rest_discriminator"]
+
+TZ_UTC = timezone.utc
+_T = typing.TypeVar("_T")
+
+
+def _timedelta_as_isostr(td: timedelta) -> str:
+    """Converts a datetime.timedelta object into an ISO 8601 formatted string, e.g. 'P4DT12H30M05S'
+
+    Function adapted from the Tin Can Python project: https://github.com/RusticiSoftware/TinCanPython
+
+    :param timedelta td: The timedelta to convert
+    :rtype: str
+    :return: ISO8601 version of this timedelta
+    """
+
+    # Split seconds to larger units
+    seconds = td.total_seconds()
+    minutes, seconds = divmod(seconds, 60)
+    hours, minutes = divmod(minutes, 60)
+    days, hours = divmod(hours, 24)
+
+    days, hours, minutes = list(map(int, (days, hours, minutes)))
+    seconds = round(seconds, 6)
+
+    # Build date
+    date_str = ""
+    if days:
+        date_str = "%sD" % days
+
+    if hours or minutes or seconds:
+        # Build time
+        time_str = "T"
+
+        # Hours
+        bigger_exists = date_str or hours
+        if bigger_exists:
+            time_str += "{:02}H".format(hours)
+
+        # Minutes
+        bigger_exists = bigger_exists or minutes
+        if bigger_exists:
+            time_str += "{:02}M".format(minutes)
+
+        # Seconds
+        try:
+            if seconds.is_integer():
+                seconds_string = "{:02}".format(int(seconds))
+            else:
+                # 9 chars long w/ leading 0, 6 digits after decimal
+                seconds_string = "%09.6f" % seconds
+            # Remove trailing zeros
+            seconds_string = seconds_string.rstrip("0")
+        except AttributeError:  # int.is_integer() raises
+            seconds_string = "{:02}".format(seconds)
+
+        time_str += "{}S".format(seconds_string)
+    else:
+        time_str = ""
+
+    return "P" + date_str + time_str
+
+
+def _serialize_bytes(o, format: typing.Optional[str] = None) -> str:
+    encoded = base64.b64encode(o).decode()
+    if format == "base64url":
+        return encoded.strip("=").replace("+", "-").replace("/", "_")
+    return encoded
+
+
+def _serialize_datetime(o, format: typing.Optional[str] = None):
+    if hasattr(o, "year") and hasattr(o, "hour"):
+        if format == "rfc7231":
+            return email.utils.format_datetime(o, usegmt=True)
+        if format == "unix-timestamp":
+            return int(calendar.timegm(o.utctimetuple()))
+
+        # astimezone() fails for naive times in Python 2.7, so make sure o is aware (tzinfo is set)
+        if not o.tzinfo:
+            iso_formatted = o.replace(tzinfo=TZ_UTC).isoformat()
+        else:
+            iso_formatted = o.astimezone(TZ_UTC).isoformat()
+        # Replace the trailing "+00:00" UTC offset with "Z" (RFC 3339: https://www.ietf.org/rfc/rfc3339.txt)
+        return iso_formatted.replace("+00:00", "Z")
+    # Next try datetime.date or datetime.time
+    return o.isoformat()
+
+
+def _is_readonly(p):
+    try:
+        return p._visibility == ["read"]
+    except AttributeError:
+        return False
+
+
+class SdkJSONEncoder(JSONEncoder):
+    """A JSON encoder that's capable of serializing
datetime objects and bytes.""" + + def __init__(self, *args, exclude_readonly: bool = False, format: typing.Optional[str] = None, **kwargs): + super().__init__(*args, **kwargs) + self.exclude_readonly = exclude_readonly + self.format = format + + def default(self, o): # pylint: disable=too-many-return-statements + if _is_model(o): + if self.exclude_readonly: + readonly_props = [p._rest_name for p in o._attr_to_rest_field.values() if _is_readonly(p)] + return {k: v for k, v in o.items() if k not in readonly_props} + return dict(o.items()) + try: + return super(SdkJSONEncoder, self).default(o) + except TypeError: + if isinstance(o, _Null): + return None + if isinstance(o, decimal.Decimal): + return float(o) + if isinstance(o, (bytes, bytearray)): + return _serialize_bytes(o, self.format) + try: + # First try datetime.datetime + return _serialize_datetime(o, self.format) + except AttributeError: + pass + # Last, try datetime.timedelta + try: + return _timedelta_as_isostr(o) + except AttributeError: + # This will be raised when it hits value.total_seconds in the method above + pass + return super(SdkJSONEncoder, self).default(o) + + +_VALID_DATE = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}" + r"\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?") +_VALID_RFC7231 = re.compile( + r"(Mon|Tue|Wed|Thu|Fri|Sat|Sun),\s\d{2}\s" + r"(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s\d{4}\s\d{2}:\d{2}:\d{2}\sGMT" +) + + +def _deserialize_datetime(attr: typing.Union[str, datetime]) -> datetime: + """Deserialize ISO-8601 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: ~datetime.datetime + :returns: The datetime object from that input + """ + if isinstance(attr, datetime): + # i'm already deserialized + return attr + attr = attr.upper() + match = _VALID_DATE.match(attr) + if not match: + raise ValueError("Invalid datetime string: " + attr) + + check_decimal = attr.split(".") + if len(check_decimal) > 1: + decimal_str = "" + for digit in check_decimal[1]: + if digit.isdigit(): + decimal_str += digit + else: + break + if len(decimal_str) > 6: + attr = attr.replace(decimal_str, decimal_str[0:6]) + + date_obj = isodate.parse_datetime(attr) + test_utc = date_obj.utctimetuple() + if test_utc.tm_year > 9999 or test_utc.tm_year < 1: + raise OverflowError("Hit max or min date") + return date_obj + + +def _deserialize_datetime_rfc7231(attr: typing.Union[str, datetime]) -> datetime: + """Deserialize RFC7231 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: ~datetime.datetime + :returns: The datetime object from that input + """ + if isinstance(attr, datetime): + # i'm already deserialized + return attr + match = _VALID_RFC7231.match(attr) + if not match: + raise ValueError("Invalid datetime string: " + attr) + + return email.utils.parsedate_to_datetime(attr) + + +def _deserialize_datetime_unix_timestamp(attr: typing.Union[float, datetime]) -> datetime: + """Deserialize unix timestamp into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: ~datetime.datetime + :returns: The datetime object from that input + """ + if isinstance(attr, datetime): + # i'm already deserialized + return attr + return datetime.fromtimestamp(attr, TZ_UTC) + + +def _deserialize_date(attr: typing.Union[str, date]) -> date: + """Deserialize ISO-8601 formatted string into Date object. + :param str attr: response string to be deserialized. 
+ :rtype: date + :returns: The date object from that input + """ + # This must NOT use defaultmonth/defaultday. Using None ensure this raises an exception. + if isinstance(attr, date): + return attr + return isodate.parse_date(attr, defaultmonth=None, defaultday=None) # type: ignore + + +def _deserialize_time(attr: typing.Union[str, time]) -> time: + """Deserialize ISO-8601 formatted string into time object. + + :param str attr: response string to be deserialized. + :rtype: datetime.time + :returns: The time object from that input + """ + if isinstance(attr, time): + return attr + return isodate.parse_time(attr) + + +def _deserialize_bytes(attr): + if isinstance(attr, (bytes, bytearray)): + return attr + return bytes(base64.b64decode(attr)) + + +def _deserialize_bytes_base64(attr): + if isinstance(attr, (bytes, bytearray)): + return attr + padding = "=" * (3 - (len(attr) + 3) % 4) # type: ignore + attr = attr + padding # type: ignore + encoded = attr.replace("-", "+").replace("_", "/") + return bytes(base64.b64decode(encoded)) + + +def _deserialize_duration(attr): + if isinstance(attr, timedelta): + return attr + return isodate.parse_duration(attr) + + +def _deserialize_decimal(attr): + if isinstance(attr, decimal.Decimal): + return attr + return decimal.Decimal(str(attr)) + + +def _deserialize_int_as_str(attr): + if isinstance(attr, int): + return attr + return int(attr) + + +_DESERIALIZE_MAPPING = { + datetime: _deserialize_datetime, + date: _deserialize_date, + time: _deserialize_time, + bytes: _deserialize_bytes, + bytearray: _deserialize_bytes, + timedelta: _deserialize_duration, + typing.Any: lambda x: x, + decimal.Decimal: _deserialize_decimal, +} + +_DESERIALIZE_MAPPING_WITHFORMAT = { + "rfc3339": _deserialize_datetime, + "rfc7231": _deserialize_datetime_rfc7231, + "unix-timestamp": _deserialize_datetime_unix_timestamp, + "base64": _deserialize_bytes, + "base64url": _deserialize_bytes_base64, +} + + +def get_deserializer(annotation: typing.Any, rf: typing.Optional["_RestField"] = None): + if annotation is int and rf and rf._format == "str": + return _deserialize_int_as_str + if rf and rf._format: + return _DESERIALIZE_MAPPING_WITHFORMAT.get(rf._format) + return _DESERIALIZE_MAPPING.get(annotation) # pyright: ignore + + +def _get_type_alias_type(module_name: str, alias_name: str): + types = { + k: v + for k, v in sys.modules[module_name].__dict__.items() + if isinstance(v, typing._GenericAlias) # type: ignore + } + if alias_name not in types: + return alias_name + return types[alias_name] + + +def _get_model(module_name: str, model_name: str): + models = {k: v for k, v in sys.modules[module_name].__dict__.items() if isinstance(v, type)} + module_end = module_name.rsplit(".", 1)[0] + models.update({k: v for k, v in sys.modules[module_end].__dict__.items() if isinstance(v, type)}) + if isinstance(model_name, str): + model_name = model_name.split(".")[-1] + if model_name not in models: + return model_name + return models[model_name] + + +_UNSET = object() + + +class _MyMutableMapping(MutableMapping[str, typing.Any]): + def __init__(self, data: typing.Dict[str, typing.Any]) -> None: + self._data = data + + def __contains__(self, key: typing.Any) -> bool: + return key in self._data + + def __getitem__(self, key: str) -> typing.Any: + return self._data.__getitem__(key) + + def __setitem__(self, key: str, value: typing.Any) -> None: + self._data.__setitem__(key, value) + + def __delitem__(self, key: str) -> None: + self._data.__delitem__(key) + + def __iter__(self) -> 
typing.Iterator[typing.Any]: + return self._data.__iter__() + + def __len__(self) -> int: + return self._data.__len__() + + def __ne__(self, other: typing.Any) -> bool: + return not self.__eq__(other) + + def keys(self) -> typing.KeysView[str]: + """ + :returns: a set-like object providing a view on D's keys + :rtype: ~typing.KeysView + """ + return self._data.keys() + + def values(self) -> typing.ValuesView[typing.Any]: + """ + :returns: an object providing a view on D's values + :rtype: ~typing.ValuesView + """ + return self._data.values() + + def items(self) -> typing.ItemsView[str, typing.Any]: + """ + :returns: set-like object providing a view on D's items + :rtype: ~typing.ItemsView + """ + return self._data.items() + + def get(self, key: str, default: typing.Any = None) -> typing.Any: + """ + Get the value for key if key is in the dictionary, else default. + :param str key: The key to look up. + :param any default: The value to return if key is not in the dictionary. Defaults to None + :returns: D[k] if k in D, else d. + :rtype: any + """ + try: + return self[key] + except KeyError: + return default + + @typing.overload + def pop(self, key: str) -> typing.Any: ... # pylint: disable=arguments-differ + + @typing.overload + def pop(self, key: str, default: _T) -> _T: ... # pylint: disable=signature-differs + + @typing.overload + def pop(self, key: str, default: typing.Any) -> typing.Any: ... # pylint: disable=signature-differs + + def pop(self, key: str, default: typing.Any = _UNSET) -> typing.Any: + """ + Removes specified key and return the corresponding value. + :param str key: The key to pop. + :param any default: The value to return if key is not in the dictionary + :returns: The value corresponding to the key. + :rtype: any + :raises KeyError: If key is not found and default is not given. + """ + if default is _UNSET: + return self._data.pop(key) + return self._data.pop(key, default) + + def popitem(self) -> typing.Tuple[str, typing.Any]: + """ + Removes and returns some (key, value) pair + :returns: The (key, value) pair. + :rtype: tuple + :raises KeyError: if D is empty. + """ + return self._data.popitem() + + def clear(self) -> None: + """ + Remove all items from D. + """ + self._data.clear() + + def update(self, *args: typing.Any, **kwargs: typing.Any) -> None: # pylint: disable=arguments-differ + """ + Updates D from mapping/iterable E and F. + :param any args: Either a mapping object or an iterable of key-value pairs. + """ + self._data.update(*args, **kwargs) + + @typing.overload + def setdefault(self, key: str, default: None = None) -> None: ... + + @typing.overload + def setdefault(self, key: str, default: typing.Any) -> typing.Any: ... # pylint: disable=signature-differs + + def setdefault(self, key: str, default: typing.Any = _UNSET) -> typing.Any: + """ + Same as calling D.get(k, d), and setting D[k]=d if k not found + :param str key: The key to look up. + :param any default: The value to set if key is not in the dictionary + :returns: D[k] if k in D, else d. 
+ :rtype: any + """ + if default is _UNSET: + return self._data.setdefault(key) + return self._data.setdefault(key, default) + + def __eq__(self, other: typing.Any) -> bool: + try: + other_model = self.__class__(other) + except Exception: + return False + return self._data == other_model._data + + def __repr__(self) -> str: + return str(self._data) + + +def _is_model(obj: typing.Any) -> bool: + return getattr(obj, "_is_model", False) + + +def _serialize(o, format: typing.Optional[str] = None): # pylint: disable=too-many-return-statements + if isinstance(o, list): + return [_serialize(x, format) for x in o] + if isinstance(o, dict): + return {k: _serialize(v, format) for k, v in o.items()} + if isinstance(o, set): + return {_serialize(x, format) for x in o} + if isinstance(o, tuple): + return tuple(_serialize(x, format) for x in o) + if isinstance(o, (bytes, bytearray)): + return _serialize_bytes(o, format) + if isinstance(o, decimal.Decimal): + return float(o) + if isinstance(o, enum.Enum): + return o.value + if isinstance(o, int): + if format == "str": + return str(o) + return o + try: + # First try datetime.datetime + return _serialize_datetime(o, format) + except AttributeError: + pass + # Last, try datetime.timedelta + try: + return _timedelta_as_isostr(o) + except AttributeError: + # This will be raised when it hits value.total_seconds in the method above + pass + return o + + +def _get_rest_field( + attr_to_rest_field: typing.Dict[str, "_RestField"], rest_name: str +) -> typing.Optional["_RestField"]: + try: + return next(rf for rf in attr_to_rest_field.values() if rf._rest_name == rest_name) + except StopIteration: + return None + + +def _create_value(rf: typing.Optional["_RestField"], value: typing.Any) -> typing.Any: + if not rf: + return _serialize(value, None) + if rf._is_multipart_file_input: + return value + if rf._is_model: + return _deserialize(rf._type, value) + if isinstance(value, ET.Element): + value = _deserialize(rf._type, value) + return _serialize(value, rf._format) + + +class Model(_MyMutableMapping): + _is_model = True + # label whether current class's _attr_to_rest_field has been calculated + # could not see _attr_to_rest_field directly because subclass inherits it from parent class + _calculated: typing.Set[str] = set() + + def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None: + class_name = self.__class__.__name__ + if len(args) > 1: + raise TypeError(f"{class_name}.__init__() takes 2 positional arguments but {len(args) + 1} were given") + dict_to_pass = { + rest_field._rest_name: rest_field._default + for rest_field in self._attr_to_rest_field.values() + if rest_field._default is not _UNSET + } + if args: # pylint: disable=too-many-nested-blocks + if isinstance(args[0], ET.Element): + existed_attr_keys = [] + model_meta = getattr(self, "_xml", {}) + + for rf in self._attr_to_rest_field.values(): + prop_meta = getattr(rf, "_xml", {}) + xml_name = prop_meta.get("name", rf._rest_name) + xml_ns = prop_meta.get("ns", model_meta.get("ns", None)) + if xml_ns: + xml_name = "{" + xml_ns + "}" + xml_name + + # attribute + if prop_meta.get("attribute", False) and args[0].get(xml_name) is not None: + existed_attr_keys.append(xml_name) + dict_to_pass[rf._rest_name] = _deserialize(rf._type, args[0].get(xml_name)) + continue + + # unwrapped element is array + if prop_meta.get("unwrapped", False): + # unwrapped array could either use prop items meta/prop meta + if prop_meta.get("itemsName"): + xml_name = prop_meta.get("itemsName") + xml_ns = 
prop_meta.get("itemNs") + if xml_ns: + xml_name = "{" + xml_ns + "}" + xml_name + items = args[0].findall(xml_name) # pyright: ignore + if len(items) > 0: + existed_attr_keys.append(xml_name) + dict_to_pass[rf._rest_name] = _deserialize(rf._type, items) + continue + + # text element is primitive type + if prop_meta.get("text", False): + if args[0].text is not None: + dict_to_pass[rf._rest_name] = _deserialize(rf._type, args[0].text) + continue + + # wrapped element could be normal property or array, it should only have one element + item = args[0].find(xml_name) + if item is not None: + existed_attr_keys.append(xml_name) + dict_to_pass[rf._rest_name] = _deserialize(rf._type, item) + + # rest thing is additional properties + for e in args[0]: + if e.tag not in existed_attr_keys: + dict_to_pass[e.tag] = _convert_element(e) + else: + dict_to_pass.update( + {k: _create_value(_get_rest_field(self._attr_to_rest_field, k), v) for k, v in args[0].items()} + ) + else: + non_attr_kwargs = [k for k in kwargs if k not in self._attr_to_rest_field] + if non_attr_kwargs: + # actual type errors only throw the first wrong keyword arg they see, so following that. + raise TypeError(f"{class_name}.__init__() got an unexpected keyword argument '{non_attr_kwargs[0]}'") + dict_to_pass.update( + { + self._attr_to_rest_field[k]._rest_name: _create_value(self._attr_to_rest_field[k], v) + for k, v in kwargs.items() + if v is not None + } + ) + super().__init__(dict_to_pass) + + def copy(self) -> "Model": + return Model(self.__dict__) + + def __new__(cls, *args: typing.Any, **kwargs: typing.Any) -> Self: + if f"{cls.__module__}.{cls.__qualname__}" not in cls._calculated: + # we know the last nine classes in mro are going to be 'Model', '_MyMutableMapping', 'MutableMapping', + # 'Mapping', 'Collection', 'Sized', 'Iterable', 'Container' and 'object' + mros = cls.__mro__[:-9][::-1] # ignore parents, and reverse the mro order + attr_to_rest_field: typing.Dict[str, _RestField] = { # map attribute name to rest_field property + k: v for mro_class in mros for k, v in mro_class.__dict__.items() if k[0] != "_" and hasattr(v, "_type") + } + annotations = { + k: v + for mro_class in mros + if hasattr(mro_class, "__annotations__") + for k, v in mro_class.__annotations__.items() + } + for attr, rf in attr_to_rest_field.items(): + rf._module = cls.__module__ + if not rf._type: + rf._type = rf._get_deserialize_callable_from_annotation(annotations.get(attr, None)) + if not rf._rest_name_input: + rf._rest_name_input = attr + cls._attr_to_rest_field: typing.Dict[str, _RestField] = dict(attr_to_rest_field.items()) + cls._calculated.add(f"{cls.__module__}.{cls.__qualname__}") + + return super().__new__(cls) + + def __init_subclass__(cls, discriminator: typing.Optional[str] = None) -> None: + for base in cls.__bases__: + if hasattr(base, "__mapping__"): + base.__mapping__[discriminator or cls.__name__] = cls # type: ignore + + @classmethod + def _get_discriminator(cls, exist_discriminators) -> typing.Optional["_RestField"]: + for v in cls.__dict__.values(): + if isinstance(v, _RestField) and v._is_discriminator and v._rest_name not in exist_discriminators: + return v + return None + + @classmethod + def _deserialize(cls, data, exist_discriminators): + if not hasattr(cls, "__mapping__"): + return cls(data) + discriminator = cls._get_discriminator(exist_discriminators) + if discriminator is None: + return cls(data) + exist_discriminators.append(discriminator._rest_name) + if isinstance(data, ET.Element): + model_meta = getattr(cls, "_xml", 
{}) + prop_meta = getattr(discriminator, "_xml", {}) + xml_name = prop_meta.get("name", discriminator._rest_name) + xml_ns = prop_meta.get("ns", model_meta.get("ns", None)) + if xml_ns: + xml_name = "{" + xml_ns + "}" + xml_name + + if data.get(xml_name) is not None: + discriminator_value = data.get(xml_name) + else: + discriminator_value = data.find(xml_name).text # pyright: ignore + else: + discriminator_value = data.get(discriminator._rest_name) + mapped_cls = cls.__mapping__.get(discriminator_value, cls) # pyright: ignore # pylint: disable=no-member + return mapped_cls._deserialize(data, exist_discriminators) + + def as_dict(self, *, exclude_readonly: bool = False) -> typing.Dict[str, typing.Any]: + """Return a dict that can be turned into json using json.dump. + + :keyword bool exclude_readonly: Whether to remove the readonly properties. + :returns: A dict JSON compatible object + :rtype: dict + """ + + result = {} + readonly_props = [] + if exclude_readonly: + readonly_props = [p._rest_name for p in self._attr_to_rest_field.values() if _is_readonly(p)] + for k, v in self.items(): + if exclude_readonly and k in readonly_props: # pyright: ignore + continue + is_multipart_file_input = False + try: + is_multipart_file_input = next( + rf for rf in self._attr_to_rest_field.values() if rf._rest_name == k + )._is_multipart_file_input + except StopIteration: + pass + result[k] = v if is_multipart_file_input else Model._as_dict_value(v, exclude_readonly=exclude_readonly) + return result + + @staticmethod + def _as_dict_value(v: typing.Any, exclude_readonly: bool = False) -> typing.Any: + if v is None or isinstance(v, _Null): + return None + if isinstance(v, (list, tuple, set)): + return type(v)(Model._as_dict_value(x, exclude_readonly=exclude_readonly) for x in v) + if isinstance(v, dict): + return {dk: Model._as_dict_value(dv, exclude_readonly=exclude_readonly) for dk, dv in v.items()} + return v.as_dict(exclude_readonly=exclude_readonly) if hasattr(v, "as_dict") else v + + +def _deserialize_model(model_deserializer: typing.Optional[typing.Callable], obj): + if _is_model(obj): + return obj + return _deserialize(model_deserializer, obj) + + +def _deserialize_with_optional(if_obj_deserializer: typing.Optional[typing.Callable], obj): + if obj is None: + return obj + return _deserialize_with_callable(if_obj_deserializer, obj) + + +def _deserialize_with_union(deserializers, obj): + for deserializer in deserializers: + try: + return _deserialize(deserializer, obj) + except DeserializationError: + pass + raise DeserializationError() + + +def _deserialize_dict( + value_deserializer: typing.Optional[typing.Callable], + module: typing.Optional[str], + obj: typing.Dict[typing.Any, typing.Any], +): + if obj is None: + return obj + if isinstance(obj, ET.Element): + obj = {child.tag: child for child in obj} + return {k: _deserialize(value_deserializer, v, module) for k, v in obj.items()} + + +def _deserialize_multiple_sequence( + entry_deserializers: typing.List[typing.Optional[typing.Callable]], + module: typing.Optional[str], + obj, +): + if obj is None: + return obj + return type(obj)(_deserialize(deserializer, entry, module) for entry, deserializer in zip(obj, entry_deserializers)) + + +def _deserialize_sequence( + deserializer: typing.Optional[typing.Callable], + module: typing.Optional[str], + obj, +): + if obj is None: + return obj + if isinstance(obj, ET.Element): + obj = list(obj) + return type(obj)(_deserialize(deserializer, entry, module) for entry in obj) + + +def _sorted_annotations(types: 
typing.List[typing.Any]) -> typing.List[typing.Any]: + return sorted( + types, + key=lambda x: hasattr(x, "__name__") and x.__name__.lower() in ("str", "float", "int", "bool"), + ) + + +def _get_deserialize_callable_from_annotation( # pylint: disable=too-many-return-statements, too-many-branches + annotation: typing.Any, + module: typing.Optional[str], + rf: typing.Optional["_RestField"] = None, +) -> typing.Optional[typing.Callable[[typing.Any], typing.Any]]: + if not annotation: + return None + + # is it a type alias? + if isinstance(annotation, str): + if module is not None: + annotation = _get_type_alias_type(module, annotation) + + # is it a forward ref / in quotes? + if isinstance(annotation, (str, typing.ForwardRef)): + try: + model_name = annotation.__forward_arg__ # type: ignore + except AttributeError: + model_name = annotation + if module is not None: + annotation = _get_model(module, model_name) # type: ignore + + try: + if module and _is_model(annotation): + if rf: + rf._is_model = True + + return functools.partial(_deserialize_model, annotation) # pyright: ignore + except Exception: + pass + + # is it a literal? + try: + if annotation.__origin__ is typing.Literal: # pyright: ignore + return None + except AttributeError: + pass + + # is it optional? + try: + if any(a for a in annotation.__args__ if a == type(None)): # pyright: ignore + if len(annotation.__args__) <= 2: # pyright: ignore + if_obj_deserializer = _get_deserialize_callable_from_annotation( + next(a for a in annotation.__args__ if a != type(None)), module, rf # pyright: ignore + ) + + return functools.partial(_deserialize_with_optional, if_obj_deserializer) + # the type is Optional[Union[...]], we need to remove the None type from the Union + annotation_copy = copy.copy(annotation) + annotation_copy.__args__ = [a for a in annotation_copy.__args__ if a != type(None)] # pyright: ignore + return _get_deserialize_callable_from_annotation(annotation_copy, module, rf) + except AttributeError: + pass + + # is it union? 
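+    # Illustrative example (not part of the generated code): for an annotation
+    # like typing.Union[datetime, str], _sorted_annotations pushes str to the
+    # end, so _deserialize_with_union tries the datetime deserializer first and
+    # only falls back to the generic str deserializer on DeserializationError.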
+    if getattr(annotation, "__origin__", None) is typing.Union:
+        # initial ordering is we make `string` the last deserialization option, because it is often the most generic
+        deserializers = [
+            _get_deserialize_callable_from_annotation(arg, module, rf)
+            for arg in _sorted_annotations(annotation.__args__)  # pyright: ignore
+        ]
+
+        return functools.partial(_deserialize_with_union, deserializers)
+
+    try:
+        if annotation._name == "Dict":  # pyright: ignore
+            value_deserializer = _get_deserialize_callable_from_annotation(
+                annotation.__args__[1], module, rf  # pyright: ignore
+            )
+
+            return functools.partial(
+                _deserialize_dict,
+                value_deserializer,
+                module,
+            )
+    except (AttributeError, IndexError):
+        pass
+    try:
+        if annotation._name in ["List", "Set", "Tuple", "Sequence"]:  # pyright: ignore
+            if len(annotation.__args__) > 1:  # pyright: ignore
+                entry_deserializers = [
+                    _get_deserialize_callable_from_annotation(dt, module, rf)
+                    for dt in annotation.__args__  # pyright: ignore
+                ]
+                return functools.partial(_deserialize_multiple_sequence, entry_deserializers, module)
+            deserializer = _get_deserialize_callable_from_annotation(
+                annotation.__args__[0], module, rf  # pyright: ignore
+            )
+
+            return functools.partial(_deserialize_sequence, deserializer, module)
+    except (TypeError, IndexError, AttributeError, SyntaxError):
+        pass
+
+    def _deserialize_default(
+        deserializer,
+        obj,
+    ):
+        if obj is None:
+            return obj
+        try:
+            return _deserialize_with_callable(deserializer, obj)
+        except Exception:
+            pass
+        return obj
+
+    if get_deserializer(annotation, rf):
+        return functools.partial(_deserialize_default, get_deserializer(annotation, rf))
+
+    return functools.partial(_deserialize_default, annotation)
+
+
+def _deserialize_with_callable(
+    deserializer: typing.Optional[typing.Callable[[typing.Any], typing.Any]],
+    value: typing.Any,
+):  # pylint: disable=too-many-return-statements
+    try:
+        if value is None or isinstance(value, _Null):
+            return None
+        if isinstance(value, ET.Element):
+            if deserializer is str:
+                return value.text or ""
+            if deserializer is int:
+                return int(value.text) if value.text else None
+            if deserializer is float:
+                return float(value.text) if value.text else None
+            if deserializer is bool:
+                return value.text == "true" if value.text else None
+        if deserializer is None:
+            return value
+        if deserializer in [int, float, bool]:
+            return deserializer(value)
+        if isinstance(deserializer, CaseInsensitiveEnumMeta):
+            try:
+                return deserializer(value)
+            except ValueError:
+                # for unknown value, return raw value
+                return value
+        if isinstance(deserializer, type) and issubclass(deserializer, Model):
+            return deserializer._deserialize(value, [])
+        return typing.cast(typing.Callable[[typing.Any], typing.Any], deserializer)(value)
+    except Exception as e:
+        raise DeserializationError() from e
+
+
+def _deserialize(
+    deserializer: typing.Any,
+    value: typing.Any,
+    module: typing.Optional[str] = None,
+    rf: typing.Optional["_RestField"] = None,
+    format: typing.Optional[str] = None,
+) -> typing.Any:
+    if isinstance(value, PipelineResponse):
+        value = value.http_response.json()
+    if rf is None and format:
+        rf = _RestField(format=format)
+    if not isinstance(deserializer, functools.partial):
+        deserializer = _get_deserialize_callable_from_annotation(deserializer, module, rf)
+    return _deserialize_with_callable(deserializer, value)
+
+
+def _failsafe_deserialize(
+    deserializer: typing.Any,
+    response: HttpResponse,
+    module: typing.Optional[str] = None,
+    rf:
typing.Optional["_RestField"] = None, + format: typing.Optional[str] = None, +) -> typing.Any: + try: + return _deserialize(deserializer, response.json(), module, rf, format) + except DeserializationError: + _LOGGER.warning( + "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True + ) + return None + + +def _failsafe_deserialize_xml( + deserializer: typing.Any, + response: HttpResponse, +) -> typing.Any: + try: + return _deserialize_xml(deserializer, response.text()) + except DeserializationError: + _LOGGER.warning( + "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True + ) + return None + + +class _RestField: + def __init__( + self, + *, + name: typing.Optional[str] = None, + type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin + is_discriminator: bool = False, + visibility: typing.Optional[typing.List[str]] = None, + default: typing.Any = _UNSET, + format: typing.Optional[str] = None, + is_multipart_file_input: bool = False, + xml: typing.Optional[typing.Dict[str, typing.Any]] = None, + ): + self._type = type + self._rest_name_input = name + self._module: typing.Optional[str] = None + self._is_discriminator = is_discriminator + self._visibility = visibility + self._is_model = False + self._default = default + self._format = format + self._is_multipart_file_input = is_multipart_file_input + self._xml = xml if xml is not None else {} + + @property + def _class_type(self) -> typing.Any: + return getattr(self._type, "args", [None])[0] + + @property + def _rest_name(self) -> str: + if self._rest_name_input is None: + raise ValueError("Rest name was never set") + return self._rest_name_input + + def __get__(self, obj: Model, type=None): # pylint: disable=redefined-builtin + # by this point, type and rest_name will have a value bc we default + # them in __new__ of the Model class + item = obj.get(self._rest_name) + if item is None: + return item + if self._is_model: + return item + return _deserialize(self._type, _serialize(item, self._format), rf=self) + + def __set__(self, obj: Model, value) -> None: + if value is None: + # we want to wipe out entries if users set attr to None + try: + obj.__delitem__(self._rest_name) + except KeyError: + pass + return + if self._is_model: + if not _is_model(value): + value = _deserialize(self._type, value) + obj.__setitem__(self._rest_name, value) + return + obj.__setitem__(self._rest_name, _serialize(value, self._format)) + + def _get_deserialize_callable_from_annotation( + self, annotation: typing.Any + ) -> typing.Optional[typing.Callable[[typing.Any], typing.Any]]: + return _get_deserialize_callable_from_annotation(annotation, self._module, self) + + +def rest_field( + *, + name: typing.Optional[str] = None, + type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin + visibility: typing.Optional[typing.List[str]] = None, + default: typing.Any = _UNSET, + format: typing.Optional[str] = None, + is_multipart_file_input: bool = False, + xml: typing.Optional[typing.Dict[str, typing.Any]] = None, +) -> typing.Any: + return _RestField( + name=name, + type=type, + visibility=visibility, + default=default, + format=format, + is_multipart_file_input=is_multipart_file_input, + xml=xml, + ) + + +def rest_discriminator( + *, + name: typing.Optional[str] = None, + type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin + visibility: typing.Optional[typing.List[str]] = None, + xml: 
typing.Optional[typing.Dict[str, typing.Any]] = None, +) -> typing.Any: + return _RestField(name=name, type=type, is_discriminator=True, visibility=visibility, xml=xml) + + +def serialize_xml(model: Model, exclude_readonly: bool = False) -> str: + """Serialize a model to XML. + + :param Model model: The model to serialize. + :param bool exclude_readonly: Whether to exclude readonly properties. + :returns: The XML representation of the model. + :rtype: str + """ + return ET.tostring(_get_element(model, exclude_readonly), encoding="unicode") # type: ignore + + +def _get_element( + o: typing.Any, + exclude_readonly: bool = False, + parent_meta: typing.Optional[typing.Dict[str, typing.Any]] = None, + wrapped_element: typing.Optional[ET.Element] = None, +) -> typing.Union[ET.Element, typing.List[ET.Element]]: + if _is_model(o): + model_meta = getattr(o, "_xml", {}) + + # if prop is a model, then use the prop element directly, else generate a wrapper of model + if wrapped_element is None: + wrapped_element = _create_xml_element( + model_meta.get("name", o.__class__.__name__), + model_meta.get("prefix"), + model_meta.get("ns"), + ) + + readonly_props = [] + if exclude_readonly: + readonly_props = [p._rest_name for p in o._attr_to_rest_field.values() if _is_readonly(p)] + + for k, v in o.items(): + # do not serialize readonly properties + if exclude_readonly and k in readonly_props: + continue + + prop_rest_field = _get_rest_field(o._attr_to_rest_field, k) + if prop_rest_field: + prop_meta = getattr(prop_rest_field, "_xml").copy() + # use the wire name as xml name if no specific name is set + if prop_meta.get("name") is None: + prop_meta["name"] = k + else: + # additional properties will not have rest field, use the wire name as xml name + prop_meta = {"name": k} + + # if no ns for prop, use model's + if prop_meta.get("ns") is None and model_meta.get("ns"): + prop_meta["ns"] = model_meta.get("ns") + prop_meta["prefix"] = model_meta.get("prefix") + + if prop_meta.get("unwrapped", False): + # unwrapped could only set on array + wrapped_element.extend(_get_element(v, exclude_readonly, prop_meta)) + elif prop_meta.get("text", False): + # text could only set on primitive type + wrapped_element.text = _get_primitive_type_value(v) + elif prop_meta.get("attribute", False): + xml_name = prop_meta.get("name", k) + if prop_meta.get("ns"): + ET.register_namespace(prop_meta.get("prefix"), prop_meta.get("ns")) # pyright: ignore + xml_name = "{" + prop_meta.get("ns") + "}" + xml_name # pyright: ignore + # attribute should be primitive type + wrapped_element.set(xml_name, _get_primitive_type_value(v)) + else: + # other wrapped prop element + wrapped_element.append(_get_wrapped_element(v, exclude_readonly, prop_meta)) + return wrapped_element + if isinstance(o, list): + return [_get_element(x, exclude_readonly, parent_meta) for x in o] # type: ignore + if isinstance(o, dict): + result = [] + for k, v in o.items(): + result.append( + _get_wrapped_element( + v, + exclude_readonly, + { + "name": k, + "ns": parent_meta.get("ns") if parent_meta else None, + "prefix": parent_meta.get("prefix") if parent_meta else None, + }, + ) + ) + return result + + # primitive case need to create element based on parent_meta + if parent_meta: + return _get_wrapped_element( + o, + exclude_readonly, + { + "name": parent_meta.get("itemsName", parent_meta.get("name")), + "prefix": parent_meta.get("itemsPrefix", parent_meta.get("prefix")), + "ns": parent_meta.get("itemsNs", parent_meta.get("ns")), + }, + ) + + raise ValueError("Could not 
serialize value into xml: " + o) + + +def _get_wrapped_element( + v: typing.Any, + exclude_readonly: bool, + meta: typing.Optional[typing.Dict[str, typing.Any]], +) -> ET.Element: + wrapped_element = _create_xml_element( + meta.get("name") if meta else None, meta.get("prefix") if meta else None, meta.get("ns") if meta else None + ) + if isinstance(v, (dict, list)): + wrapped_element.extend(_get_element(v, exclude_readonly, meta)) + elif _is_model(v): + _get_element(v, exclude_readonly, meta, wrapped_element) + else: + wrapped_element.text = _get_primitive_type_value(v) + return wrapped_element + + +def _get_primitive_type_value(v) -> str: + if v is True: + return "true" + if v is False: + return "false" + if isinstance(v, _Null): + return "" + return str(v) + + +def _create_xml_element(tag, prefix=None, ns=None): + if prefix and ns: + ET.register_namespace(prefix, ns) + if ns: + return ET.Element("{" + ns + "}" + tag) + return ET.Element(tag) + + +def _deserialize_xml( + deserializer: typing.Any, + value: str, +) -> typing.Any: + element = ET.fromstring(value) # nosec + return _deserialize(deserializer, element) + + +def _convert_element(e: ET.Element): + # dict case + if len(e.attrib) > 0 or len({child.tag for child in e}) > 1: + dict_result: typing.Dict[str, typing.Any] = {} + for child in e: + if dict_result.get(child.tag) is not None: + if isinstance(dict_result[child.tag], list): + dict_result[child.tag].append(_convert_element(child)) + else: + dict_result[child.tag] = [dict_result[child.tag], _convert_element(child)] + else: + dict_result[child.tag] = _convert_element(child) + dict_result.update(e.attrib) + return dict_result + # array case + if len(e) > 0: + array_result: typing.List[typing.Any] = [] + for child in e: + array_result.append(_convert_element(child)) + return array_result + # primitive case + return e.text diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_utils/serialization.py b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_utils/serialization.py new file mode 100644 index 000000000000..eb86ea23c965 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_utils/serialization.py @@ -0,0 +1,2032 @@ +# pylint: disable=line-too-long,useless-suppression,too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +# pyright: reportUnnecessaryTypeIgnoreComment=false + +from base64 import b64decode, b64encode +import calendar +import datetime +import decimal +import email +from enum import Enum +import json +import logging +import re +import sys +import codecs +from typing import ( + Dict, + Any, + cast, + Optional, + Union, + AnyStr, + IO, + Mapping, + Callable, + MutableMapping, + List, +) + +try: + from urllib import quote # type: ignore +except ImportError: + from urllib.parse import quote +import xml.etree.ElementTree as ET + +import isodate # type: ignore +from typing_extensions import Self + +from azure.core.exceptions import DeserializationError, SerializationError +from azure.core.serialization import NULL as CoreNull + +_BOM = codecs.BOM_UTF8.decode(encoding="utf-8") + +JSON = MutableMapping[str, Any] + + +class RawDeserializer: + + # Accept "text" because we're open minded people... + JSON_REGEXP = re.compile(r"^(application|text)/([a-z+.]+\+)?json$") + + # Name used in context + CONTEXT_NAME = "deserialized_data" + + @classmethod + def deserialize_from_text(cls, data: Optional[Union[AnyStr, IO]], content_type: Optional[str] = None) -> Any: + """Decode data according to content-type. + + Accept a stream of data as well, but will be load at once in memory for now. + + If no content-type, will return the string version (not bytes, not stream) + + :param data: Input, could be bytes or stream (will be decoded with UTF8) or text + :type data: str or bytes or IO + :param str content_type: The content type. + :return: The deserialized data. + :rtype: object + """ + if hasattr(data, "read"): + # Assume a stream + data = cast(IO, data).read() + + if isinstance(data, bytes): + data_as_str = data.decode(encoding="utf-8-sig") + else: + # Explain to mypy the correct type. + data_as_str = cast(str, data) + + # Remove Byte Order Mark if present in string + data_as_str = data_as_str.lstrip(_BOM) + + if content_type is None: + return data + + if cls.JSON_REGEXP.match(content_type): + try: + return json.loads(data_as_str) + except ValueError as err: + raise DeserializationError("JSON is invalid: {}".format(err), err) from err + elif "xml" in (content_type or []): + try: + + try: + if isinstance(data, unicode): # type: ignore + # If I'm Python 2.7 and unicode XML will scream if I try a "fromstring" on unicode string + data_as_str = data_as_str.encode(encoding="utf-8") # type: ignore + except NameError: + pass + + return ET.fromstring(data_as_str) # nosec + except ET.ParseError as err: + # It might be because the server has an issue, and returned JSON with + # content-type XML.... + # So let's try a JSON load, and if it's still broken + # let's flow the initial exception + def _json_attemp(data): + try: + return True, json.loads(data) + except ValueError: + return False, None # Don't care about this one + + success, json_result = _json_attemp(data) + if success: + return json_result + # If i'm here, it's not JSON, it's not XML, let's scream + # and raise the last context in this block (the XML exception) + # The function hack is because Py2.7 messes up with exception + # context otherwise. 
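+                # Worked example of this branch (a sketch, not new behavior): a body
+                # of b'{"code": "NotFound"}' sent as "application/xml" makes
+                # ET.fromstring raise, _json_attemp then parses it and the dict is
+                # returned above; only a body that is neither XML nor JSON reaches
+                # the two lines below.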
+                _LOGGER.critical("Was neither XML nor JSON, failing")
+                raise DeserializationError("XML is invalid") from err
+        elif content_type.startswith("text/"):
+            return data_as_str
+        raise DeserializationError("Cannot deserialize content-type: {}".format(content_type))
+
+    @classmethod
+    def deserialize_from_http_generics(cls, body_bytes: Optional[Union[AnyStr, IO]], headers: Mapping) -> Any:
+        """Deserialize from HTTP response.
+
+        Use bytes and headers to NOT use any requests/aiohttp or whatever
+        specific implementation.
+        Headers will be tested for "content-type".
+
+        :param bytes body_bytes: The body of the response.
+        :param dict headers: The headers of the response.
+        :returns: The deserialized data.
+        :rtype: object
+        """
+        # Try to use content-type from headers if available
+        content_type = None
+        if "content-type" in headers:
+            content_type = headers["content-type"].split(";")[0].strip().lower()
+        # Ouch, this server did not declare what it sent...
+        # Let's guess it's JSON...
+        # Also, since Autorest was considering that an empty body was a valid JSON,
+        # need that test as well....
+        else:
+            content_type = "application/json"
+
+        if body_bytes:
+            return cls.deserialize_from_text(body_bytes, content_type)
+        return None
+
+
+_LOGGER = logging.getLogger(__name__)
+
+try:
+    _long_type = long  # type: ignore
+except NameError:
+    _long_type = int
+
+TZ_UTC = datetime.timezone.utc
+
+_FLATTEN = re.compile(r"(?<!\\)\.")
+
+
+def attribute_transformer(key, attr_desc, value):  # pylint: disable=unused-argument
+    """A key transformer that returns the Python attribute.
+
+    :param str key: The attribute name
+    :param dict attr_desc: The attribute metadata
+    :param object value: The value
+    :returns: A key using attribute name
+    :rtype: str
+    """
+    return (key, value)
+
+
+def full_restapi_key_transformer(key, attr_desc, value):  # pylint: disable=unused-argument
+    """A key transformer that returns the full RestAPI key path.
+
+    :param str key: The attribute name
+    :param dict attr_desc: The attribute metadata
+    :param object value: The value
+    :returns: A list of keys using RestAPI syntax.
+    :rtype: list
+    """
+    keys = _FLATTEN.split(attr_desc["key"])
+    return ([_decode_attribute_map_key(k) for k in keys], value)
+
+
+def last_restapi_key_transformer(key, attr_desc, value):
+    """A key transformer that returns the last RestAPI key.
+
+    :param str key: The attribute name
+    :param dict attr_desc: The attribute metadata
+    :param object value: The value
+    :returns: The last RestAPI key.
+    :rtype: str
+    """
+    key, value = full_restapi_key_transformer(key, attr_desc, value)
+    return (key[-1], value)
+
+
+def _create_xml_node(tag, prefix=None, ns=None):
+    """Create a XML node.
+
+    :param str tag: The tag name
+    :param str prefix: The prefix
+    :param str ns: The namespace
+    :return: The XML node
+    :rtype: xml.etree.ElementTree.Element
+    """
+    if prefix and ns:
+        ET.register_namespace(prefix, ns)
+    if ns:
+        return ET.Element("{" + ns + "}" + tag)
+    return ET.Element(tag)
+
+
+class Model:
+    """Mixin for all client request body/response body models to support
+    serialization and deserialization.
+    """
+
+    _subtype_map: Dict[str, Dict[str, Any]] = {}
+    _attribute_map: Dict[str, Dict[str, Any]] = {}
+    _validation: Dict[str, Dict[str, Any]] = {}
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.additional_properties: Optional[Dict[str, Any]] = {}
+        for k in kwargs:  # pylint: disable=consider-using-dict-items
+            if k not in self._attribute_map:
+                _LOGGER.warning("%s is not a known attribute of class %s and will be ignored", k, self.__class__)
+            elif k in self._validation and self._validation[k].get("readonly", False):
+                _LOGGER.warning("Readonly attribute %s will be ignored in class %s", k, self.__class__)
+            else:
+                setattr(self, k, kwargs[k])
+
+    def __eq__(self, other: Any) -> bool:
+        """Compare objects by comparing all attributes.
+
+        :param object other: The object to compare
+        :returns: True if objects are equal
+        :rtype: bool
+        """
+        if isinstance(other, self.__class__):
+            return self.__dict__ == other.__dict__
+        return False
+
+    def __ne__(self, other: Any) -> bool:
+        """Compare objects by comparing all attributes.
+
+        :param object other: The object to compare
+        :returns: True if objects are not equal
+        :rtype: bool
+        """
+        return not self.__eq__(other)
+
+    def __str__(self) -> str:
+        return str(self.__dict__)
+
+    @classmethod
+    def enable_additional_properties_sending(cls) -> None:
+        cls._attribute_map["additional_properties"] = {"key": "", "type": "{object}"}
+
+    @classmethod
+    def is_xml_model(cls) -> bool:
+        try:
+            cls._xml_map  # type: ignore
+        except AttributeError:
+            return False
+        return True
+
+    @classmethod
+    def _create_xml_node(cls):
+        """Create XML node.
+
+        :returns: The XML node
+        :rtype: xml.etree.ElementTree.Element
+        """
+        try:
+            xml_map = cls._xml_map  # type: ignore
+        except AttributeError:
+            xml_map = {}
+
+        return _create_xml_node(xml_map.get("name", cls.__name__), xml_map.get("prefix", None), xml_map.get("ns", None))
+
+    def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> JSON:
+        """Return the JSON that would be sent to server from this model.
+
+        This is an alias to `as_dict(full_restapi_key_transformer, keep_readonly=False)`.
+
+        If you want XML serialization, you can pass the kwarg is_xml=True.
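+
+        Example (an illustrative sketch; ``Pet`` stands for any generated model
+        subclass, not a class shipped in this package):
+
+        .. code::python
+
+            pet = Pet(name="Fido")
+            body = pet.serialize()                    # readonly attributes dropped
+            full = pet.serialize(keep_readonly=True)  # readonly attributes kept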
+ + :param bool keep_readonly: If you want to serialize the readonly attributes + :returns: A dict JSON compatible object + :rtype: dict + """ + serializer = Serializer(self._infer_class_models()) + return serializer._serialize( # type: ignore # pylint: disable=protected-access + self, keep_readonly=keep_readonly, **kwargs + ) + + def as_dict( + self, + keep_readonly: bool = True, + key_transformer: Callable[[str, Dict[str, Any], Any], Any] = attribute_transformer, + **kwargs: Any + ) -> JSON: + """Return a dict that can be serialized using json.dump. + + Advanced usage might optionally use a callback as parameter: + + .. code::python + + def my_key_transformer(key, attr_desc, value): + return key + + Key is the attribute name used in Python. Attr_desc + is a dict of metadata. Currently contains 'type' with the + msrest type and 'key' with the RestAPI encoded key. + Value is the current value in this object. + + The string returned will be used to serialize the key. + If the return type is a list, this is considered hierarchical + result dict. + + See the three examples in this file: + + - attribute_transformer + - full_restapi_key_transformer + - last_restapi_key_transformer + + If you want XML serialization, you can pass the kwargs is_xml=True. + + :param bool keep_readonly: If you want to serialize the readonly attributes + :param function key_transformer: A key transformer function. + :returns: A dict JSON compatible object + :rtype: dict + """ + serializer = Serializer(self._infer_class_models()) + return serializer._serialize( # type: ignore # pylint: disable=protected-access + self, key_transformer=key_transformer, keep_readonly=keep_readonly, **kwargs + ) + + @classmethod + def _infer_class_models(cls): + try: + str_models = cls.__module__.rsplit(".", 1)[0] + models = sys.modules[str_models] + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + if cls.__name__ not in client_models: + raise ValueError("Not Autorest generated code") + except Exception: # pylint: disable=broad-exception-caught + # Assume it's not Autorest generated (tests?). Add ourselves as dependencies. + client_models = {cls.__name__: cls} + return client_models + + @classmethod + def deserialize(cls, data: Any, content_type: Optional[str] = None) -> Self: + """Parse a str using the RestAPI syntax and return a model. + + :param str data: A str using RestAPI structure. JSON by default. + :param str content_type: JSON by default, set application/xml if XML. + :returns: An instance of this model + :raises DeserializationError: if something went wrong + :rtype: Self + """ + deserializer = Deserializer(cls._infer_class_models()) + return deserializer(cls.__name__, data, content_type=content_type) # type: ignore + + @classmethod + def from_dict( + cls, + data: Any, + key_extractors: Optional[Callable[[str, Dict[str, Any], Any], Any]] = None, + content_type: Optional[str] = None, + ) -> Self: + """Parse a dict using given key extractor return a model. + + By default consider key + extractors (rest_key_case_insensitive_extractor, attribute_key_case_insensitive_extractor + and last_rest_key_case_insensitive_extractor) + + :param dict data: A dict using RestAPI structure + :param function key_extractors: A key extractor function. + :param str content_type: JSON by default, set application/xml if XML. 
+ :returns: An instance of this model + :raises DeserializationError: if something went wrong + :rtype: Self + """ + deserializer = Deserializer(cls._infer_class_models()) + deserializer.key_extractors = ( # type: ignore + [ # type: ignore + attribute_key_case_insensitive_extractor, + rest_key_case_insensitive_extractor, + last_rest_key_case_insensitive_extractor, + ] + if key_extractors is None + else key_extractors + ) + return deserializer(cls.__name__, data, content_type=content_type) # type: ignore + + @classmethod + def _flatten_subtype(cls, key, objects): + if "_subtype_map" not in cls.__dict__: + return {} + result = dict(cls._subtype_map[key]) + for valuetype in cls._subtype_map[key].values(): + result.update(objects[valuetype]._flatten_subtype(key, objects)) # pylint: disable=protected-access + return result + + @classmethod + def _classify(cls, response, objects): + """Check the class _subtype_map for any child classes. + We want to ignore any inherited _subtype_maps. + + :param dict response: The initial data + :param dict objects: The class objects + :returns: The class to be used + :rtype: class + """ + for subtype_key in cls.__dict__.get("_subtype_map", {}).keys(): + subtype_value = None + + if not isinstance(response, ET.Element): + rest_api_response_key = cls._get_rest_key_parts(subtype_key)[-1] + subtype_value = response.get(rest_api_response_key, None) or response.get(subtype_key, None) + else: + subtype_value = xml_key_extractor(subtype_key, cls._attribute_map[subtype_key], response) + if subtype_value: + # Try to match base class. Can be class name only + # (bug to fix in Autorest to support x-ms-discriminator-name) + if cls.__name__ == subtype_value: + return cls + flatten_mapping_type = cls._flatten_subtype(subtype_key, objects) + try: + return objects[flatten_mapping_type[subtype_value]] # type: ignore + except KeyError: + _LOGGER.warning( + "Subtype value %s has no mapping, use base class %s.", + subtype_value, + cls.__name__, + ) + break + else: + _LOGGER.warning("Discriminator %s is absent or null, use base class %s.", subtype_key, cls.__name__) + break + return cls + + @classmethod + def _get_rest_key_parts(cls, attr_key): + """Get the RestAPI key of this attr, split it and decode part + :param str attr_key: Attribute key must be in attribute_map. + :returns: A list of RestAPI part + :rtype: list + """ + rest_split_key = _FLATTEN.split(cls._attribute_map[attr_key]["key"]) + return [_decode_attribute_map_key(key_part) for key_part in rest_split_key] + + +def _decode_attribute_map_key(key): + """This decode a key in an _attribute_map to the actual key we want to look at + inside the received data. 
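+
+    Example (sketch): the attribute-map key ``"properties.my\\.key"`` is first
+    split by ``_FLATTEN`` into ``["properties", "my\\.key"]``; this function then
+    turns the escaped part ``"my\\.key"`` into the real wire key ``"my.key"``.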
+ + :param str key: A key string from the generated code + :returns: The decoded key + :rtype: str + """ + return key.replace("\\.", ".") + + +class Serializer: # pylint: disable=too-many-public-methods + """Request object model serializer.""" + + basic_types = {str: "str", int: "int", bool: "bool", float: "float"} + + _xml_basic_types_serializers = {"bool": lambda x: str(x).lower()} + days = {0: "Mon", 1: "Tue", 2: "Wed", 3: "Thu", 4: "Fri", 5: "Sat", 6: "Sun"} + months = { + 1: "Jan", + 2: "Feb", + 3: "Mar", + 4: "Apr", + 5: "May", + 6: "Jun", + 7: "Jul", + 8: "Aug", + 9: "Sep", + 10: "Oct", + 11: "Nov", + 12: "Dec", + } + validation = { + "min_length": lambda x, y: len(x) < y, + "max_length": lambda x, y: len(x) > y, + "minimum": lambda x, y: x < y, + "maximum": lambda x, y: x > y, + "minimum_ex": lambda x, y: x <= y, + "maximum_ex": lambda x, y: x >= y, + "min_items": lambda x, y: len(x) < y, + "max_items": lambda x, y: len(x) > y, + "pattern": lambda x, y: not re.match(y, x, re.UNICODE), + "unique": lambda x, y: len(x) != len(set(x)), + "multiple": lambda x, y: x % y != 0, + } + + def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None: + self.serialize_type = { + "iso-8601": Serializer.serialize_iso, + "rfc-1123": Serializer.serialize_rfc, + "unix-time": Serializer.serialize_unix, + "duration": Serializer.serialize_duration, + "date": Serializer.serialize_date, + "time": Serializer.serialize_time, + "decimal": Serializer.serialize_decimal, + "long": Serializer.serialize_long, + "bytearray": Serializer.serialize_bytearray, + "base64": Serializer.serialize_base64, + "object": self.serialize_object, + "[]": self.serialize_iter, + "{}": self.serialize_dict, + } + self.dependencies: Dict[str, type] = dict(classes) if classes else {} + self.key_transformer = full_restapi_key_transformer + self.client_side_validation = True + + def _serialize( # pylint: disable=too-many-nested-blocks, too-many-branches, too-many-statements, too-many-locals + self, target_obj, data_type=None, **kwargs + ): + """Serialize data into a string according to type. + + :param object target_obj: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str, dict + :raises SerializationError: if serialization fails. + :returns: The serialized data. 
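+
+        Example (sketch): with a hypothetical model whose _attribute_map maps
+        attribute ``pet_name`` to key ``properties.name``, serializing
+        ``Pet(pet_name="Fido")`` produces ``{"properties": {"name": "Fido"}}``
+        under the default full_restapi_key_transformer.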
+ """ + key_transformer = kwargs.get("key_transformer", self.key_transformer) + keep_readonly = kwargs.get("keep_readonly", False) + if target_obj is None: + return None + + attr_name = None + class_name = target_obj.__class__.__name__ + + if data_type: + return self.serialize_data(target_obj, data_type, **kwargs) + + if not hasattr(target_obj, "_attribute_map"): + data_type = type(target_obj).__name__ + if data_type in self.basic_types.values(): + return self.serialize_data(target_obj, data_type, **kwargs) + + # Force "is_xml" kwargs if we detect a XML model + try: + is_xml_model_serialization = kwargs["is_xml"] + except KeyError: + is_xml_model_serialization = kwargs.setdefault("is_xml", target_obj.is_xml_model()) + + serialized = {} + if is_xml_model_serialization: + serialized = target_obj._create_xml_node() # pylint: disable=protected-access + try: + attributes = target_obj._attribute_map # pylint: disable=protected-access + for attr, attr_desc in attributes.items(): + attr_name = attr + if not keep_readonly and target_obj._validation.get( # pylint: disable=protected-access + attr_name, {} + ).get("readonly", False): + continue + + if attr_name == "additional_properties" and attr_desc["key"] == "": + if target_obj.additional_properties is not None: + serialized.update(target_obj.additional_properties) + continue + try: + + orig_attr = getattr(target_obj, attr) + if is_xml_model_serialization: + pass # Don't provide "transformer" for XML for now. Keep "orig_attr" + else: # JSON + keys, orig_attr = key_transformer(attr, attr_desc.copy(), orig_attr) + keys = keys if isinstance(keys, list) else [keys] + + kwargs["serialization_ctxt"] = attr_desc + new_attr = self.serialize_data(orig_attr, attr_desc["type"], **kwargs) + + if is_xml_model_serialization: + xml_desc = attr_desc.get("xml", {}) + xml_name = xml_desc.get("name", attr_desc["key"]) + xml_prefix = xml_desc.get("prefix", None) + xml_ns = xml_desc.get("ns", None) + if xml_desc.get("attr", False): + if xml_ns: + ET.register_namespace(xml_prefix, xml_ns) + xml_name = "{{{}}}{}".format(xml_ns, xml_name) + serialized.set(xml_name, new_attr) # type: ignore + continue + if xml_desc.get("text", False): + serialized.text = new_attr # type: ignore + continue + if isinstance(new_attr, list): + serialized.extend(new_attr) # type: ignore + elif isinstance(new_attr, ET.Element): + # If the down XML has no XML/Name, + # we MUST replace the tag with the local tag. But keeping the namespaces. 
+ if "name" not in getattr(orig_attr, "_xml_map", {}): + splitted_tag = new_attr.tag.split("}") + if len(splitted_tag) == 2: # Namespace + new_attr.tag = "}".join([splitted_tag[0], xml_name]) + else: + new_attr.tag = xml_name + serialized.append(new_attr) # type: ignore + else: # That's a basic type + # Integrate namespace if necessary + local_node = _create_xml_node(xml_name, xml_prefix, xml_ns) + local_node.text = str(new_attr) + serialized.append(local_node) # type: ignore + else: # JSON + for k in reversed(keys): # type: ignore + new_attr = {k: new_attr} + + _new_attr = new_attr + _serialized = serialized + for k in keys: # type: ignore + if k not in _serialized: + _serialized.update(_new_attr) # type: ignore + _new_attr = _new_attr[k] # type: ignore + _serialized = _serialized[k] + except ValueError as err: + if isinstance(err, SerializationError): + raise + + except (AttributeError, KeyError, TypeError) as err: + msg = "Attribute {} in object {} cannot be serialized.\n{}".format(attr_name, class_name, str(target_obj)) + raise SerializationError(msg) from err + return serialized + + def body(self, data, data_type, **kwargs): + """Serialize data intended for a request body. + + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: dict + :raises SerializationError: if serialization fails. + :raises ValueError: if data is None + :returns: The serialized request body + """ + + # Just in case this is a dict + internal_data_type_str = data_type.strip("[]{}") + internal_data_type = self.dependencies.get(internal_data_type_str, None) + try: + is_xml_model_serialization = kwargs["is_xml"] + except KeyError: + if internal_data_type and issubclass(internal_data_type, Model): + is_xml_model_serialization = kwargs.setdefault("is_xml", internal_data_type.is_xml_model()) + else: + is_xml_model_serialization = False + if internal_data_type and not isinstance(internal_data_type, Enum): + try: + deserializer = Deserializer(self.dependencies) + # Since it's on serialization, it's almost sure that format is not JSON REST + # We're not able to deal with additional properties for now. + deserializer.additional_properties_detection = False + if is_xml_model_serialization: + deserializer.key_extractors = [ # type: ignore + attribute_key_case_insensitive_extractor, + ] + else: + deserializer.key_extractors = [ + rest_key_case_insensitive_extractor, + attribute_key_case_insensitive_extractor, + last_rest_key_case_insensitive_extractor, + ] + data = deserializer._deserialize(data_type, data) # pylint: disable=protected-access + except DeserializationError as err: + raise SerializationError("Unable to build a model: " + str(err)) from err + + return self._serialize(data, data_type, **kwargs) + + def url(self, name, data, data_type, **kwargs): + """Serialize data intended for a URL path. + + :param str name: The name of the URL path parameter. + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str + :returns: The serialized URL path + :raises TypeError: if serialization fails. 
+ :raises ValueError: if data is None + """ + try: + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + + if kwargs.get("skip_quote") is True: + output = str(output) + output = output.replace("{", quote("{")).replace("}", quote("}")) + else: + output = quote(str(output), safe="") + except SerializationError as exc: + raise TypeError("{} must be type {}.".format(name, data_type)) from exc + return output + + def query(self, name, data, data_type, **kwargs): + """Serialize data intended for a URL query. + + :param str name: The name of the query parameter. + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str, list + :raises TypeError: if serialization fails. + :raises ValueError: if data is None + :returns: The serialized query parameter + """ + try: + # Treat the list aside, since we don't want to encode the div separator + if data_type.startswith("["): + internal_data_type = data_type[1:-1] + do_quote = not kwargs.get("skip_quote", False) + return self.serialize_iter(data, internal_data_type, do_quote=do_quote, **kwargs) + + # Not a list, regular serialization + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + if kwargs.get("skip_quote") is True: + output = str(output) + else: + output = quote(str(output), safe="") + except SerializationError as exc: + raise TypeError("{} must be type {}.".format(name, data_type)) from exc + return str(output) + + def header(self, name, data, data_type, **kwargs): + """Serialize data intended for a request header. + + :param str name: The name of the header. + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str + :raises TypeError: if serialization fails. + :raises ValueError: if data is None + :returns: The serialized header + """ + try: + if data_type in ["[str]"]: + data = ["" if d is None else d for d in data] + + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + except SerializationError as exc: + raise TypeError("{} must be type {}.".format(name, data_type)) from exc + return str(output) + + def serialize_data(self, data, data_type, **kwargs): + """Serialize generic data according to supplied data type. + + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :raises AttributeError: if required data is None. + :raises ValueError: if data is None + :raises SerializationError: if serialization fails. + :returns: The serialized data. 
+ :rtype: str, int, float, bool, dict, list + """ + if data is None: + raise ValueError("No value for given attribute") + + try: + if data is CoreNull: + return None + if data_type in self.basic_types.values(): + return self.serialize_basic(data, data_type, **kwargs) + + if data_type in self.serialize_type: + return self.serialize_type[data_type](data, **kwargs) + + # If dependencies is empty, try with current data class + # It has to be a subclass of Enum anyway + enum_type = self.dependencies.get(data_type, data.__class__) + if issubclass(enum_type, Enum): + return Serializer.serialize_enum(data, enum_obj=enum_type) + + iter_type = data_type[0] + data_type[-1] + if iter_type in self.serialize_type: + return self.serialize_type[iter_type](data, data_type[1:-1], **kwargs) + + except (ValueError, TypeError) as err: + msg = "Unable to serialize value: {!r} as type: {!r}." + raise SerializationError(msg.format(data, data_type)) from err + return self._serialize(data, **kwargs) + + @classmethod + def _get_custom_serializers(cls, data_type, **kwargs): # pylint: disable=inconsistent-return-statements + custom_serializer = kwargs.get("basic_types_serializers", {}).get(data_type) + if custom_serializer: + return custom_serializer + if kwargs.get("is_xml", False): + return cls._xml_basic_types_serializers.get(data_type) + + @classmethod + def serialize_basic(cls, data, data_type, **kwargs): + """Serialize basic builting data type. + Serializes objects to str, int, float or bool. + + Possible kwargs: + - basic_types_serializers dict[str, callable] : If set, use the callable as serializer + - is_xml bool : If set, use xml_basic_types_serializers + + :param obj data: Object to be serialized. + :param str data_type: Type of object in the iterable. + :rtype: str, int, float, bool + :return: serialized object + """ + custom_serializer = cls._get_custom_serializers(data_type, **kwargs) + if custom_serializer: + return custom_serializer(data) + if data_type == "str": + return cls.serialize_unicode(data) + return eval(data_type)(data) # nosec # pylint: disable=eval-used + + @classmethod + def serialize_unicode(cls, data): + """Special handling for serializing unicode strings in Py2. + Encode to UTF-8 if unicode, otherwise handle as a str. + + :param str data: Object to be serialized. + :rtype: str + :return: serialized object + """ + try: # If I received an enum, return its value + return data.value + except AttributeError: + pass + + try: + if isinstance(data, unicode): # type: ignore + # Don't change it, JSON and XML ElementTree are totally able + # to serialize correctly u'' strings + return data + except NameError: + return str(data) + return str(data) + + def serialize_iter(self, data, iter_type, div=None, **kwargs): + """Serialize iterable. + + Supported kwargs: + - serialization_ctxt dict : The current entry of _attribute_map, or same format. + serialization_ctxt['type'] should be same as data_type. + - is_xml bool : If set, serialize as XML + + :param list data: Object to be serialized. + :param str iter_type: Type of object in the iterable. + :param str div: If set, this str will be used to combine the elements + in the iterable into a combined string. Default is 'None'. + Defaults to False. 
+ :rtype: list, str + :return: serialized iterable + """ + if isinstance(data, str): + raise SerializationError("Refuse str type as a valid iter type.") + + serialization_ctxt = kwargs.get("serialization_ctxt", {}) + is_xml = kwargs.get("is_xml", False) + + serialized = [] + for d in data: + try: + serialized.append(self.serialize_data(d, iter_type, **kwargs)) + except ValueError as err: + if isinstance(err, SerializationError): + raise + serialized.append(None) + + if kwargs.get("do_quote", False): + serialized = ["" if s is None else quote(str(s), safe="") for s in serialized] + + if div: + serialized = ["" if s is None else str(s) for s in serialized] + serialized = div.join(serialized) + + if "xml" in serialization_ctxt or is_xml: + # XML serialization is more complicated + xml_desc = serialization_ctxt.get("xml", {}) + xml_name = xml_desc.get("name") + if not xml_name: + xml_name = serialization_ctxt["key"] + + # Create a wrap node if necessary (use the fact that Element and list have "append") + is_wrapped = xml_desc.get("wrapped", False) + node_name = xml_desc.get("itemsName", xml_name) + if is_wrapped: + final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + else: + final_result = [] + # All list elements to "local_node" + for el in serialized: + if isinstance(el, ET.Element): + el_node = el + else: + el_node = _create_xml_node(node_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + if el is not None: # Otherwise it writes "None" :-p + el_node.text = str(el) + final_result.append(el_node) + return final_result + return serialized + + def serialize_dict(self, attr, dict_type, **kwargs): + """Serialize a dictionary of objects. + + :param dict attr: Object to be serialized. + :param str dict_type: Type of object in the dictionary. + :rtype: dict + :return: serialized dictionary + """ + serialization_ctxt = kwargs.get("serialization_ctxt", {}) + serialized = {} + for key, value in attr.items(): + try: + serialized[self.serialize_unicode(key)] = self.serialize_data(value, dict_type, **kwargs) + except ValueError as err: + if isinstance(err, SerializationError): + raise + serialized[self.serialize_unicode(key)] = None + + if "xml" in serialization_ctxt: + # XML serialization is more complicated + xml_desc = serialization_ctxt["xml"] + xml_name = xml_desc["name"] + + final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + for key, value in serialized.items(): + ET.SubElement(final_result, key).text = value + return final_result + + return serialized + + def serialize_object(self, attr, **kwargs): # pylint: disable=too-many-return-statements + """Serialize a generic object. + This will be handled as a dictionary. If object passed in is not + a basic type (str, int, float, dict, list) it will simply be + cast to str. + + :param dict attr: Object to be serialized. 
+ :rtype: dict or str + :return: serialized object + """ + if attr is None: + return None + if isinstance(attr, ET.Element): + return attr + obj_type = type(attr) + if obj_type in self.basic_types: + return self.serialize_basic(attr, self.basic_types[obj_type], **kwargs) + if obj_type is _long_type: + return self.serialize_long(attr) + if obj_type is str: + return self.serialize_unicode(attr) + if obj_type is datetime.datetime: + return self.serialize_iso(attr) + if obj_type is datetime.date: + return self.serialize_date(attr) + if obj_type is datetime.time: + return self.serialize_time(attr) + if obj_type is datetime.timedelta: + return self.serialize_duration(attr) + if obj_type is decimal.Decimal: + return self.serialize_decimal(attr) + + # If it's a model or I know this dependency, serialize as a Model + if obj_type in self.dependencies.values() or isinstance(attr, Model): + return self._serialize(attr) + + if obj_type == dict: + serialized = {} + for key, value in attr.items(): + try: + serialized[self.serialize_unicode(key)] = self.serialize_object(value, **kwargs) + except ValueError: + serialized[self.serialize_unicode(key)] = None + return serialized + + if obj_type == list: + serialized = [] + for obj in attr: + try: + serialized.append(self.serialize_object(obj, **kwargs)) + except ValueError: + pass + return serialized + return str(attr) + + @staticmethod + def serialize_enum(attr, enum_obj=None): + try: + result = attr.value + except AttributeError: + result = attr + try: + enum_obj(result) # type: ignore + return result + except ValueError as exc: + for enum_value in enum_obj: # type: ignore + if enum_value.value.lower() == str(attr).lower(): + return enum_value.value + error = "{!r} is not valid value for enum {!r}" + raise SerializationError(error.format(attr, enum_obj)) from exc + + @staticmethod + def serialize_bytearray(attr, **kwargs): # pylint: disable=unused-argument + """Serialize bytearray into base-64 string. + + :param str attr: Object to be serialized. + :rtype: str + :return: serialized base64 + """ + return b64encode(attr).decode() + + @staticmethod + def serialize_base64(attr, **kwargs): # pylint: disable=unused-argument + """Serialize str into base-64 string. + + :param str attr: Object to be serialized. + :rtype: str + :return: serialized base64 + """ + encoded = b64encode(attr).decode("ascii") + return encoded.strip("=").replace("+", "-").replace("/", "_") + + @staticmethod + def serialize_decimal(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Decimal object to float. + + :param decimal attr: Object to be serialized. + :rtype: float + :return: serialized decimal + """ + return float(attr) + + @staticmethod + def serialize_long(attr, **kwargs): # pylint: disable=unused-argument + """Serialize long (Py2) or int (Py3). + + :param int attr: Object to be serialized. + :rtype: int/long + :return: serialized long + """ + return _long_type(attr) + + @staticmethod + def serialize_date(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Date object into ISO-8601 formatted string. + + :param Date attr: Object to be serialized. + :rtype: str + :return: serialized date + """ + if isinstance(attr, str): + attr = isodate.parse_date(attr) + t = "{:04}-{:02}-{:02}".format(attr.year, attr.month, attr.day) + return t + + @staticmethod + def serialize_time(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Time object into ISO-8601 formatted string. + + :param datetime.time attr: Object to be serialized. 
+ :rtype: str + :return: serialized time + """ + if isinstance(attr, str): + attr = isodate.parse_time(attr) + t = "{:02}:{:02}:{:02}".format(attr.hour, attr.minute, attr.second) + if attr.microsecond: + t += ".{:02}".format(attr.microsecond) + return t + + @staticmethod + def serialize_duration(attr, **kwargs): # pylint: disable=unused-argument + """Serialize TimeDelta object into ISO-8601 formatted string. + + :param TimeDelta attr: Object to be serialized. + :rtype: str + :return: serialized duration + """ + if isinstance(attr, str): + attr = isodate.parse_duration(attr) + return isodate.duration_isoformat(attr) + + @staticmethod + def serialize_rfc(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Datetime object into RFC-1123 formatted string. + + :param Datetime attr: Object to be serialized. + :rtype: str + :raises TypeError: if format invalid. + :return: serialized rfc + """ + try: + if not attr.tzinfo: + _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") + utc = attr.utctimetuple() + except AttributeError as exc: + raise TypeError("RFC1123 object must be valid Datetime object.") from exc + + return "{}, {:02} {} {:04} {:02}:{:02}:{:02} GMT".format( + Serializer.days[utc.tm_wday], + utc.tm_mday, + Serializer.months[utc.tm_mon], + utc.tm_year, + utc.tm_hour, + utc.tm_min, + utc.tm_sec, + ) + + @staticmethod + def serialize_iso(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Datetime object into ISO-8601 formatted string. + + :param Datetime attr: Object to be serialized. + :rtype: str + :raises SerializationError: if format invalid. + :return: serialized iso + """ + if isinstance(attr, str): + attr = isodate.parse_datetime(attr) + try: + if not attr.tzinfo: + _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") + utc = attr.utctimetuple() + if utc.tm_year > 9999 or utc.tm_year < 1: + raise OverflowError("Hit max or min date") + + microseconds = str(attr.microsecond).rjust(6, "0").rstrip("0").ljust(3, "0") + if microseconds: + microseconds = "." + microseconds + date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( + utc.tm_year, utc.tm_mon, utc.tm_mday, utc.tm_hour, utc.tm_min, utc.tm_sec + ) + return date + microseconds + "Z" + except (ValueError, OverflowError) as err: + msg = "Unable to serialize datetime object." + raise SerializationError(msg) from err + except AttributeError as err: + msg = "ISO-8601 object must be valid Datetime object." + raise TypeError(msg) from err + + @staticmethod + def serialize_unix(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Datetime object into IntTime format. + This is represented as seconds. + + :param Datetime attr: Object to be serialized. + :rtype: int + :raises SerializationError: if format invalid + :return: serialied unix + """ + if isinstance(attr, int): + return attr + try: + if not attr.tzinfo: + _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") + return int(calendar.timegm(attr.utctimetuple())) + except AttributeError as exc: + raise TypeError("Unix time object must be valid Datetime object.") from exc + + +def rest_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument + key = attr_desc["key"] + working_data = data + + while "." 
in key: + # Need the cast, as for some reasons "split" is typed as list[str | Any] + dict_keys = cast(List[str], _FLATTEN.split(key)) + if len(dict_keys) == 1: + key = _decode_attribute_map_key(dict_keys[0]) + break + working_key = _decode_attribute_map_key(dict_keys[0]) + working_data = working_data.get(working_key, data) + if working_data is None: + # If at any point while following flatten JSON path see None, it means + # that all properties under are None as well + return None + key = ".".join(dict_keys[1:]) + + return working_data.get(key) + + +def rest_key_case_insensitive_extractor( # pylint: disable=unused-argument, inconsistent-return-statements + attr, attr_desc, data +): + key = attr_desc["key"] + working_data = data + + while "." in key: + dict_keys = _FLATTEN.split(key) + if len(dict_keys) == 1: + key = _decode_attribute_map_key(dict_keys[0]) + break + working_key = _decode_attribute_map_key(dict_keys[0]) + working_data = attribute_key_case_insensitive_extractor(working_key, None, working_data) + if working_data is None: + # If at any point while following flatten JSON path see None, it means + # that all properties under are None as well + return None + key = ".".join(dict_keys[1:]) + + if working_data: + return attribute_key_case_insensitive_extractor(key, None, working_data) + + +def last_rest_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument + """Extract the attribute in "data" based on the last part of the JSON path key. + + :param str attr: The attribute to extract + :param dict attr_desc: The attribute description + :param dict data: The data to extract from + :rtype: object + :returns: The extracted attribute + """ + key = attr_desc["key"] + dict_keys = _FLATTEN.split(key) + return attribute_key_extractor(dict_keys[-1], None, data) + + +def last_rest_key_case_insensitive_extractor(attr, attr_desc, data): # pylint: disable=unused-argument + """Extract the attribute in "data" based on the last part of the JSON path key. + + This is the case insensitive version of "last_rest_key_extractor" + :param str attr: The attribute to extract + :param dict attr_desc: The attribute description + :param dict data: The data to extract from + :rtype: object + :returns: The extracted attribute + """ + key = attr_desc["key"] + dict_keys = _FLATTEN.split(key) + return attribute_key_case_insensitive_extractor(dict_keys[-1], None, data) + + +def attribute_key_extractor(attr, _, data): + return data.get(attr) + + +def attribute_key_case_insensitive_extractor(attr, _, data): + found_key = None + lower_attr = attr.lower() + for key in data: + if lower_attr == key.lower(): + found_key = key + break + + return data.get(found_key) + + +def _extract_name_from_internal_type(internal_type): + """Given an internal type XML description, extract correct XML name with namespace. 
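+
+    Example (sketch): for an internal type whose ``_xml_map`` is
+    ``{"name": "Pet", "ns": "http://example.org"}`` this returns the string
+    ``"{http://example.org}Pet"``.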
+
+    :param dict internal_type: A model type
+    :rtype: str
+    :returns: The XML name, qualified with the namespace when one is set
+    """
+    internal_type_xml_map = getattr(internal_type, "_xml_map", {})
+    xml_name = internal_type_xml_map.get("name", internal_type.__name__)
+    xml_ns = internal_type_xml_map.get("ns", None)
+    if xml_ns:
+        xml_name = "{{{}}}{}".format(xml_ns, xml_name)
+    return xml_name
+
+
+def xml_key_extractor(attr, attr_desc, data):  # pylint: disable=unused-argument,too-many-return-statements
+    if isinstance(data, dict):
+        return None
+
+    # Test if this model is XML ready first
+    if not isinstance(data, ET.Element):
+        return None
+
+    xml_desc = attr_desc.get("xml", {})
+    xml_name = xml_desc.get("name", attr_desc["key"])
+
+    # Look for children
+    is_iter_type = attr_desc["type"].startswith("[")
+    is_wrapped = xml_desc.get("wrapped", False)
+    internal_type = attr_desc.get("internalType", None)
+    internal_type_xml_map = getattr(internal_type, "_xml_map", {})
+
+    # Integrate namespace if necessary
+    xml_ns = xml_desc.get("ns", internal_type_xml_map.get("ns", None))
+    if xml_ns:
+        xml_name = "{{{}}}{}".format(xml_ns, xml_name)
+
+    # If it's an attribute, that's simple
+    if xml_desc.get("attr", False):
+        return data.get(xml_name)
+
+    # If it's x-ms-text, that's simple too
+    if xml_desc.get("text", False):
+        return data.text
+
+    # Scenario where I take the local name:
+    # - Wrapped node
+    # - Internal type is an enum (considered basic types)
+    # - Internal type has no XML/Name node
+    if is_wrapped or (internal_type and (issubclass(internal_type, Enum) or "name" not in internal_type_xml_map)):
+        children = data.findall(xml_name)
+    # If internal type has a local name and it's not a list, I use that name
+    elif not is_iter_type and internal_type and "name" in internal_type_xml_map:
+        xml_name = _extract_name_from_internal_type(internal_type)
+        children = data.findall(xml_name)
+    # That's an array
+    else:
+        if internal_type:  # Complex type, ignore itemsName and use the complex type name
+            items_name = _extract_name_from_internal_type(internal_type)
+        else:
+            items_name = xml_desc.get("itemsName", xml_name)
+        children = data.findall(items_name)
+
+    if len(children) == 0:
+        if is_iter_type:
+            if is_wrapped:
+                return None  # is_wrapped no node, we want None
+            return []  # not wrapped, assume empty list
+        return None  # Assume it's not there, maybe an optional node.
+
+    # If is_iter_type and not wrapped, return all found children
+    if is_iter_type:
+        if not is_wrapped:
+            return children
+        # Iter and wrapped, should have found one node only (the wrap one)
+        if len(children) != 1:
+            raise DeserializationError(
+                "Tried to deserialize a wrapped array, but found several '{}' nodes. Maybe this array should not be declared as wrapped?".format(
+                    xml_name
+                )
+            )
+        return list(children[0])  # Might be empty list and that's ok.
+
+    # Here it's not an itertype; we should have found one element only or empty
+    if len(children) > 1:
+        raise DeserializationError("Found several XML '{}' elements where only one was expected".format(xml_name))
+    return children[0]
+
+
+class Deserializer:
+    """Response object model deserializer.
+
+    :param dict classes: Class type dictionary for deserializing complex types.
+    :ivar list key_extractors: Ordered list of extractors to be used by this deserializer.
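+
+    Example (an illustrative sketch, not generated API surface; ``Pet`` stands
+    for any model class registered in ``classes``):
+
+    .. code::python
+
+        deserializer = Deserializer({"Pet": Pet})
+        pet = deserializer("Pet", {"name": "Fido"}, content_type="application/json")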
+ """ + + basic_types = {str: "str", int: "int", bool: "bool", float: "float"} + + valid_date = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?") + + def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None: + self.deserialize_type = { + "iso-8601": Deserializer.deserialize_iso, + "rfc-1123": Deserializer.deserialize_rfc, + "unix-time": Deserializer.deserialize_unix, + "duration": Deserializer.deserialize_duration, + "date": Deserializer.deserialize_date, + "time": Deserializer.deserialize_time, + "decimal": Deserializer.deserialize_decimal, + "long": Deserializer.deserialize_long, + "bytearray": Deserializer.deserialize_bytearray, + "base64": Deserializer.deserialize_base64, + "object": self.deserialize_object, + "[]": self.deserialize_iter, + "{}": self.deserialize_dict, + } + self.deserialize_expected_types = { + "duration": (isodate.Duration, datetime.timedelta), + "iso-8601": (datetime.datetime), + } + self.dependencies: Dict[str, type] = dict(classes) if classes else {} + self.key_extractors = [rest_key_extractor, xml_key_extractor] + # Additional properties only works if the "rest_key_extractor" is used to + # extract the keys. Making it to work whatever the key extractor is too much + # complicated, with no real scenario for now. + # So adding a flag to disable additional properties detection. This flag should be + # used if your expect the deserialization to NOT come from a JSON REST syntax. + # Otherwise, result are unexpected + self.additional_properties_detection = True + + def __call__(self, target_obj, response_data, content_type=None): + """Call the deserializer to process a REST response. + + :param str target_obj: Target data type to deserialize to. + :param requests.Response response_data: REST response object. + :param str content_type: Swagger "produces" if available. + :raises DeserializationError: if deserialization fails. + :return: Deserialized object. + :rtype: object + """ + data = self._unpack_content(response_data, content_type) + return self._deserialize(target_obj, data) + + def _deserialize(self, target_obj, data): # pylint: disable=inconsistent-return-statements + """Call the deserializer on a model. + + Data needs to be already deserialized as JSON or XML ElementTree + + :param str target_obj: Target data type to deserialize to. + :param object data: Object to deserialize. + :raises DeserializationError: if deserialization fails. + :return: Deserialized object. 
+ :rtype: object + """ + # This is already a model, go recursive just in case + if hasattr(data, "_attribute_map"): + constants = [name for name, config in getattr(data, "_validation", {}).items() if config.get("constant")] + try: + for attr, mapconfig in data._attribute_map.items(): # pylint: disable=protected-access + if attr in constants: + continue + value = getattr(data, attr) + if value is None: + continue + local_type = mapconfig["type"] + internal_data_type = local_type.strip("[]{}") + if internal_data_type not in self.dependencies or isinstance(internal_data_type, Enum): + continue + setattr(data, attr, self._deserialize(local_type, value)) + return data + except AttributeError: + return + + response, class_name = self._classify_target(target_obj, data) + + if isinstance(response, str): + return self.deserialize_data(data, response) + if isinstance(response, type) and issubclass(response, Enum): + return self.deserialize_enum(data, response) + + if data is None or data is CoreNull: + return data + try: + attributes = response._attribute_map # type: ignore # pylint: disable=protected-access + d_attrs = {} + for attr, attr_desc in attributes.items(): + # Check empty string. If it's not empty, someone has a real "additionalProperties"... + if attr == "additional_properties" and attr_desc["key"] == "": + continue + raw_value = None + # Enhance attr_desc with some dynamic data + attr_desc = attr_desc.copy() # Do a copy, do not change the real one + internal_data_type = attr_desc["type"].strip("[]{}") + if internal_data_type in self.dependencies: + attr_desc["internalType"] = self.dependencies[internal_data_type] + + for key_extractor in self.key_extractors: + found_value = key_extractor(attr, attr_desc, data) + if found_value is not None: + if raw_value is not None and raw_value != found_value: + msg = ( + "Ignoring extracted value '%s' from %s for key '%s'" + " (duplicate extraction, follow extractors order)" + ) + _LOGGER.warning(msg, found_value, key_extractor, attr) + continue + raw_value = found_value + + value = self.deserialize_data(raw_value, attr_desc["type"]) + d_attrs[attr] = value + except (AttributeError, TypeError, KeyError) as err: + msg = "Unable to deserialize to object: " + class_name # type: ignore + raise DeserializationError(msg) from err + additional_properties = self._build_additional_properties(attributes, data) + return self._instantiate_model(response, d_attrs, additional_properties) + + def _build_additional_properties(self, attribute_map, data): + if not self.additional_properties_detection: + return None + if "additional_properties" in attribute_map and attribute_map.get("additional_properties", {}).get("key") != "": + # Check empty string. If it's not empty, someone has a real "additionalProperties" + return None + if isinstance(data, ET.Element): + data = {el.tag: el.text for el in data} + + known_keys = { + _decode_attribute_map_key(_FLATTEN.split(desc["key"])[0]) + for desc in attribute_map.values() + if desc["key"] != "" + } + present_keys = set(data.keys()) + missing_keys = present_keys - known_keys + return {key: data[key] for key in missing_keys} + + def _classify_target(self, target, data): + """Check to see whether the deserialization target object can + be classified into a subclass. + Once classification has been determined, initialize object. + + :param str target: The target object type to deserialize to. + :param str/dict data: The response data to deserialize. + :return: The classified target object and its class name. 
+ :rtype: tuple + """ + if target is None: + return None, None + + if isinstance(target, str): + try: + target = self.dependencies[target] + except KeyError: + return target, target + + try: + target = target._classify(data, self.dependencies) # type: ignore # pylint: disable=protected-access + except AttributeError: + pass # Target is not a Model, no classify + return target, target.__class__.__name__ # type: ignore + + def failsafe_deserialize(self, target_obj, data, content_type=None): + """Ignores any errors encountered in deserialization, + and falls back to not deserializing the object. Recommended + for use in error deserialization, as we want to return the + HttpResponseError to users, and not have them deal with + a deserialization error. + + :param str target_obj: The target object type to deserialize to. + :param str/dict data: The response data to deserialize. + :param str content_type: Swagger "produces" if available. + :return: Deserialized object. + :rtype: object + """ + try: + return self(target_obj, data, content_type=content_type) + except: # pylint: disable=bare-except + _LOGGER.debug( + "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True + ) + return None + + @staticmethod + def _unpack_content(raw_data, content_type=None): + """Extract the correct structure for deserialization. + + If raw_data is a PipelineResponse, try to extract the result of RawDeserializer. + if we can't, raise. Your Pipeline should have a RawDeserializer. + + If not a pipeline response and raw_data is bytes or string, use content-type + to decode it. If no content-type, try JSON. + + If raw_data is something else, bypass all logic and return it directly. + + :param obj raw_data: Data to be processed. + :param str content_type: How to parse if raw_data is a string/bytes. + :raises JSONDecodeError: If JSON is requested and parsing is impossible. + :raises UnicodeDecodeError: If bytes is not UTF8 + :rtype: object + :return: Unpacked content. + """ + # Assume this is enough to detect a Pipeline Response without importing it + context = getattr(raw_data, "context", {}) + if context: + if RawDeserializer.CONTEXT_NAME in context: + return context[RawDeserializer.CONTEXT_NAME] + raise ValueError("This pipeline didn't have the RawDeserializer policy; can't deserialize") + + # Assume this is enough to recognize universal_http.ClientResponse without importing it + if hasattr(raw_data, "body"): + return RawDeserializer.deserialize_from_http_generics(raw_data.text(), raw_data.headers) + + # Assume this enough to recognize requests.Response without importing it. + if hasattr(raw_data, "_content_consumed"): + return RawDeserializer.deserialize_from_http_generics(raw_data.text, raw_data.headers) + + if isinstance(raw_data, (str, bytes)) or hasattr(raw_data, "read"): + return RawDeserializer.deserialize_from_text(raw_data, content_type) # type: ignore + return raw_data + + def _instantiate_model(self, response, attrs, additional_properties=None): + """Instantiate a response model passing in deserialized args. + + :param Response response: The response model class. + :param dict attrs: The deserialized response attributes. + :param dict additional_properties: Additional properties to be set. + :rtype: Response + :return: The instantiated response model. 
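+
+        Example (sketch): with ``attrs={"name": "Fido", "status": "ok"}`` where
+        ``status`` is marked readonly, the model is built as ``response(name="Fido")``
+        and ``status`` is then assigned with ``setattr``, bypassing the readonly
+        filtering done in ``__init__``.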
+ """ + if callable(response): + subtype = getattr(response, "_subtype_map", {}) + try: + readonly = [ + k + for k, v in response._validation.items() # pylint: disable=protected-access # type: ignore + if v.get("readonly") + ] + const = [ + k + for k, v in response._validation.items() # pylint: disable=protected-access # type: ignore + if v.get("constant") + ] + kwargs = {k: v for k, v in attrs.items() if k not in subtype and k not in readonly + const} + response_obj = response(**kwargs) + for attr in readonly: + setattr(response_obj, attr, attrs.get(attr)) + if additional_properties: + response_obj.additional_properties = additional_properties # type: ignore + return response_obj + except TypeError as err: + msg = "Unable to deserialize {} into model {}. ".format(kwargs, response) # type: ignore + raise DeserializationError(msg + str(err)) from err + else: + try: + for attr, value in attrs.items(): + setattr(response, attr, value) + return response + except Exception as exp: + msg = "Unable to populate response model. " + msg += "Type: {}, Error: {}".format(type(response), exp) + raise DeserializationError(msg) from exp + + def deserialize_data(self, data, data_type): # pylint: disable=too-many-return-statements + """Process data for deserialization according to data type. + + :param str data: The response string to be deserialized. + :param str data_type: The type to deserialize to. + :raises DeserializationError: if deserialization fails. + :return: Deserialized object. + :rtype: object + """ + if data is None: + return data + + try: + if not data_type: + return data + if data_type in self.basic_types.values(): + return self.deserialize_basic(data, data_type) + if data_type in self.deserialize_type: + if isinstance(data, self.deserialize_expected_types.get(data_type, tuple())): + return data + + is_a_text_parsing_type = lambda x: x not in [ # pylint: disable=unnecessary-lambda-assignment + "object", + "[]", + r"{}", + ] + if isinstance(data, ET.Element) and is_a_text_parsing_type(data_type) and not data.text: + return None + data_val = self.deserialize_type[data_type](data) + return data_val + + iter_type = data_type[0] + data_type[-1] + if iter_type in self.deserialize_type: + return self.deserialize_type[iter_type](data, data_type[1:-1]) + + obj_type = self.dependencies[data_type] + if issubclass(obj_type, Enum): + if isinstance(data, ET.Element): + data = data.text + return self.deserialize_enum(data, obj_type) + + except (ValueError, TypeError, AttributeError) as err: + msg = "Unable to deserialize response data." + msg += " Data: {}, {}".format(data, data_type) + raise DeserializationError(msg) from err + return self._deserialize(obj_type, data) + + def deserialize_iter(self, attr, iter_type): + """Deserialize an iterable. + + :param list attr: Iterable to be deserialized. + :param str iter_type: The type of object in the iterable. + :return: Deserialized iterable. + :rtype: list + """ + if attr is None: + return None + if isinstance(attr, ET.Element): # If I receive an element here, get the children + attr = list(attr) + if not isinstance(attr, (list, set)): + raise DeserializationError("Cannot deserialize as [{}] an object of type {}".format(iter_type, type(attr))) + return [self.deserialize_data(a, iter_type) for a in attr] + + def deserialize_dict(self, attr, dict_type): + """Deserialize a dictionary. + + :param dict/list attr: Dictionary to be deserialized. Also accepts + a list of key, value pairs. + :param str dict_type: The object type of the items in the dictionary. 
+ :return: Deserialized dictionary. + :rtype: dict + """ + if isinstance(attr, list): + return {x["key"]: self.deserialize_data(x["value"], dict_type) for x in attr} + + if isinstance(attr, ET.Element): + # Transform value into {"Key": "value"} + attr = {el.tag: el.text for el in attr} + return {k: self.deserialize_data(v, dict_type) for k, v in attr.items()} + + def deserialize_object(self, attr, **kwargs): # pylint: disable=too-many-return-statements + """Deserialize a generic object. + This will be handled as a dictionary. + + :param dict attr: Dictionary to be deserialized. + :return: Deserialized object. + :rtype: dict + :raises TypeError: if non-builtin datatype encountered. + """ + if attr is None: + return None + if isinstance(attr, ET.Element): + # Do no recurse on XML, just return the tree as-is + return attr + if isinstance(attr, str): + return self.deserialize_basic(attr, "str") + obj_type = type(attr) + if obj_type in self.basic_types: + return self.deserialize_basic(attr, self.basic_types[obj_type]) + if obj_type is _long_type: + return self.deserialize_long(attr) + + if obj_type == dict: + deserialized = {} + for key, value in attr.items(): + try: + deserialized[key] = self.deserialize_object(value, **kwargs) + except ValueError: + deserialized[key] = None + return deserialized + + if obj_type == list: + deserialized = [] + for obj in attr: + try: + deserialized.append(self.deserialize_object(obj, **kwargs)) + except ValueError: + pass + return deserialized + + error = "Cannot deserialize generic object with type: " + raise TypeError(error + str(obj_type)) + + def deserialize_basic(self, attr, data_type): # pylint: disable=too-many-return-statements + """Deserialize basic builtin data type from string. + Will attempt to convert to str, int, float and bool. + This function will also accept '1', '0', 'true' and 'false' as + valid bool values. + + :param str attr: response string to be deserialized. + :param str data_type: deserialization data type. + :return: Deserialized basic type. + :rtype: str, int, float or bool + :raises TypeError: if string format is not valid. + """ + # If we're here, data is supposed to be a basic type. + # If it's still an XML node, take the text + if isinstance(attr, ET.Element): + attr = attr.text + if not attr: + if data_type == "str": + # None or '', node is empty string. + return "" + # None or '', node with a strong type is None. + # Don't try to model "empty bool" or "empty int" + return None + + if data_type == "bool": + if attr in [True, False, 1, 0]: + return bool(attr) + if isinstance(attr, str): + if attr.lower() in ["true", "1"]: + return True + if attr.lower() in ["false", "0"]: + return False + raise TypeError("Invalid boolean value: {}".format(attr)) + + if data_type == "str": + return self.deserialize_unicode(attr) + return eval(data_type)(attr) # nosec # pylint: disable=eval-used + + @staticmethod + def deserialize_unicode(data): + """Preserve unicode objects in Python 2, otherwise return data + as a string. + + :param str data: response string to be deserialized. + :return: Deserialized string. 
+ :rtype: str or unicode + """ + # We might be here because we have an enum modeled as string, + # and we try to deserialize a partial dict with enum inside + if isinstance(data, Enum): + return data + + # Consider this is real string + try: + if isinstance(data, unicode): # type: ignore + return data + except NameError: + return str(data) + return str(data) + + @staticmethod + def deserialize_enum(data, enum_obj): + """Deserialize string into enum object. + + If the string is not a valid enum value it will be returned as-is + and a warning will be logged. + + :param str data: Response string to be deserialized. If this value is + None or invalid it will be returned as-is. + :param Enum enum_obj: Enum object to deserialize to. + :return: Deserialized enum object. + :rtype: Enum + """ + if isinstance(data, enum_obj) or data is None: + return data + if isinstance(data, Enum): + data = data.value + if isinstance(data, int): + # Workaround. We might consider remove it in the future. + try: + return list(enum_obj.__members__.values())[data] + except IndexError as exc: + error = "{!r} is not a valid index for enum {!r}" + raise DeserializationError(error.format(data, enum_obj)) from exc + try: + return enum_obj(str(data)) + except ValueError: + for enum_value in enum_obj: + if enum_value.value.lower() == str(data).lower(): + return enum_value + # We don't fail anymore for unknown value, we deserialize as a string + _LOGGER.warning("Deserializer is not able to find %s as valid enum in %s", data, enum_obj) + return Deserializer.deserialize_unicode(data) + + @staticmethod + def deserialize_bytearray(attr): + """Deserialize string into bytearray. + + :param str attr: response string to be deserialized. + :return: Deserialized bytearray + :rtype: bytearray + :raises TypeError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + return bytearray(b64decode(attr)) # type: ignore + + @staticmethod + def deserialize_base64(attr): + """Deserialize base64 encoded string into string. + + :param str attr: response string to be deserialized. + :return: Deserialized base64 string + :rtype: bytearray + :raises TypeError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + padding = "=" * (3 - (len(attr) + 3) % 4) # type: ignore + attr = attr + padding # type: ignore + encoded = attr.replace("-", "+").replace("_", "/") + return b64decode(encoded) + + @staticmethod + def deserialize_decimal(attr): + """Deserialize string into Decimal object. + + :param str attr: response string to be deserialized. + :return: Deserialized decimal + :raises DeserializationError: if string format invalid. + :rtype: decimal + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + return decimal.Decimal(str(attr)) # type: ignore + except decimal.DecimalException as err: + msg = "Invalid decimal {}".format(attr) + raise DeserializationError(msg) from err + + @staticmethod + def deserialize_long(attr): + """Deserialize string into long (Py2) or int (Py3). + + :param str attr: response string to be deserialized. + :return: Deserialized int + :rtype: long or int + :raises ValueError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + return _long_type(attr) # type: ignore + + @staticmethod + def deserialize_duration(attr): + """Deserialize ISO-8601 formatted string into TimeDelta object. + + :param str attr: response string to be deserialized. 
+ :return: Deserialized duration + :rtype: TimeDelta + :raises DeserializationError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + duration = isodate.parse_duration(attr) + except (ValueError, OverflowError, AttributeError) as err: + msg = "Cannot deserialize duration object." + raise DeserializationError(msg) from err + return duration + + @staticmethod + def deserialize_date(attr): + """Deserialize ISO-8601 formatted string into Date object. + + :param str attr: response string to be deserialized. + :return: Deserialized date + :rtype: Date + :raises DeserializationError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + if re.search(r"[^\W\d_]", attr, re.I + re.U): # type: ignore + raise DeserializationError("Date must have only digits and -. Received: %s" % attr) + # This must NOT use defaultmonth/defaultday. Using None ensure this raises an exception. + return isodate.parse_date(attr, defaultmonth=0, defaultday=0) + + @staticmethod + def deserialize_time(attr): + """Deserialize ISO-8601 formatted string into time object. + + :param str attr: response string to be deserialized. + :return: Deserialized time + :rtype: datetime.time + :raises DeserializationError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + if re.search(r"[^\W\d_]", attr, re.I + re.U): # type: ignore + raise DeserializationError("Date must have only digits and -. Received: %s" % attr) + return isodate.parse_time(attr) + + @staticmethod + def deserialize_rfc(attr): + """Deserialize RFC-1123 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :return: Deserialized RFC datetime + :rtype: Datetime + :raises DeserializationError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + parsed_date = email.utils.parsedate_tz(attr) # type: ignore + date_obj = datetime.datetime( + *parsed_date[:6], tzinfo=datetime.timezone(datetime.timedelta(minutes=(parsed_date[9] or 0) / 60)) + ) + if not date_obj.tzinfo: + date_obj = date_obj.astimezone(tz=TZ_UTC) + except ValueError as err: + msg = "Cannot deserialize to rfc datetime object." + raise DeserializationError(msg) from err + return date_obj + + @staticmethod + def deserialize_iso(attr): + """Deserialize ISO-8601 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :return: Deserialized ISO datetime + :rtype: Datetime + :raises DeserializationError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + attr = attr.upper() # type: ignore + match = Deserializer.valid_date.match(attr) + if not match: + raise ValueError("Invalid datetime string: " + attr) + + check_decimal = attr.split(".") + if len(check_decimal) > 1: + decimal_str = "" + for digit in check_decimal[1]: + if digit.isdigit(): + decimal_str += digit + else: + break + if len(decimal_str) > 6: + attr = attr.replace(decimal_str, decimal_str[0:6]) + + date_obj = isodate.parse_datetime(attr) + test_utc = date_obj.utctimetuple() + if test_utc.tm_year > 9999 or test_utc.tm_year < 1: + raise OverflowError("Hit max or min date") + except (ValueError, OverflowError, AttributeError) as err: + msg = "Cannot deserialize datetime object." + raise DeserializationError(msg) from err + return date_obj + + @staticmethod + def deserialize_unix(attr): + """Serialize Datetime object into IntTime format. + This is represented as seconds. 
+ + :param int attr: Object to be serialized. + :return: Deserialized datetime + :rtype: Datetime + :raises DeserializationError: if format invalid + """ + if isinstance(attr, ET.Element): + attr = int(attr.text) # type: ignore + try: + attr = int(attr) + date_obj = datetime.datetime.fromtimestamp(attr, TZ_UTC) + except ValueError as err: + msg = "Cannot deserialize to unix datetime object." + raise DeserializationError(msg) from err + return date_obj diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_utils/utils.py b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_utils/utils.py new file mode 100644 index 000000000000..35c9c836f85f --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_utils/utils.py @@ -0,0 +1,25 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from abc import ABC +from typing import Generic, TYPE_CHECKING, TypeVar + +if TYPE_CHECKING: + from .serialization import Deserializer, Serializer + + +TClient = TypeVar("TClient") +TConfig = TypeVar("TConfig") + + +class ClientMixinABC(ABC, Generic[TClient, TConfig]): + """DO NOT use this class. It is for internal typing use only.""" + + _client: TClient + _config: TConfig + _serialize: "Serializer" + _deserialize: "Deserializer" diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_version.py b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_version.py new file mode 100644 index 000000000000..be71c81bd282 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/_version.py @@ -0,0 +1,9 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +VERSION = "1.0.0b1" diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/aio/__init__.py b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/aio/__init__.py new file mode 100644 index 000000000000..ec7b738c9f3c --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/aio/__init__.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + +from ._client import TextClient # type: ignore + +try: + from ._patch import __all__ as _patch_all + from ._patch import * +except ImportError: + _patch_all = [] +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "TextClient", +] +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore + +_patch_sdk() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/aio/_client.py b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/aio/_client.py new file mode 100644 index 000000000000..e659bf3fdcfd --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/aio/_client.py @@ -0,0 +1,115 @@ +# pylint: disable=line-too-long,useless-suppression +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from copy import deepcopy +from typing import Any, Awaitable, TYPE_CHECKING, Union +from typing_extensions import Self + +from azure.core import AsyncPipelineClient +from azure.core.credentials import AzureKeyCredential +from azure.core.pipeline import policies +from azure.core.rest import AsyncHttpResponse, HttpRequest + +from .._utils.serialization import Deserializer, Serializer +from ._configuration import TextClientConfiguration +from ._operations import _TextClientOperationsMixin + +if TYPE_CHECKING: + from azure.core.credentials_async import AsyncTokenCredential + + +class TextClient(_TextClientOperationsMixin): + """The language service API is a suite of natural language processing (NLP) skills built with + best-in-class Microsoft machine learning algorithms. The API can be used to analyze + unstructured text for tasks such as sentiment analysis, key phrase extraction, language + detection and question answering. Further documentation can be found in https://learn.microsoft.com/azure/cognitive-services/language-service/overview + https://learn.microsoft.com/azure/cognitive-services/language-service/overview>`_.0. + + :param endpoint: Supported Cognitive Services endpoint (e.g., + https://.api.cognitiveservices.azure.com). Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Is either a key + credential type or a token credential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials_async.AsyncTokenCredential + :keyword api_version: The API version to use for this operation. Default value is + "2025-05-15-preview". Note that overriding this default value may result in unsupported + behavior. + :paramtype api_version: str + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
+ """ + + def __init__( + self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any + ) -> None: + _endpoint = "{Endpoint}/language" + self._config = TextClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) + + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=_endpoint, policies=_policies, **kwargs) + + self._serialize = Serializer() + self._deserialize = Deserializer() + self._serialize.client_side_validation = False + + def send_request( + self, request: HttpRequest, *, stream: bool = False, **kwargs: Any + ) -> Awaitable[AsyncHttpResponse]: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = await client.send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. + :rtype: ~azure.core.rest.AsyncHttpResponse + """ + + request_copy = deepcopy(request) + path_format_arguments = { + "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + + request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) + return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore + + async def close(self) -> None: + await self._client.close() + + async def __aenter__(self) -> Self: + await self._client.__aenter__() + return self + + async def __aexit__(self, *exc_details: Any) -> None: + await self._client.__aexit__(*exc_details) diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/aio/_configuration.py b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/aio/_configuration.py new file mode 100644 index 000000000000..5af3c20caf52 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/aio/_configuration.py @@ -0,0 +1,75 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from typing import Any, TYPE_CHECKING, Union + +from azure.core.credentials import AzureKeyCredential +from azure.core.pipeline import policies + +from .._version import VERSION + +if TYPE_CHECKING: + from azure.core.credentials_async import AsyncTokenCredential + + +class TextClientConfiguration: # pylint: disable=too-many-instance-attributes + """Configuration for TextClient. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param endpoint: Supported Cognitive Services endpoint (e.g., + https://.api.cognitiveservices.azure.com). Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Is either a key + credential type or a token credential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials_async.AsyncTokenCredential + :keyword api_version: The API version to use for this operation. Default value is + "2025-05-15-preview". Note that overriding this default value may result in unsupported + behavior. + :paramtype api_version: str + """ + + def __init__( + self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any + ) -> None: + api_version: str = kwargs.pop("api_version", "2025-05-15-preview") + + if endpoint is None: + raise ValueError("Parameter 'endpoint' must not be None.") + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + + self.endpoint = endpoint + self.credential = credential + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://cognitiveservices.azure.com/.default"]) + kwargs.setdefault("sdk_moniker", "ai-language-text/{}".format(VERSION)) + self.polling_interval = kwargs.get("polling_interval", 30) + self._configure(**kwargs) + + def _infer_policy(self, **kwargs): + if isinstance(self.credential, AzureKeyCredential): + return policies.AzureKeyCredentialPolicy(self.credential, "Ocp-Apim-Subscription-Key", **kwargs) + if hasattr(self.credential, "get_token"): + return policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs) + raise TypeError(f"Unsupported credential: {self.credential}") + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = self._infer_policy(**kwargs) diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/aio/_operations/__init__.py b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/aio/_operations/__init__.py new file mode 100644 
index 000000000000..46ed8f84233c --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/aio/_operations/__init__.py @@ -0,0 +1,23 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + +from ._operations import _TextClientOperationsMixin # type: ignore # pylint: disable=unused-import + +from ._patch import __all__ as _patch_all +from ._patch import * +from ._patch import patch_sdk as _patch_sdk + +__all__ = [] +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore +_patch_sdk() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/aio/_operations/_operations.py b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/aio/_operations/_operations.py new file mode 100644 index 000000000000..b9e8a21e3663 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/aio/_operations/_operations.py @@ -0,0 +1,618 @@ +# pylint: disable=line-too-long,useless-suppression +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from collections.abc import MutableMapping +from io import IOBase +import json +from typing import Any, AsyncIterator, Callable, Dict, IO, List, Optional, TypeVar, Union, cast, overload + +from azure.core import AsyncPipelineClient +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + StreamClosedError, + StreamConsumedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod +from azure.core.polling.async_base_polling import AsyncLROBasePolling +from azure.core.rest import AsyncHttpResponse, HttpRequest +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict + +from ... 
import models as _models +from ..._operations._operations import ( + build_text_analyze_text_cancel_job_request, + build_text_analyze_text_job_status_request, + build_text_analyze_text_request, + build_text_analyze_text_submit_job_request, +) +from ..._utils.model_base import SdkJSONEncoder, _deserialize, _failsafe_deserialize +from ..._utils.utils import ClientMixinABC +from .._configuration import TextClientConfiguration + +JSON = MutableMapping[str, Any] +_Unset: Any = object() +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +class _TextClientOperationsMixin( + ClientMixinABC[AsyncPipelineClient[HttpRequest, AsyncHttpResponse], TextClientConfiguration] +): + + @overload + async def analyze_text( + self, + body: _models.AnalyzeTextTask, + *, + show_stats: Optional[bool] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.AnalyzeTextTaskResult: + """Request text analysis over a collection of documents. + + :param body: The input documents to analyze. Required. + :type body: ~azure.ai.language.text.models.AnalyzeTextTask + :keyword show_stats: (Optional) if set to true, response will contain request and document + level statistics. Default value is None. + :paramtype show_stats: bool + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AnalyzeTextTaskResult. The AnalyzeTextTaskResult is compatible with MutableMapping + :rtype: ~azure.ai.language.text.models.AnalyzeTextTaskResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def analyze_text( + self, body: JSON, *, show_stats: Optional[bool] = None, content_type: str = "application/json", **kwargs: Any + ) -> _models.AnalyzeTextTaskResult: + """Request text analysis over a collection of documents. + + :param body: The input documents to analyze. Required. + :type body: JSON + :keyword show_stats: (Optional) if set to true, response will contain request and document + level statistics. Default value is None. + :paramtype show_stats: bool + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AnalyzeTextTaskResult. The AnalyzeTextTaskResult is compatible with MutableMapping + :rtype: ~azure.ai.language.text.models.AnalyzeTextTaskResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def analyze_text( + self, + body: IO[bytes], + *, + show_stats: Optional[bool] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.AnalyzeTextTaskResult: + """Request text analysis over a collection of documents. + + :param body: The input documents to analyze. Required. + :type body: IO[bytes] + :keyword show_stats: (Optional) if set to true, response will contain request and document + level statistics. Default value is None. + :paramtype show_stats: bool + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AnalyzeTextTaskResult. 
The AnalyzeTextTaskResult is compatible with MutableMapping + :rtype: ~azure.ai.language.text.models.AnalyzeTextTaskResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def analyze_text( + self, body: Union[_models.AnalyzeTextTask, JSON, IO[bytes]], *, show_stats: Optional[bool] = None, **kwargs: Any + ) -> _models.AnalyzeTextTaskResult: + """Request text analysis over a collection of documents. + + :param body: The input documents to analyze. Is one of the following types: AnalyzeTextTask, + JSON, IO[bytes] Required. + :type body: ~azure.ai.language.text.models.AnalyzeTextTask or JSON or IO[bytes] + :keyword show_stats: (Optional) if set to true, response will contain request and document + level statistics. Default value is None. + :paramtype show_stats: bool + :return: AnalyzeTextTaskResult. The AnalyzeTextTaskResult is compatible with MutableMapping + :rtype: ~azure.ai.language.text.models.AnalyzeTextTaskResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AnalyzeTextTaskResult] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_text_analyze_text_request( + show_stats=show_stats, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize(_models.ErrorResponse, response) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AnalyzeTextTaskResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def analyze_text_job_status( + self, + job_id: str, + *, + show_stats: Optional[bool] = None, + top: Optional[int] = None, + skip: Optional[int] = None, + **kwargs: Any + ) -> _models.AnalyzeTextJobState: + """Get analysis status and results. + + Get the status of an analysis job. A job can consist of one or more tasks. After all tasks + succeed, the job transitions to the succeeded state and results are available for each task. 
+ + :param job_id: job ID. Required. + :type job_id: str + :keyword show_stats: (Optional) if set to true, response will contain request and document + level statistics. Default value is None. + :paramtype show_stats: bool + :keyword top: The maximum number of resources to return from the collection. Default value is + None. + :paramtype top: int + :keyword skip: An offset into the collection of the first resource to be returned. Default + value is None. + :paramtype skip: int + :return: AnalyzeTextJobState. The AnalyzeTextJobState is compatible with MutableMapping + :rtype: ~azure.ai.language.text.models.AnalyzeTextJobState + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.AnalyzeTextJobState] = kwargs.pop("cls", None) + + _request = build_text_analyze_text_job_status_request( + job_id=job_id, + show_stats=show_stats, + top=top, + skip=skip, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize(_models.ErrorResponse, response) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AnalyzeTextJobState, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + async def _analyze_text_submit_job_initial( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + analysis_input: _models.MultiLanguageAnalysisInput = _Unset, + tasks: List[_models.AnalyzeTextLROTask] = _Unset, + display_name: Optional[str] = None, + default_language: Optional[str] = None, + cancel_after: Optional[float] = None, + **kwargs: Any + ) -> AsyncIterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + if body is _Unset: + if analysis_input is _Unset: + raise TypeError("missing required argument: analysis_input") + if tasks is _Unset: + raise TypeError("missing required argument: tasks") + body = { + "analysisInput": analysis_input, + "cancelAfter": cancel_after, + "defaultLanguage": 
default_language, + "displayName": display_name, + "tasks": tasks, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_text_analyze_text_submit_job_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize(_models.ErrorResponse, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Operation-Location"] = self._deserialize("str", response.headers.get("Operation-Location")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + async def begin_analyze_text_submit_job( + self, + *, + analysis_input: _models.MultiLanguageAnalysisInput, + tasks: List[_models.AnalyzeTextLROTask], + content_type: str = "application/json", + display_name: Optional[str] = None, + default_language: Optional[str] = None, + cancel_after: Optional[float] = None, + **kwargs: Any + ) -> AsyncLROPoller[None]: + """Submit a collection of text documents for analysis. Specify one or more unique tasks to be + executed as a long-running operation. + + :keyword analysis_input: Contains the input to be analyzed. Required. + :paramtype analysis_input: ~azure.ai.language.text.models.MultiLanguageAnalysisInput + :keyword tasks: List of tasks to be performed as part of the LRO. Required. + :paramtype tasks: list[~azure.ai.language.text.models.AnalyzeTextLROTask] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword display_name: Name for the task. Default value is None. + :paramtype display_name: str + :keyword default_language: Default language to use for records requesting automatic language + detection. Default value is None. + :paramtype default_language: str + :keyword cancel_after: Optional duration in seconds after which the job will be canceled if not + completed. Default value is None. + :paramtype cancel_after: float + :return: An instance of AsyncLROPoller that returns None + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_analyze_text_submit_job( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> AsyncLROPoller[None]: + """Submit a collection of text documents for analysis. 
Specify one or more unique tasks to be + executed as a long-running operation. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns None + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_analyze_text_submit_job( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> AsyncLROPoller[None]: + """Submit a collection of text documents for analysis. Specify one or more unique tasks to be + executed as a long-running operation. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns None + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def begin_analyze_text_submit_job( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + analysis_input: _models.MultiLanguageAnalysisInput = _Unset, + tasks: List[_models.AnalyzeTextLROTask] = _Unset, + display_name: Optional[str] = None, + default_language: Optional[str] = None, + cancel_after: Optional[float] = None, + **kwargs: Any + ) -> AsyncLROPoller[None]: + """Submit a collection of text documents for analysis. Specify one or more unique tasks to be + executed as a long-running operation. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword analysis_input: Contains the input to be analyzed. Required. + :paramtype analysis_input: ~azure.ai.language.text.models.MultiLanguageAnalysisInput + :keyword tasks: List of tasks to be performed as part of the LRO. Required. + :paramtype tasks: list[~azure.ai.language.text.models.AnalyzeTextLROTask] + :keyword display_name: Name for the task. Default value is None. + :paramtype display_name: str + :keyword default_language: Default language to use for records requesting automatic language + detection. Default value is None. + :paramtype default_language: str + :keyword cancel_after: Optional duration in seconds after which the job will be canceled if not + completed. Default value is None. 
+ :paramtype cancel_after: float + :return: An instance of AsyncLROPoller that returns None + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._analyze_text_submit_job_initial( + body=body, + analysis_input=analysis_input, + tasks=tasks, + display_name=display_name, + default_language=default_language, + cancel_after=cancel_after, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + path_format_arguments = { + "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, + AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs), + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + async def _analyze_text_cancel_job_initial(self, job_id: str, **kwargs: Any) -> AsyncIterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + _request = build_text_analyze_text_cancel_job_request( + job_id=job_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize(_models.ErrorResponse, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + 
response_headers["Operation-Location"] = self._deserialize("str", response.headers.get("Operation-Location")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def begin_analyze_text_cancel_job(self, job_id: str, **kwargs: Any) -> AsyncLROPoller[None]: + """Cancel a long-running Text Analysis job. + + Cancel a long-running Text Analysis job. + + :param job_id: The job ID to cancel. Required. + :type job_id: str + :return: An instance of AsyncLROPoller that returns None + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._analyze_text_cancel_job_initial( + job_id=job_id, cls=lambda x, y, z: x, headers=_headers, params=_params, **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + path_format_arguments = { + "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, + AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs), + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/aio/_operations/_patch.py b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/aio/_operations/_patch.py new file mode 100644 index 000000000000..8bcb627aa475 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/aio/_operations/_patch.py @@ -0,0 +1,21 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. 
+ + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/aio/_patch.py b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/aio/_patch.py new file mode 100644 index 000000000000..8bcb627aa475 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/aio/_patch.py @@ -0,0 +1,21 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/models/__init__.py b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/models/__init__.py new file mode 100644 index 000000000000..8e2a12ae589e --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/models/__init__.py @@ -0,0 +1,402 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + + +from ._models import ( # type: ignore + AbstractiveSummarizationLROResult, + AbstractiveSummarizationLROTask, + AbstractiveSummarizationResult, + AbstractiveSummarizationTaskParameters, + AbstractiveSummary, + AbstractiveSummaryDocumentResultWithDetectedLanguage, + AgeMetadata, + AllowOverlapEntityPolicyType, + AnalyzeTextEntityLinkingInput, + AnalyzeTextEntityRecognitionInput, + AnalyzeTextJobState, + AnalyzeTextKeyPhraseExtractionInput, + AnalyzeTextLROResult, + AnalyzeTextLROTask, + AnalyzeTextLanguageDetectionInput, + AnalyzeTextPiiEntitiesRecognitionInput, + AnalyzeTextSentimentAnalysisInput, + AnalyzeTextTask, + AnalyzeTextTaskResult, + AreaMetadata, + BaseEntityOverlapPolicy, + BaseMetadata, + BaseRedactionPolicy, + CharacterMaskPolicyType, + ClassificationDocumentResultWithDetectedLanguage, + ClassificationResult, + CurrencyMetadata, + CustomEntitiesLROTask, + CustomEntitiesResult, + CustomEntitiesTaskParameters, + CustomEntityRecognitionLROResult, + CustomLabelClassificationResult, + CustomMultiLabelClassificationLROResult, + CustomMultiLabelClassificationLROTask, + CustomMultiLabelClassificationTaskParameters, + CustomSingleLabelClassificationLROResult, + CustomSingleLabelClassificationLROTask, + CustomSingleLabelClassificationTaskParameters, + DateMetadata, + DateTimeMetadata, + DateValue, + DetectedLanguage, + DocumentError, + DocumentStatistics, + DocumentWarning, + EntitiesDocumentResultWithDetectedLanguage, + EntitiesDocumentResultWithMetadata, + EntitiesDocumentResultWithMetadataDetectedLanguage, + EntitiesLROTask, + EntitiesResult, + EntitiesTaskParameters, + EntitiesTaskResult, + EntitiesWithMetadataAutoResult, + Entity, + EntityInferenceOptions, + EntityLinkingLROResult, + EntityLinkingLROTask, + EntityLinkingResult, + EntityLinkingResultWithDetectedLanguage, + EntityLinkingTaskParameters, + EntityLinkingTaskResult, + EntityMaskPolicyType, + EntityRecognitionLROResult, + EntitySynonym, + EntitySynonyms, + EntityTag, + EntityWithMetadata, + Error, + ErrorResponse, + ExtractedSummaryDocumentResultWithDetectedLanguage, + ExtractedSummarySentence, + ExtractiveSummarizationLROResult, + ExtractiveSummarizationLROTask, + ExtractiveSummarizationResult, + ExtractiveSummarizationTaskParameters, + FhirBundle, + HealthcareAssertion, + HealthcareEntitiesDocumentResultWithDocumentDetectedLanguage, + HealthcareEntity, + HealthcareEntityLink, + HealthcareLROResult, + HealthcareLROTask, + HealthcareRelation, + HealthcareRelationEntity, + HealthcareResult, + HealthcareTaskParameters, + InformationMetadata, + InnerErrorModel, + KeyPhraseExtractionLROResult, + KeyPhraseLROTask, + KeyPhraseResult, + KeyPhraseTaskParameters, + KeyPhraseTaskResult, + KeyPhrasesDocumentResultWithDetectedLanguage, + LanguageDetectionAnalysisInput, + LanguageDetectionDocumentResult, + LanguageDetectionResult, + LanguageDetectionTaskParameters, + LanguageDetectionTaskResult, + LanguageInput, + LengthMetadata, + LinkedEntity, + Match, + MatchLongestEntityPolicyType, + MultiLanguageAnalysisInput, + MultiLanguageInput, + NoMaskPolicyType, + NumberMetadata, + NumericRangeMetadata, + OrdinalMetadata, + PiiEntityRecognitionLROResult, + PiiEntityWithTags, + PiiLROTask, + PiiResult, + PiiResultWithDetectedLanguage, + PiiTaskParameters, + PiiTaskResult, + RequestStatistics, + 
SentenceAssessment, + SentenceSentiment, + SentenceTarget, + SentimentAnalysisLROTask, + SentimentAnalysisTaskParameters, + SentimentConfidenceScores, + SentimentDocumentResultWithDetectedLanguage, + SentimentLROResult, + SentimentResponse, + SentimentTaskResult, + SpeedMetadata, + SummaryContext, + TargetConfidenceScoreLabel, + TargetRelation, + Tasks, + TemperatureMetadata, + TemporalSetMetadata, + TemporalSpanMetadata, + TemporalSpanValues, + TimeMetadata, + ValueExclusionPolicy, + VolumeMetadata, + WeightMetadata, +) + +from ._enums import ( # type: ignore + AgeUnit, + AnalyzeTextLROResultsKind, + AnalyzeTextLROTaskKind, + AnalyzeTextTaskKind, + AnalyzeTextTaskResultsKind, + AreaUnit, + Association, + Certainty, + Conditionality, + DocumentSentimentValue, + EntityCategory, + ErrorCode, + ExtractiveSummarizationSortingCriteria, + FhirVersion, + HealthcareDocumentType, + HealthcareEntityCategory, + InformationUnit, + InnerErrorCode, + LengthUnit, + MetadataKind, + NumberKind, + PiiCategoriesExclude, + PiiCategory, + PiiDomain, + PolicyKind, + RangeInclusivity, + RangeKind, + RedactionCharacter, + RedactionPolicyKind, + RelationType, + RelativeTo, + ScriptCode, + ScriptKind, + SentenceSentimentValue, + SpeedUnit, + State, + StringIndexType, + SummaryLengthBucket, + TargetRelationType, + TemperatureUnit, + TemporalModifier, + Temporality, + TokenSentimentValue, + VolumeUnit, + WarningCodeValue, + WeightUnit, +) +from ._patch import __all__ as _patch_all +from ._patch import * +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "AbstractiveSummarizationLROResult", + "AbstractiveSummarizationLROTask", + "AbstractiveSummarizationResult", + "AbstractiveSummarizationTaskParameters", + "AbstractiveSummary", + "AbstractiveSummaryDocumentResultWithDetectedLanguage", + "AgeMetadata", + "AllowOverlapEntityPolicyType", + "AnalyzeTextEntityLinkingInput", + "AnalyzeTextEntityRecognitionInput", + "AnalyzeTextJobState", + "AnalyzeTextKeyPhraseExtractionInput", + "AnalyzeTextLROResult", + "AnalyzeTextLROTask", + "AnalyzeTextLanguageDetectionInput", + "AnalyzeTextPiiEntitiesRecognitionInput", + "AnalyzeTextSentimentAnalysisInput", + "AnalyzeTextTask", + "AnalyzeTextTaskResult", + "AreaMetadata", + "BaseEntityOverlapPolicy", + "BaseMetadata", + "BaseRedactionPolicy", + "CharacterMaskPolicyType", + "ClassificationDocumentResultWithDetectedLanguage", + "ClassificationResult", + "CurrencyMetadata", + "CustomEntitiesLROTask", + "CustomEntitiesResult", + "CustomEntitiesTaskParameters", + "CustomEntityRecognitionLROResult", + "CustomLabelClassificationResult", + "CustomMultiLabelClassificationLROResult", + "CustomMultiLabelClassificationLROTask", + "CustomMultiLabelClassificationTaskParameters", + "CustomSingleLabelClassificationLROResult", + "CustomSingleLabelClassificationLROTask", + "CustomSingleLabelClassificationTaskParameters", + "DateMetadata", + "DateTimeMetadata", + "DateValue", + "DetectedLanguage", + "DocumentError", + "DocumentStatistics", + "DocumentWarning", + "EntitiesDocumentResultWithDetectedLanguage", + "EntitiesDocumentResultWithMetadata", + "EntitiesDocumentResultWithMetadataDetectedLanguage", + "EntitiesLROTask", + "EntitiesResult", + "EntitiesTaskParameters", + "EntitiesTaskResult", + "EntitiesWithMetadataAutoResult", + "Entity", + "EntityInferenceOptions", + "EntityLinkingLROResult", + "EntityLinkingLROTask", + "EntityLinkingResult", + "EntityLinkingResultWithDetectedLanguage", + "EntityLinkingTaskParameters", + "EntityLinkingTaskResult", + "EntityMaskPolicyType", + 
"EntityRecognitionLROResult", + "EntitySynonym", + "EntitySynonyms", + "EntityTag", + "EntityWithMetadata", + "Error", + "ErrorResponse", + "ExtractedSummaryDocumentResultWithDetectedLanguage", + "ExtractedSummarySentence", + "ExtractiveSummarizationLROResult", + "ExtractiveSummarizationLROTask", + "ExtractiveSummarizationResult", + "ExtractiveSummarizationTaskParameters", + "FhirBundle", + "HealthcareAssertion", + "HealthcareEntitiesDocumentResultWithDocumentDetectedLanguage", + "HealthcareEntity", + "HealthcareEntityLink", + "HealthcareLROResult", + "HealthcareLROTask", + "HealthcareRelation", + "HealthcareRelationEntity", + "HealthcareResult", + "HealthcareTaskParameters", + "InformationMetadata", + "InnerErrorModel", + "KeyPhraseExtractionLROResult", + "KeyPhraseLROTask", + "KeyPhraseResult", + "KeyPhraseTaskParameters", + "KeyPhraseTaskResult", + "KeyPhrasesDocumentResultWithDetectedLanguage", + "LanguageDetectionAnalysisInput", + "LanguageDetectionDocumentResult", + "LanguageDetectionResult", + "LanguageDetectionTaskParameters", + "LanguageDetectionTaskResult", + "LanguageInput", + "LengthMetadata", + "LinkedEntity", + "Match", + "MatchLongestEntityPolicyType", + "MultiLanguageAnalysisInput", + "MultiLanguageInput", + "NoMaskPolicyType", + "NumberMetadata", + "NumericRangeMetadata", + "OrdinalMetadata", + "PiiEntityRecognitionLROResult", + "PiiEntityWithTags", + "PiiLROTask", + "PiiResult", + "PiiResultWithDetectedLanguage", + "PiiTaskParameters", + "PiiTaskResult", + "RequestStatistics", + "SentenceAssessment", + "SentenceSentiment", + "SentenceTarget", + "SentimentAnalysisLROTask", + "SentimentAnalysisTaskParameters", + "SentimentConfidenceScores", + "SentimentDocumentResultWithDetectedLanguage", + "SentimentLROResult", + "SentimentResponse", + "SentimentTaskResult", + "SpeedMetadata", + "SummaryContext", + "TargetConfidenceScoreLabel", + "TargetRelation", + "Tasks", + "TemperatureMetadata", + "TemporalSetMetadata", + "TemporalSpanMetadata", + "TemporalSpanValues", + "TimeMetadata", + "ValueExclusionPolicy", + "VolumeMetadata", + "WeightMetadata", + "AgeUnit", + "AnalyzeTextLROResultsKind", + "AnalyzeTextLROTaskKind", + "AnalyzeTextTaskKind", + "AnalyzeTextTaskResultsKind", + "AreaUnit", + "Association", + "Certainty", + "Conditionality", + "DocumentSentimentValue", + "EntityCategory", + "ErrorCode", + "ExtractiveSummarizationSortingCriteria", + "FhirVersion", + "HealthcareDocumentType", + "HealthcareEntityCategory", + "InformationUnit", + "InnerErrorCode", + "LengthUnit", + "MetadataKind", + "NumberKind", + "PiiCategoriesExclude", + "PiiCategory", + "PiiDomain", + "PolicyKind", + "RangeInclusivity", + "RangeKind", + "RedactionCharacter", + "RedactionPolicyKind", + "RelationType", + "RelativeTo", + "ScriptCode", + "ScriptKind", + "SentenceSentimentValue", + "SpeedUnit", + "State", + "StringIndexType", + "SummaryLengthBucket", + "TargetRelationType", + "TemperatureUnit", + "TemporalModifier", + "Temporality", + "TokenSentimentValue", + "VolumeUnit", + "WarningCodeValue", + "WeightUnit", +] +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore +_patch_sdk() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/models/_enums.py b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/models/_enums.py new file mode 100644 index 000000000000..69193c93a210 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/models/_enums.py @@ -0,0 +1,1979 @@ +# pylint: disable=too-many-lines +# 
coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from enum import Enum +from azure.core import CaseInsensitiveEnumMeta + + +class AgeUnit(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The Age Unit of measurement.""" + + UNSPECIFIED = "Unspecified" + """Unspecified time period""" + YEAR = "Year" + """Time period of a year""" + MONTH = "Month" + """Time period of a month""" + WEEK = "Week" + """Time period of a week""" + DAY = "Day" + """Time period of a day""" + + +class AnalyzeTextLROResultsKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The kind of the response object returned by the analyze-text long running task.""" + + SENTIMENT_ANALYSIS_LRO_RESULTS = "SentimentAnalysisLROResults" + """Sentiment analysis LRO results""" + ENTITY_RECOGNITION_LRO_RESULTS = "EntityRecognitionLROResults" + """Entity recognition LRO results""" + PII_ENTITY_RECOGNITION_LRO_RESULTS = "PiiEntityRecognitionLROResults" + """PII entity recognition LRO results""" + KEY_PHRASE_EXTRACTION_LRO_RESULTS = "KeyPhraseExtractionLROResults" + """Key phrase extraction LRO results""" + ENTITY_LINKING_LRO_RESULTS = "EntityLinkingLROResults" + """Entity linking LRO results""" + HEALTHCARE_LRO_RESULTS = "HealthcareLROResults" + """Healthcare LRO results""" + CUSTOM_ENTITY_RECOGNITION_LRO_RESULTS = "CustomEntityRecognitionLROResults" + """Custom entity recognition LRO results""" + CUSTOM_SINGLE_LABEL_CLASSIFICATION_LRO_RESULTS = "CustomSingleLabelClassificationLROResults" + """Custom single label classification LRO results""" + CUSTOM_MULTI_LABEL_CLASSIFICATION_LRO_RESULTS = "CustomMultiLabelClassificationLROResults" + """Custom multi label classification LRO results""" + EXTRACTIVE_SUMMARIZATION_LRO_RESULTS = "ExtractiveSummarizationLROResults" + """Extractive summarization LRO results""" + ABSTRACTIVE_SUMMARIZATION_LRO_RESULTS = "AbstractiveSummarizationLROResults" + """Abstractive summarization LRO results""" + + +class AnalyzeTextLROTaskKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The kind of the long running analyze text tasks supported.""" + + SENTIMENT_ANALYSIS = "SentimentAnalysis" + """Sentiment analysis task""" + ENTITY_RECOGNITION = "EntityRecognition" + """Entity recognition task""" + PII_ENTITY_RECOGNITION = "PiiEntityRecognition" + """PII entity recognition task""" + KEY_PHRASE_EXTRACTION = "KeyPhraseExtraction" + """Key phrase extraction task""" + ENTITY_LINKING = "EntityLinking" + """Entity linking task""" + HEALTHCARE = "Healthcare" + """Healthcare task""" + CUSTOM_ENTITY_RECOGNITION = "CustomEntityRecognition" + """Custom entity recognition task""" + CUSTOM_SINGLE_LABEL_CLASSIFICATION = "CustomSingleLabelClassification" + """Custom single label classification task""" + CUSTOM_MULTI_LABEL_CLASSIFICATION = "CustomMultiLabelClassification" + """Custom multi label classification task""" + EXTRACTIVE_SUMMARIZATION = "ExtractiveSummarization" + """Extractive summarization task""" + ABSTRACTIVE_SUMMARIZATION = "AbstractiveSummarization" + """Abstractive summarization task""" + + +class AnalyzeTextTaskKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The kind of the 
analyze-text tasks supported.""" + + SENTIMENT_ANALYSIS = "SentimentAnalysis" + """Sentiment analysis task""" + ENTITY_RECOGNITION = "EntityRecognition" + """Entity recognition task""" + PII_ENTITY_RECOGNITION = "PiiEntityRecognition" + """PII entity recognition task""" + KEY_PHRASE_EXTRACTION = "KeyPhraseExtraction" + """Key phrase extraction task""" + LANGUAGE_DETECTION = "LanguageDetection" + """Language detection task""" + ENTITY_LINKING = "EntityLinking" + """Entity linking task""" + + +class AnalyzeTextTaskResultsKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The kind of the response object returned by the analyze-text task.""" + + SENTIMENT_ANALYSIS_RESULTS = "SentimentAnalysisResults" + """Sentiment analysis results""" + ENTITY_RECOGNITION_RESULTS = "EntityRecognitionResults" + """Entity recognition results""" + PII_ENTITY_RECOGNITION_RESULTS = "PiiEntityRecognitionResults" + """PII entity recognition results""" + KEY_PHRASE_EXTRACTION_RESULTS = "KeyPhraseExtractionResults" + """Key phrase extraction results""" + LANGUAGE_DETECTION_RESULTS = "LanguageDetectionResults" + """Language detection results""" + ENTITY_LINKING_RESULTS = "EntityLinkingResults" + """Entity linking results""" + + +class AreaUnit(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The area unit of measurement.""" + + UNSPECIFIED = "Unspecified" + """Unspecified area unit""" + SQUARE_KILOMETER = "SquareKilometer" + """Area unit in square kilometers""" + SQUARE_HECTOMETER = "SquareHectometer" + """Area unit in square hectometers""" + SQUARE_DECAMETER = "SquareDecameter" + """Area unit in square decameters""" + SQUARE_DECIMETER = "SquareDecimeter" + """Area unit in square decimeters""" + SQUARE_METER = "SquareMeter" + """Area unit in square meters""" + SQUARE_CENTIMETER = "SquareCentimeter" + """Area unit in square centimeters""" + SQUARE_MILLIMETER = "SquareMillimeter" + """Area unit in square millimeters""" + SQUARE_INCH = "SquareInch" + """Area unit in square inches""" + SQUARE_FOOT = "SquareFoot" + """Area unit in square feet""" + SQUARE_MILE = "SquareMile" + """Area unit in square miles""" + SQUARE_YARD = "SquareYard" + """Area unit in square yards""" + ACRE = "Acre" + """Area unit in acres""" + + +class Association(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Describes whether the entity is the subject of the text or whether it describes someone else.""" + + SUBJECT = "subject" + """Subject association""" + OTHER = "other" + """Other association""" + + +class Certainty(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Describes the entity's certainty and polarity.""" + + POSITIVE = "positive" + """Positive certainty""" + POSITIVE_POSSIBLE = "positivePossible" + """Possibly positive certainty""" + NEUTRAL_POSSIBLE = "neutralPossible" + """Possibly neutral certainty""" + NEGATIVE_POSSIBLE = "negativePossible" + """Possibly negative certainty""" + NEGATIVE = "negative" + """Negative certainty""" + + +class Conditionality(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Describes any conditionality on the entity.""" + + HYPOTHETICAL = "hypothetical" + """Hypothetical conditionality""" + CONDITIONAL = "conditional" + """Conditional conditionality""" + + +class DocumentSentimentValue(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Predicted sentiment for the document (Negative, Neutral, Positive, or Mixed).""" + + POSITIVE = "positive" + """Positive statement""" + NEUTRAL = "neutral" + """Neutral statement""" + NEGATIVE = "negative" + """Negative statement""" + MIXED = "mixed" + """Mixed statement"""
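These enums all mix in `str` and use `azure.core.CaseInsensitiveEnumMeta`, so member-name access tolerates any casing while each member still compares equal to its raw wire value. A minimal sketch of that behavior (the import path follows this package's layout):

from azure.ai.language.text.models import AnalyzeTextTaskKind

# Name lookup is case-insensitive: the metaclass upper-cases the key first.
assert AnalyzeTextTaskKind["sentiment_analysis"] is AnalyzeTextTaskKind.SENTIMENT_ANALYSIS

# Members subclass str, so they compare equal to the value sent on the wire.
assert AnalyzeTextTaskKind.SENTIMENT_ANALYSIS == "SentimentAnalysis"
print(AnalyzeTextTaskKind.SENTIMENT_ANALYSIS.value)  # SentimentAnalysis

Note that constructing a member from its value, e.g. AnalyzeTextTaskKind("SentimentAnalysis"), remains case-sensitive; only name access goes through the metaclass.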
+ + +class EntityCategory(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Contains all the entity categories detected by entity recognition.""" + + ADDRESS = "Address" + """Specific street-level mentions of locations: house/building numbers, streets, avenues, + highways, intersections referenced by name.""" + NUMERIC = "Numeric" + """Numeric values, including digits and number words.""" + AGE = "Age" + """Age-related values.""" + CURRENCY = "Currency" + """Currency-related values.""" + NUMBER = "Number" + """Numbers without a unit.""" + NUMBER_RANGE = "NumberRange" + """Range of numbers.""" + PERCENTAGE = "Percentage" + """Percentage-related values.""" + ORDINAL = "Ordinal" + """Ordinal numbers.""" + TEMPERATURE = "Temperature" + """Temperature-related values.""" + DIMENSION = "Dimension" + """Dimension of measurements.""" + LENGTH = "Length" + """Length of an object.""" + WEIGHT = "Weight" + """Weight of an object.""" + HEIGHT = "Height" + """Height of an object.""" + SPEED = "Speed" + """Speed of an object.""" + AREA = "Area" + """Area of an object.""" + VOLUME = "Volume" + """Volume of an object.""" + INFORMATION = "Information" + """Unit of measure for digital information.""" + TEMPORAL = "Temporal" + """Items relating to time.""" + DATE = "Date" + """Calendar dates.""" + TIME = "Time" + """Times of day.""" + DATE_TIME = "DateTime" + """Calendar dates with time.""" + DATE_RANGE = "DateRange" + """Range of dates.""" + TIME_RANGE = "TimeRange" + """Range of times.""" + DATE_TIME_RANGE = "DateTimeRange" + """Range of date and time.""" + DURATION = "Duration" + """Duration of time.""" + SET_TEMPORAL = "SetTemporal" + """Set of time-related values.""" + EVENT = "Event" + """Social, sports, business, political, educational, natural, historical, criminal, violent, + legal, military events with a timed period.""" + SPORTS_EVENT = "SportsEvent" + """Sports event-related values.""" + CULTURAL_EVENT = "CulturalEvent" + """Cultural event-related values.""" + NATURAL_EVENT = "NaturalEvent" + """Natural event-related values.""" + LOCATION = "Location" + """Particular point or place in physical space.""" + GPE = "GPE" + """Cities, countries/regions, states.""" + CITY = "City" + """City-related values.""" + STATE = "State" + """State-related values.""" + COUNTRY_REGION = "CountryRegion" + """Country or region-related values.""" + CONTINENT = "Continent" + """Continent-related values.""" + STRUCTURAL = "Structural" + """Manmade structures.""" + AIRPORT = "Airport" + """Airports.""" + GEOLOGICAL = "Geological" + """Geographic and natural features such as rivers, oceans, and deserts.""" + ORGANIZATION = "Organization" + """Corporations, agencies, and other groups of people defined by some established organizational + structure. These labels can include companies, political parties/movements, musical bands, + sport clubs, government bodies, and public organizations. Nationalities or religions are not + ORGANIZATION.""" + ORGANIZATION_MEDICAL = "OrganizationMedical" + """Medical companies and groups.""" + ORGANIZATION_STOCK_EXCHANGE = "OrganizationStockExchange" + """Stock exchange groups.""" + ORGANIZATION_SPORTS = "OrganizationSports" + """Sports-related organizations.""" + PERSON = "Person" + """First, last, and middle names, names of fictional characters, and aliases. Titles, such as + 'Mr.'
or 'President', are not considered part of the named entity.""" + PERSON_TYPE = "PersonType" + """Human roles classified by a group membership.""" + EMAIL = "Email" + """Email addresses.""" + URL = "URL" + """URLs to websites.""" + IP = "IP" + """Network IP addresses.""" + PHONE_NUMBER = "PhoneNumber" + """Phone numbers (US and EU phone numbers only).""" + PRODUCT = "Product" + """Single or group of commercial, consumable objects, electronics, vehicles, food groups.""" + COMPUTING_PRODUCT = "ComputingProduct" + """Computing products.""" + SKILL = "Skill" + """A capability, skill, or expertise.""" + + +class ErrorCode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Human-readable error code.""" + + INVALID_REQUEST = "InvalidRequest" + """Invalid request error""" + INVALID_ARGUMENT = "InvalidArgument" + """Invalid argument error""" + UNAUTHORIZED = "Unauthorized" + """Unauthorized access error""" + FORBIDDEN = "Forbidden" + """Forbidden access error""" + NOT_FOUND = "NotFound" + """Not found error""" + PROJECT_NOT_FOUND = "ProjectNotFound" + """Project not found error""" + OPERATION_NOT_FOUND = "OperationNotFound" + """Operation not found error""" + AZURE_COGNITIVE_SEARCH_NOT_FOUND = "AzureCognitiveSearchNotFound" + """Azure Cognitive Search not found error""" + AZURE_COGNITIVE_SEARCH_INDEX_NOT_FOUND = "AzureCognitiveSearchIndexNotFound" + """Azure Cognitive Search index not found error""" + TOO_MANY_REQUESTS = "TooManyRequests" + """Too many requests error""" + AZURE_COGNITIVE_SEARCH_THROTTLING = "AzureCognitiveSearchThrottling" + """Azure Cognitive Search throttling error""" + AZURE_COGNITIVE_SEARCH_INDEX_LIMIT_REACHED = "AzureCognitiveSearchIndexLimitReached" + """Azure Cognitive Search index limit reached error""" + INTERNAL_SERVER_ERROR = "InternalServerError" + """Internal server error""" + SERVICE_UNAVAILABLE = "ServiceUnavailable" + """Service unavailable error""" + TIMEOUT = "Timeout" + """Timeout error""" + QUOTA_EXCEEDED = "QuotaExceeded" + """Quota exceeded error""" + CONFLICT = "Conflict" + """Conflict error""" + WARNING = "Warning" + """Warning error""" + + +class ExtractiveSummarizationSortingCriteria(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The sorting criteria to use for the results of Extractive Summarization.""" + + OFFSET = "Offset" + """Indicates that results should be sorted in order of appearance in the text.""" + RANK = "Rank" + """Indicates that results should be sorted in order of importance (i.e.
rank score) according to + the model.""" + + +class FhirVersion(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The FHIR Spec version.""" + + ENUM_4_0_1 = "4.0.1" + """Version 4.0.1""" + + +class HealthcareDocumentType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Document type.""" + + NONE = "None" + """None document type""" + CLINICAL_TRIAL = "ClinicalTrial" + """Clinical trial document type""" + DISCHARGE_SUMMARY = "DischargeSummary" + """Discharge summary document type""" + PROGRESS_NOTE = "ProgressNote" + """Progress note document type""" + HISTORY_AND_PHYSICAL = "HistoryAndPhysical" + """History and physical document type""" + CONSULT = "Consult" + """Consult document type""" + IMAGING = "Imaging" + """Imaging document type""" + PATHOLOGY = "Pathology" + """Pathology document type""" + PROCEDURE_NOTE = "ProcedureNote" + """Procedure note document type""" + + +class HealthcareEntityCategory(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Healthcare Entity Category.""" + + BODY_STRUCTURE = "BodyStructure" + """Body structure""" + AGE = "Age" + """Age""" + GENDER = "Gender" + """Gender""" + EXAMINATION_NAME = "ExaminationName" + """Examination name""" + DATE = "Date" + """Date""" + DIRECTION = "Direction" + """Direction""" + FREQUENCY = "Frequency" + """Frequency""" + MEASUREMENT_VALUE = "MeasurementValue" + """Measurement value""" + MEASUREMENT_UNIT = "MeasurementUnit" + """Measurement unit""" + RELATIONAL_OPERATOR = "RelationalOperator" + """Relational operator""" + TIME = "Time" + """Time""" + GENE_OR_PROTEIN = "GeneOrProtein" + """Gene or protein""" + VARIANT = "Variant" + """Variant""" + ADMINISTRATIVE_EVENT = "AdministrativeEvent" + """Administrative event""" + CARE_ENVIRONMENT = "CareEnvironment" + """Care environment""" + HEALTHCARE_PROFESSION = "HealthcareProfession" + """Healthcare profession""" + DIAGNOSIS = "Diagnosis" + """Diagnosis""" + SYMPTOM_OR_SIGN = "SymptomOrSign" + """Symptom or sign""" + CONDITION_QUALIFIER = "ConditionQualifier" + """Condition qualifier""" + MEDICATION_CLASS = "MedicationClass" + """Medication class""" + MEDICATION_NAME = "MedicationName" + """Medication name""" + DOSAGE = "Dosage" + """Dosage""" + MEDICATION_FORM = "MedicationForm" + """Medication form""" + MEDICATION_ROUTE = "MedicationRoute" + """Medication route""" + FAMILY_RELATION = "FamilyRelation" + """Family relation""" + TREATMENT_NAME = "TreatmentName" + """Treatment name""" + ETHNICITY = "Ethnicity" + """Ethnicity""" + COURSE = "Course" + """Course""" + EXPRESSION = "Expression" + """Expression""" + MUTATION_TYPE = "MutationType" + """Mutation type""" + CONDITION_SCALE = "ConditionScale" + """Condition scale""" + ALLERGEN = "Allergen" + """Allergen""" + EMPLOYMENT = "Employment" + """Employment""" + LIVING_STATUS = "LivingStatus" + """Living status""" + SUBSTANCE_USE = "SubstanceUse" + """Substance use""" + SUBSTANCE_USE_AMOUNT = "SubstanceUseAmount" + """Substance use amount""" + + +class InformationUnit(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The information (data) Unit of measurement.""" + + UNSPECIFIED = "Unspecified" + """Unspecified data size unit""" + BIT = "Bit" + """Data size unit in bits""" + KILOBIT = "Kilobit" + """Data size unit in kilobits""" + MEGABIT = "Megabit" + """Data size unit in megabits""" + GIGABIT = "Gigabit" + """Data size unit in gigabits""" + TERABIT = "Terabit" + """Data size unit in terabits""" + PETABIT = "Petabit" + """Data size unit in petabits""" + BYTE = "Byte" + """Data size unit in bytes""" + KILOBYTE = "Kilobyte" + """Data 
size unit in kilobytes""" + MEGABYTE = "Megabyte" + """Data size unit in megabytes""" + GIGABYTE = "Gigabyte" + """Data size unit in gigabytes""" + TERABYTE = "Terabyte" + """Data size unit in terabytes""" + PETABYTE = "Petabyte" + """Data size unit in petabytes""" + + +class InnerErrorCode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Human-readable error code.""" + + INVALID_REQUEST = "InvalidRequest" + """Invalid request error""" + INVALID_PARAMETER_VALUE = "InvalidParameterValue" + """Invalid parameter value error""" + KNOWLEDGE_BASE_NOT_FOUND = "KnowledgeBaseNotFound" + """Knowledge base not found error""" + AZURE_COGNITIVE_SEARCH_NOT_FOUND = "AzureCognitiveSearchNotFound" + """Azure Cognitive Search not found error""" + AZURE_COGNITIVE_SEARCH_THROTTLING = "AzureCognitiveSearchThrottling" + """Azure Cognitive Search throttling error""" + EXTRACTION_FAILURE = "ExtractionFailure" + """Extraction failure error""" + INVALID_REQUEST_BODY_FORMAT = "InvalidRequestBodyFormat" + """Invalid request body format error""" + EMPTY_REQUEST = "EmptyRequest" + """Empty request error""" + MISSING_INPUT_DOCUMENTS = "MissingInputDocuments" + """Missing input documents error""" + INVALID_DOCUMENT = "InvalidDocument" + """Invalid document error""" + MODEL_VERSION_INCORRECT = "ModelVersionIncorrect" + """Model version incorrect error""" + INVALID_DOCUMENT_BATCH = "InvalidDocumentBatch" + """Invalid document batch error""" + UNSUPPORTED_LANGUAGE_CODE = "UnsupportedLanguageCode" + """Unsupported language code error""" + INVALID_COUNTRY_HINT = "InvalidCountryHint" + """Invalid country hint error""" + + +class LengthUnit(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The length unit of measurement.""" + + UNSPECIFIED = "Unspecified" + """Unspecified length unit.""" + KILOMETER = "Kilometer" + """Length unit in kilometers.""" + HECTOMETER = "Hectometer" + """Length unit in hectometers.""" + DECAMETER = "Decameter" + """Length unit in decameters.""" + METER = "Meter" + """Length unit in meters.""" + DECIMETER = "Decimeter" + """Length unit in decimeters.""" + CENTIMETER = "Centimeter" + """Length unit in centimeters.""" + MILLIMETER = "Millimeter" + """Length unit in millimeters.""" + MICROMETER = "Micrometer" + """Length unit in micrometers.""" + NANOMETER = "Nanometer" + """Length unit in nanometers.""" + PICOMETER = "Picometer" + """Length unit in picometers.""" + MILE = "Mile" + """Length unit in miles.""" + YARD = "Yard" + """Length unit in yards.""" + INCH = "Inch" + """Length unit in inches.""" + FOOT = "Foot" + """Length unit in feet.""" + LIGHT_YEAR = "LightYear" + """Length unit in light years.""" + POINT = "Point" + """Length unit in points.""" + + +class MetadataKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The entity Metadata object kind.""" + + DATE_METADATA = "DateMetadata" + """Metadata for date-related values.""" + DATE_TIME_METADATA = "DateTimeMetadata" + """Metadata for date and time-related values.""" + TIME_METADATA = "TimeMetadata" + """Metadata for time-related values.""" + TEMPORAL_SET_METADATA = "TemporalSetMetadata" + """Metadata for set of time-related values.""" + NUMBER_METADATA = "NumberMetadata" + """Metadata for numeric values.""" + ORDINAL_METADATA = "OrdinalMetadata" + """Metadata for ordinal numbers.""" + SPEED_METADATA = "SpeedMetadata" + """Metadata for speed-related values.""" + WEIGHT_METADATA = "WeightMetadata" + """Metadata for weight-related values.""" + LENGTH_METADATA = "LengthMetadata" + """Metadata for length-related values.""" + VOLUME_METADATA 
= "VolumeMetadata" + """Metadata for volume-related values.""" + AREA_METADATA = "AreaMetadata" + """Metadata for area-related values.""" + AGE_METADATA = "AgeMetadata" + """Metadata for age-related values.""" + INFORMATION_METADATA = "InformationMetadata" + """Metadata for information-related values.""" + TEMPERATURE_METADATA = "TemperatureMetadata" + """Metadata for temperature-related values.""" + CURRENCY_METADATA = "CurrencyMetadata" + """Metadata for currency-related values.""" + NUMERIC_RANGE_METADATA = "NumericRangeMetadata" + """Metadata for numeric range values.""" + TEMPORAL_SPAN_METADATA = "TemporalSpanMetadata" + """Metadata for temporal span values.""" + + +class NumberKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The type of the extracted number entity.""" + + INTEGER = "Integer" + """Integer number""" + DECIMAL = "Decimal" + """Decimal number""" + POWER = "Power" + """Power number""" + FRACTION = "Fraction" + """Fraction number""" + PERCENT = "Percent" + """Percent number""" + UNSPECIFIED = "Unspecified" + """Unspecified number kind""" + + +class PiiCategoriesExclude(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """(Optional) describes the PII categories to return.""" + + ABA_ROUTING_NUMBER = "ABARoutingNumber" + """ABA Routing number""" + AR_NATIONAL_IDENTITY_NUMBER = "ARNationalIdentityNumber" + """AR National Identity Number""" + AU_BANK_ACCOUNT_NUMBER = "AUBankAccountNumber" + """AT Bank Account Number""" + AU_DRIVERS_LICENSE_NUMBER = "AUDriversLicenseNumber" + """AU Driver's License Number""" + AU_MEDICAL_ACCOUNT_NUMBER = "AUMedicalAccountNumber" + """AU Medical Account Number""" + AU_PASSPORT_NUMBER = "AUPassportNumber" + """AU Passport Number""" + AU_TAX_FILE_NUMBER = "AUTaxFileNumber" + """AU Tax File Number""" + AU_BUSINESS_NUMBER = "AUBusinessNumber" + """AU Business Number""" + AU_COMPANY_NUMBER = "AUCompanyNumber" + """AU Company Number""" + AT_IDENTITY_CARD = "ATIdentityCard" + """AT Identity Card""" + AT_TAX_IDENTIFICATION_NUMBER = "ATTaxIdentificationNumber" + """AT Tax Identification Number""" + AT_VALUE_ADDED_TAX_NUMBER = "ATValueAddedTaxNumber" + """AT Value Added Tax Number""" + AZURE_DOCUMENT_DB_AUTH_KEY = "AzureDocumentDBAuthKey" + """Azure Document DB Auth Key""" + AZURE_IAAS_DATABASE_CONNECTION_AND_SQL_STRING = "AzureIAASDatabaseConnectionAndSQLString" + """Azure IAAS Database Connection And SQL String""" + AZURE_IO_T_CONNECTION_STRING = "AzureIoTConnectionString" + """Azure IoT Connection String""" + AZURE_PUBLISH_SETTING_PASSWORD = "AzurePublishSettingPassword" + """Azure Publish Setting Password""" + AZURE_REDIS_CACHE_STRING = "AzureRedisCacheString" + """Azure Redis Cache String""" + AZURE_SAS = "AzureSAS" + """Azure SAS""" + AZURE_SERVICE_BUS_STRING = "AzureServiceBusString" + """Azure Service Bus String""" + AZURE_STORAGE_ACCOUNT_KEY = "AzureStorageAccountKey" + """Azure Storage Account Key""" + AZURE_STORAGE_ACCOUNT_GENERIC = "AzureStorageAccountGeneric" + """Azure Storage Account Generic""" + BE_NATIONAL_NUMBER = "BENationalNumber" + """BE National Number""" + BE_NATIONAL_NUMBER_V2 = "BENationalNumberV2" + """BE National Number V2""" + BE_VALUE_ADDED_TAX_NUMBER = "BEValueAddedTaxNumber" + """BE Value Added Tax Number""" + BRCPF_NUMBER = "BRCPFNumber" + """BR CPF Number""" + BR_LEGAL_ENTITY_NUMBER = "BRLegalEntityNumber" + """BR Legal Entity Number""" + BR_NATIONAL_IDRG = "BRNationalIDRG" + """BR National ID RG""" + BG_UNIFORM_CIVIL_NUMBER = "BGUniformCivilNumber" + """BG Uniform Civil Number""" + CA_BANK_ACCOUNT_NUMBER = 
"CABankAccountNumber" + """CA Bank Account Number""" + CA_DRIVERS_LICENSE_NUMBER = "CADriversLicenseNumber" + """CA Driver's License Number""" + CA_HEALTH_SERVICE_NUMBER = "CAHealthServiceNumber" + """CA Health Service Number""" + CA_PASSPORT_NUMBER = "CAPassportNumber" + """CA Passport Number""" + CA_PERSONAL_HEALTH_IDENTIFICATION = "CAPersonalHealthIdentification" + """CA Personal Health Identification""" + CA_SOCIAL_INSURANCE_NUMBER = "CASocialInsuranceNumber" + """CA Social Insurance Number""" + CL_IDENTITY_CARD_NUMBER = "CLIdentityCardNumber" + """CL Identity Card Number""" + CN_RESIDENT_IDENTITY_CARD_NUMBER = "CNResidentIdentityCardNumber" + """CN Resident Identity Card Number""" + CREDIT_CARD_NUMBER = "CreditCardNumber" + """Credit Card Number""" + HR_IDENTITY_CARD_NUMBER = "HRIdentityCardNumber" + """HR Identity Card Number""" + HR_NATIONAL_ID_NUMBER = "HRNationalIDNumber" + """HR National ID Number""" + HR_PERSONAL_IDENTIFICATION_NUMBER = "HRPersonalIdentificationNumber" + """HR Personal Identification Number""" + HR_PERSONAL_IDENTIFICATION_OIB_NUMBER_V2 = "HRPersonalIdentificationOIBNumberV2" + """HR Personal Identification OIB Number V2""" + CY_IDENTITY_CARD = "CYIdentityCard" + """CY Identity Card""" + CY_TAX_IDENTIFICATION_NUMBER = "CYTaxIdentificationNumber" + """CY Tax Identification Number""" + CZ_PERSONAL_IDENTITY_NUMBER = "CZPersonalIdentityNumber" + """CZ Personal Identity Number""" + CZ_PERSONAL_IDENTITY_V2 = "CZPersonalIdentityV2" + """CZ Personal Identity V2""" + DK_PERSONAL_IDENTIFICATION_NUMBER = "DKPersonalIdentificationNumber" + """DK Personal Identification Number""" + DK_PERSONAL_IDENTIFICATION_V2 = "DKPersonalIdentificationV2" + """DK Personal Identification V2""" + DRUG_ENFORCEMENT_AGENCY_NUMBER = "DrugEnforcementAgencyNumber" + """Drug Enforcement Agency Number""" + EE_PERSONAL_IDENTIFICATION_CODE = "EEPersonalIdentificationCode" + """EE Personal Identification Code""" + EU_DEBIT_CARD_NUMBER = "EUDebitCardNumber" + """EU Debit Card Number""" + EU_DRIVERS_LICENSE_NUMBER = "EUDriversLicenseNumber" + """EU Driver's License Number""" + EUGPS_COORDINATES = "EUGPSCoordinates" + """EU GPS Coordinates""" + EU_NATIONAL_IDENTIFICATION_NUMBER = "EUNationalIdentificationNumber" + """EU National Identification Number""" + EU_PASSPORT_NUMBER = "EUPassportNumber" + """EU Passport Number""" + EU_SOCIAL_SECURITY_NUMBER = "EUSocialSecurityNumber" + """EU Social Security Number""" + EU_TAX_IDENTIFICATION_NUMBER = "EUTaxIdentificationNumber" + """EU Tax Identification Number""" + FI_EUROPEAN_HEALTH_NUMBER = "FIEuropeanHealthNumber" + """FI European Health Number""" + FI_NATIONAL_ID = "FINationalID" + """FI National ID""" + FI_NATIONAL_IDV2 = "FINationalIDV2" + """FI National ID V2""" + FI_PASSPORT_NUMBER = "FIPassportNumber" + """FI Passport Number""" + FR_DRIVERS_LICENSE_NUMBER = "FRDriversLicenseNumber" + """FR Driver's License Number""" + FR_HEALTH_INSURANCE_NUMBER = "FRHealthInsuranceNumber" + """FR Health Insurance Number""" + FR_NATIONAL_ID = "FRNationalID" + """FR National ID""" + FR_PASSPORT_NUMBER = "FRPassportNumber" + """FR Passport Number""" + FR_SOCIAL_SECURITY_NUMBER = "FRSocialSecurityNumber" + """FR Social Security Number""" + FR_TAX_IDENTIFICATION_NUMBER = "FRTaxIdentificationNumber" + """FR Tax Identification Number""" + FR_VALUE_ADDED_TAX_NUMBER = "FRValueAddedTaxNumber" + """FR Value Added Tax Number""" + DE_DRIVERS_LICENSE_NUMBER = "DEDriversLicenseNumber" + """DE Driver's License Number""" + DE_PASSPORT_NUMBER = "DEPassportNumber" + """DE Passport Number""" 
+ DE_IDENTITY_CARD_NUMBER = "DEIdentityCardNumber" + """DE Identity Card Number""" + DE_TAX_IDENTIFICATION_NUMBER = "DETaxIdentificationNumber" + """DE Tax Identification Number""" + DE_VALUE_ADDED_NUMBER = "DEValueAddedNumber" + """DE Value Added Number""" + GR_NATIONAL_ID_CARD = "GRNationalIDCard" + """GR National ID Card""" + GR_NATIONAL_IDV2 = "GRNationalIDV2" + """GR National ID V2""" + GR_TAX_IDENTIFICATION_NUMBER = "GRTaxIdentificationNumber" + """GR Tax Identification Number""" + HK_IDENTITY_CARD_NUMBER = "HKIdentityCardNumber" + """HK Identity Card Number""" + HU_VALUE_ADDED_NUMBER = "HUValueAddedNumber" + """HU Value Added Number""" + HU_PERSONAL_IDENTIFICATION_NUMBER = "HUPersonalIdentificationNumber" + """HU Personal Identification Number""" + HU_TAX_IDENTIFICATION_NUMBER = "HUTaxIdentificationNumber" + """HU Tax Identification Number""" + IN_PERMANENT_ACCOUNT = "INPermanentAccount" + """IN Permanent Account""" + IN_UNIQUE_IDENTIFICATION_NUMBER = "INUniqueIdentificationNumber" + """IN Unique Identification Number""" + ID_IDENTITY_CARD_NUMBER = "IDIdentityCardNumber" + """ID Identity Card Number""" + INTERNATIONAL_BANKING_ACCOUNT_NUMBER = "InternationalBankingAccountNumber" + """International Banking Account Number""" + IE_PERSONAL_PUBLIC_SERVICE_NUMBER = "IEPersonalPublicServiceNumber" + """IE Personal Public Service Number""" + IE_PERSONAL_PUBLIC_SERVICE_NUMBER_V2 = "IEPersonalPublicServiceNumberV2" + """IE Personal Public Service Number V2""" + IL_BANK_ACCOUNT_NUMBER = "ILBankAccountNumber" + """IL Bank Account Number""" + IL_NATIONAL_ID = "ILNationalID" + """IL National ID""" + IT_DRIVERS_LICENSE_NUMBER = "ITDriversLicenseNumber" + """IT Driver's License Number""" + IT_FISCAL_CODE = "ITFiscalCode" + """IT Fiscal Code""" + IT_VALUE_ADDED_TAX_NUMBER = "ITValueAddedTaxNumber" + """IT Value Added Tax Number""" + JP_BANK_ACCOUNT_NUMBER = "JPBankAccountNumber" + """JP Bank Account Number""" + JP_DRIVERS_LICENSE_NUMBER = "JPDriversLicenseNumber" + """JP Driver's License Number""" + JP_PASSPORT_NUMBER = "JPPassportNumber" + """JP Passport Number""" + JP_RESIDENT_REGISTRATION_NUMBER = "JPResidentRegistrationNumber" + """JP Resident Registration Number""" + JP_SOCIAL_INSURANCE_NUMBER = "JPSocialInsuranceNumber" + """JP Social Insurance Number""" + JP_MY_NUMBER_CORPORATE = "JPMyNumberCorporate" + """JP My Number Corporate""" + JP_MY_NUMBER_PERSONAL = "JPMyNumberPersonal" + """JP My Number Personal""" + JP_RESIDENCE_CARD_NUMBER = "JPResidenceCardNumber" + """JP Residence Card Number""" + LV_PERSONAL_CODE = "LVPersonalCode" + """LV Personal Code""" + LT_PERSONAL_CODE = "LTPersonalCode" + """LT Personal Code""" + LU_NATIONAL_IDENTIFICATION_NUMBER_NATURAL = "LUNationalIdentificationNumberNatural" + """LU National Identification Number Natural""" + LU_NATIONAL_IDENTIFICATION_NUMBER_NON_NATURAL = "LUNationalIdentificationNumberNonNatural" + """LU National Identification Number Non Natural""" + MY_IDENTITY_CARD_NUMBER = "MYIdentityCardNumber" + """MY Identity Card Number""" + MT_IDENTITY_CARD_NUMBER = "MTIdentityCardNumber" + """MT Identity Card Number""" + MT_TAX_ID_NUMBER = "MTTaxIDNumber" + """MT Tax ID Number""" + NL_CITIZENS_SERVICE_NUMBER = "NLCitizensServiceNumber" + """NL Citizens Service Number""" + NL_CITIZENS_SERVICE_NUMBER_V2 = "NLCitizensServiceNumberV2" + """NL Citizens Service Number V2""" + NL_TAX_IDENTIFICATION_NUMBER = "NLTaxIdentificationNumber" + """NL Tax Identification Number""" + NL_VALUE_ADDED_TAX_NUMBER = "NLValueAddedTaxNumber" + """NL Value Added Tax Number""" + 
NZ_BANK_ACCOUNT_NUMBER = "NZBankAccountNumber" + """NZ Bank Account Number""" + NZ_DRIVERS_LICENSE_NUMBER = "NZDriversLicenseNumber" + """NZ Driver's License Number""" + NZ_INLAND_REVENUE_NUMBER = "NZInlandRevenueNumber" + """NZ Inland Revenue Number""" + NZ_MINISTRY_OF_HEALTH_NUMBER = "NZMinistryOfHealthNumber" + """NZ Ministry Of Health Number""" + NZ_SOCIAL_WELFARE_NUMBER = "NZSocialWelfareNumber" + """NZ Social Welfare Number""" + NO_IDENTITY_NUMBER = "NOIdentityNumber" + """NO Identity Number""" + PH_UNIFIED_MULTI_PURPOSE_ID_NUMBER = "PHUnifiedMultiPurposeIDNumber" + """PH Unified Multi Purpose ID Number""" + PL_IDENTITY_CARD = "PLIdentityCard" + """PL Identity Card""" + PL_NATIONAL_ID = "PLNationalID" + """PL National ID""" + PL_NATIONAL_IDV2 = "PLNationalIDV2" + """PL National ID V2""" + PL_PASSPORT_NUMBER = "PLPassportNumber" + """PL Passport Number""" + PL_TAX_IDENTIFICATION_NUMBER = "PLTaxIdentificationNumber" + """PL Tax Identification Number""" + PLREGON_NUMBER = "PLREGONNumber" + """PL REGON Number""" + PT_CITIZEN_CARD_NUMBER = "PTCitizenCardNumber" + """PT Citizen Card Number""" + PT_CITIZEN_CARD_NUMBER_V2 = "PTCitizenCardNumberV2" + """PT Citizen Card Number V2""" + PT_TAX_IDENTIFICATION_NUMBER = "PTTaxIdentificationNumber" + """PT Tax Identification Number""" + RO_PERSONAL_NUMERICAL_CODE = "ROPersonalNumericalCode" + """RO Personal Numerical Code""" + RU_PASSPORT_NUMBER_DOMESTIC = "RUPassportNumberDomestic" + """RU Passport Number Domestic""" + RU_PASSPORT_NUMBER_INTERNATIONAL = "RUPassportNumberInternational" + """RU Passport Number International""" + SA_NATIONAL_ID = "SANationalID" + """SA National ID""" + SG_NATIONAL_REGISTRATION_IDENTITY_CARD_NUMBER = "SGNationalRegistrationIdentityCardNumber" + """SG National Registration Identity Card Number""" + SK_PERSONAL_NUMBER = "SKPersonalNumber" + """SK Personal Number""" + SI_TAX_IDENTIFICATION_NUMBER = "SITaxIdentificationNumber" + """SI Tax Identification Number""" + SI_UNIQUE_MASTER_CITIZEN_NUMBER = "SIUniqueMasterCitizenNumber" + """SI Unique Master Citizen Number""" + ZA_IDENTIFICATION_NUMBER = "ZAIdentificationNumber" + """ZA Identification Number""" + KR_RESIDENT_REGISTRATION_NUMBER = "KRResidentRegistrationNumber" + """KR Resident Registration Number""" + ESDNI = "ESDNI" + """ES DNI""" + ES_SOCIAL_SECURITY_NUMBER = "ESSocialSecurityNumber" + """ES Social Security Number""" + ES_TAX_IDENTIFICATION_NUMBER = "ESTaxIdentificationNumber" + """ES Tax Identification Number""" + SQL_SERVER_CONNECTION_STRING = "SQLServerConnectionString" + """SQL Server Connection String""" + SE_NATIONAL_ID = "SENationalID" + """SE National ID""" + SE_NATIONAL_IDV2 = "SENationalIDV2" + """SE National ID V2""" + SE_PASSPORT_NUMBER = "SEPassportNumber" + """SE Passport Number""" + SE_TAX_IDENTIFICATION_NUMBER = "SETaxIdentificationNumber" + """SE Tax Identification Number""" + SWIFT_CODE = "SWIFTCode" + """SWIFT Code""" + CH_SOCIAL_SECURITY_NUMBER = "CHSocialSecurityNumber" + """CH Social Security Number""" + TW_NATIONAL_ID = "TWNationalID" + """TW National ID""" + TW_PASSPORT_NUMBER = "TWPassportNumber" + """TW Passport Number""" + TW_RESIDENT_CERTIFICATE = "TWResidentCertificate" + """TW Resident Certificate""" + TH_POPULATION_IDENTIFICATION_CODE = "THPopulationIdentificationCode" + """TH Population Identification Code""" + TR_NATIONAL_IDENTIFICATION_NUMBER = "TRNationalIdentificationNumber" + """TR National Identification Number""" + UK_DRIVERS_LICENSE_NUMBER = "UKDriversLicenseNumber" + """UK Driver's License Number""" + 
UK_ELECTORAL_ROLL_NUMBER = "UKElectoralRollNumber" + """UK Electoral Roll Number""" + UK_NATIONAL_HEALTH_NUMBER = "UKNationalHealthNumber" + """UK National Health Number""" + UK_NATIONAL_INSURANCE_NUMBER = "UKNationalInsuranceNumber" + """UK National Insurance Number""" + UK_UNIQUE_TAXPAYER_NUMBER = "UKUniqueTaxpayerNumber" + """UK Unique Taxpayer Number""" + USUK_PASSPORT_NUMBER = "USUKPassportNumber" + """US UK Passport Number""" + US_BANK_ACCOUNT_NUMBER = "USBankAccountNumber" + """US Bank Account Number""" + US_DRIVERS_LICENSE_NUMBER = "USDriversLicenseNumber" + """US Driver's License Number""" + US_INDIVIDUAL_TAXPAYER_IDENTIFICATION = "USIndividualTaxpayerIdentification" + """US Individual Taxpayer Identification""" + US_SOCIAL_SECURITY_NUMBER = "USSocialSecurityNumber" + """US Social Security Number""" + UA_PASSPORT_NUMBER_DOMESTIC = "UAPassportNumberDomestic" + """UA Passport Number Domestic""" + UA_PASSPORT_NUMBER_INTERNATIONAL = "UAPassportNumberInternational" + """UA Passport Number International""" + ORGANIZATION = "Organization" + """Organization""" + EMAIL = "Email" + """Email""" + URL = "URL" + """URL""" + AGE = "Age" + """Age""" + PHONE_NUMBER = "PhoneNumber" + """Phone Number""" + IP_ADDRESS = "IPAddress" + """IP Address""" + DATE = "Date" + """Date""" + PERSON = "Person" + """Person""" + ADDRESS = "Address" + """Address""" + DATE_OF_BIRTH = "DateOfBirth" + """Date Of Birth""" + BANK_ACCOUNT_NUMBER = "BankAccountNumber" + """Bank Account Number""" + PASSPORT_NUMBER = "PassportNumber" + """Passport Number""" + DRIVERS_LICENSE_NUMBER = "DriversLicenseNumber" + """Drivers License Number""" + NEIGHBORHOOD = "Neighborhood" + """Neighborhood""" + SORT_CODE = "SortCode" + """Sort Code. 6-digit number used in the UK to identify a specific bank and branch where a bank + account is held.""" + PIN = "PIN" + """PIN""" + VIN = "VIN" + """VIN""" + LICENSE_PLATE = "LicensePlate" + """License Plate""" + + +class PiiCategory(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """(Optional) describes the PII categories to return.""" + + ABA_ROUTING_NUMBER = "ABARoutingNumber" + """ABA Routing number""" + AR_NATIONAL_IDENTITY_NUMBER = "ARNationalIdentityNumber" + """AR National Identity Number""" + AU_BANK_ACCOUNT_NUMBER = "AUBankAccountNumber" + """AU Bank Account Number""" + AU_DRIVERS_LICENSE_NUMBER = "AUDriversLicenseNumber" + """AU Driver's License Number""" + AU_MEDICAL_ACCOUNT_NUMBER = "AUMedicalAccountNumber" + """AU Medical Account Number""" + AU_PASSPORT_NUMBER = "AUPassportNumber" + """AU Passport Number""" + AU_TAX_FILE_NUMBER = "AUTaxFileNumber" + """AU Tax File Number""" + AU_BUSINESS_NUMBER = "AUBusinessNumber" + """AU Business Number""" + AU_COMPANY_NUMBER = "AUCompanyNumber" + """AU Company Number""" + AT_IDENTITY_CARD = "ATIdentityCard" + """AT Identity Card""" + AT_TAX_IDENTIFICATION_NUMBER = "ATTaxIdentificationNumber" + """AT Tax Identification Number""" + AT_VALUE_ADDED_TAX_NUMBER = "ATValueAddedTaxNumber" + """AT Value Added Tax Number""" + AZURE_DOCUMENT_DB_AUTH_KEY = "AzureDocumentDBAuthKey" + """Azure Document DB Auth Key""" + AZURE_IAAS_DATABASE_CONNECTION_AND_SQL_STRING = "AzureIAASDatabaseConnectionAndSQLString" + """Azure IAAS Database Connection And SQL String""" + AZURE_IO_T_CONNECTION_STRING = "AzureIoTConnectionString" + """Azure IoT Connection String""" + AZURE_PUBLISH_SETTING_PASSWORD = "AzurePublishSettingPassword" + """Azure Publish Setting Password""" + AZURE_REDIS_CACHE_STRING = "AzureRedisCacheString" + """Azure Redis Cache String""" + AZURE_SAS =
"AzureSAS" + """Azure SAS""" + AZURE_SERVICE_BUS_STRING = "AzureServiceBusString" + """Azure Service Bus String""" + AZURE_STORAGE_ACCOUNT_KEY = "AzureStorageAccountKey" + """Azure Storage Account Key""" + AZURE_STORAGE_ACCOUNT_GENERIC = "AzureStorageAccountGeneric" + """Azure Storage Account Generic""" + BE_NATIONAL_NUMBER = "BENationalNumber" + """BE National Number""" + BE_NATIONAL_NUMBER_V2 = "BENationalNumberV2" + """BE National Number V2""" + BE_VALUE_ADDED_TAX_NUMBER = "BEValueAddedTaxNumber" + """BE Value Added Tax Number""" + BRCPF_NUMBER = "BRCPFNumber" + """BR CPF Number""" + BR_LEGAL_ENTITY_NUMBER = "BRLegalEntityNumber" + """BR Legal Entity Number""" + BR_NATIONAL_IDRG = "BRNationalIDRG" + """BR National ID RG""" + BG_UNIFORM_CIVIL_NUMBER = "BGUniformCivilNumber" + """BG Uniform Civil Number""" + CA_BANK_ACCOUNT_NUMBER = "CABankAccountNumber" + """CA Bank Account Number""" + CA_DRIVERS_LICENSE_NUMBER = "CADriversLicenseNumber" + """CA Driver's License Number""" + CA_HEALTH_SERVICE_NUMBER = "CAHealthServiceNumber" + """CA Health Service Number""" + CA_PASSPORT_NUMBER = "CAPassportNumber" + """CA Passport Number""" + CA_PERSONAL_HEALTH_IDENTIFICATION = "CAPersonalHealthIdentification" + """CA Personal Health Identification""" + CA_SOCIAL_INSURANCE_NUMBER = "CASocialInsuranceNumber" + """CA Social Insurance Number""" + CL_IDENTITY_CARD_NUMBER = "CLIdentityCardNumber" + """CL Identity Card Number""" + CN_RESIDENT_IDENTITY_CARD_NUMBER = "CNResidentIdentityCardNumber" + """CN Resident Identity Card Number""" + CREDIT_CARD_NUMBER = "CreditCardNumber" + """Credit Card Number""" + HR_IDENTITY_CARD_NUMBER = "HRIdentityCardNumber" + """HR Identity Card Number""" + HR_NATIONAL_ID_NUMBER = "HRNationalIDNumber" + """HR National ID Number""" + HR_PERSONAL_IDENTIFICATION_NUMBER = "HRPersonalIdentificationNumber" + """HR Personal Identification Number""" + HR_PERSONAL_IDENTIFICATION_OIB_NUMBER_V2 = "HRPersonalIdentificationOIBNumberV2" + """HR Personal Identification OIB Number V2""" + CY_IDENTITY_CARD = "CYIdentityCard" + """CY Identity Card""" + CY_TAX_IDENTIFICATION_NUMBER = "CYTaxIdentificationNumber" + """CY Tax Identification Number""" + CZ_PERSONAL_IDENTITY_NUMBER = "CZPersonalIdentityNumber" + """CZ Personal Identity Number""" + CZ_PERSONAL_IDENTITY_V2 = "CZPersonalIdentityV2" + """CZ Personal Identity V2""" + DK_PERSONAL_IDENTIFICATION_NUMBER = "DKPersonalIdentificationNumber" + """DK Personal Identification Number""" + DK_PERSONAL_IDENTIFICATION_V2 = "DKPersonalIdentificationV2" + """DK Personal Identification V2""" + DRUG_ENFORCEMENT_AGENCY_NUMBER = "DrugEnforcementAgencyNumber" + """Drug Enforcement Agency Number""" + EE_PERSONAL_IDENTIFICATION_CODE = "EEPersonalIdentificationCode" + """EE Personal Identification Code""" + EU_DEBIT_CARD_NUMBER = "EUDebitCardNumber" + """EU Debit Card Number""" + EU_DRIVERS_LICENSE_NUMBER = "EUDriversLicenseNumber" + """EU Driver's License Number""" + EUGPS_COORDINATES = "EUGPSCoordinates" + """EU GPS Coordinates""" + EU_NATIONAL_IDENTIFICATION_NUMBER = "EUNationalIdentificationNumber" + """EU National Identification Number""" + EU_PASSPORT_NUMBER = "EUPassportNumber" + """EU Passport Number""" + EU_SOCIAL_SECURITY_NUMBER = "EUSocialSecurityNumber" + """EU Social Security Number""" + EU_TAX_IDENTIFICATION_NUMBER = "EUTaxIdentificationNumber" + """EU Tax Identification Number""" + FI_EUROPEAN_HEALTH_NUMBER = "FIEuropeanHealthNumber" + """FI European Health Number""" + FI_NATIONAL_ID = "FINationalID" + """FI National ID""" + FI_NATIONAL_IDV2 = 
"FINationalIDV2" + """FI National ID V2""" + FI_PASSPORT_NUMBER = "FIPassportNumber" + """FI Passport Number""" + FR_DRIVERS_LICENSE_NUMBER = "FRDriversLicenseNumber" + """FR Driver's License Number""" + FR_HEALTH_INSURANCE_NUMBER = "FRHealthInsuranceNumber" + """FR Health Insurance Number""" + FR_NATIONAL_ID = "FRNationalID" + """FR National ID""" + FR_PASSPORT_NUMBER = "FRPassportNumber" + """FR Passport Number""" + FR_SOCIAL_SECURITY_NUMBER = "FRSocialSecurityNumber" + """FR Social Security Number""" + FR_TAX_IDENTIFICATION_NUMBER = "FRTaxIdentificationNumber" + """FR Tax Identification Number""" + FR_VALUE_ADDED_TAX_NUMBER = "FRValueAddedTaxNumber" + """FR Value Added Tax Number""" + DE_DRIVERS_LICENSE_NUMBER = "DEDriversLicenseNumber" + """DE Driver's License Number""" + DE_PASSPORT_NUMBER = "DEPassportNumber" + """DE Passport Number""" + DE_IDENTITY_CARD_NUMBER = "DEIdentityCardNumber" + """DE Identity Card Number""" + DE_TAX_IDENTIFICATION_NUMBER = "DETaxIdentificationNumber" + """DE Tax Identification Number""" + DE_VALUE_ADDED_NUMBER = "DEValueAddedNumber" + """DE Value Added Number""" + GR_NATIONAL_ID_CARD = "GRNationalIDCard" + """GR National ID Card""" + GR_NATIONAL_IDV2 = "GRNationalIDV2" + """GR National ID V2""" + GR_TAX_IDENTIFICATION_NUMBER = "GRTaxIdentificationNumber" + """GR Tax Identification Number""" + HK_IDENTITY_CARD_NUMBER = "HKIdentityCardNumber" + """HK Identity Card Number""" + HU_VALUE_ADDED_NUMBER = "HUValueAddedNumber" + """HU Value Added Number""" + HU_PERSONAL_IDENTIFICATION_NUMBER = "HUPersonalIdentificationNumber" + """HU Personal Identification Number""" + HU_TAX_IDENTIFICATION_NUMBER = "HUTaxIdentificationNumber" + """HU Tax Identification Number""" + IN_PERMANENT_ACCOUNT = "INPermanentAccount" + """IN Permanent Account""" + IN_UNIQUE_IDENTIFICATION_NUMBER = "INUniqueIdentificationNumber" + """IN Unique Identification Number""" + ID_IDENTITY_CARD_NUMBER = "IDIdentityCardNumber" + """ID Identity Card Number""" + INTERNATIONAL_BANKING_ACCOUNT_NUMBER = "InternationalBankingAccountNumber" + """International Banking Account Number""" + IE_PERSONAL_PUBLIC_SERVICE_NUMBER = "IEPersonalPublicServiceNumber" + """IE Personal Public Service Number""" + IE_PERSONAL_PUBLIC_SERVICE_NUMBER_V2 = "IEPersonalPublicServiceNumberV2" + """IE Personal Public Service Number V2""" + IL_BANK_ACCOUNT_NUMBER = "ILBankAccountNumber" + """IL Bank Account Number""" + IL_NATIONAL_ID = "ILNationalID" + """IL National ID""" + IT_DRIVERS_LICENSE_NUMBER = "ITDriversLicenseNumber" + """IT Driver's License Number""" + IT_FISCAL_CODE = "ITFiscalCode" + """IT Fiscal Code""" + IT_VALUE_ADDED_TAX_NUMBER = "ITValueAddedTaxNumber" + """IT Value Added Tax Number""" + JP_BANK_ACCOUNT_NUMBER = "JPBankAccountNumber" + """JP Bank Account Number""" + JP_DRIVERS_LICENSE_NUMBER = "JPDriversLicenseNumber" + """JP Driver's License Number""" + JP_PASSPORT_NUMBER = "JPPassportNumber" + """JP Passport Number""" + JP_RESIDENT_REGISTRATION_NUMBER = "JPResidentRegistrationNumber" + """JP Resident Registration Number""" + JP_SOCIAL_INSURANCE_NUMBER = "JPSocialInsuranceNumber" + """JP Social Insurance Number""" + JP_MY_NUMBER_CORPORATE = "JPMyNumberCorporate" + """JP My Number Corporate""" + JP_MY_NUMBER_PERSONAL = "JPMyNumberPersonal" + """JP My Number Personal""" + JP_RESIDENCE_CARD_NUMBER = "JPResidenceCardNumber" + """JP Residence Card Number""" + LV_PERSONAL_CODE = "LVPersonalCode" + """LV Personal Code""" + LT_PERSONAL_CODE = "LTPersonalCode" + """LT Personal Code""" + 
LU_NATIONAL_IDENTIFICATION_NUMBER_NATURAL = "LUNationalIdentificationNumberNatural" + """LU National Identification Number Natural""" + LU_NATIONAL_IDENTIFICATION_NUMBER_NON_NATURAL = "LUNationalIdentificationNumberNonNatural" + """LU National Identification Number Non Natural""" + MY_IDENTITY_CARD_NUMBER = "MYIdentityCardNumber" + """MY Identity Card Number""" + MT_IDENTITY_CARD_NUMBER = "MTIdentityCardNumber" + """MT Identity Card Number""" + MT_TAX_ID_NUMBER = "MTTaxIDNumber" + """MT Tax ID Number""" + NL_CITIZENS_SERVICE_NUMBER = "NLCitizensServiceNumber" + """NL Citizens Service Number""" + NL_CITIZENS_SERVICE_NUMBER_V2 = "NLCitizensServiceNumberV2" + """NL Citizens Service Number V2""" + NL_TAX_IDENTIFICATION_NUMBER = "NLTaxIdentificationNumber" + """NL Tax Identification Number""" + NL_VALUE_ADDED_TAX_NUMBER = "NLValueAddedTaxNumber" + """NL Value Added Tax Number""" + NZ_BANK_ACCOUNT_NUMBER = "NZBankAccountNumber" + """NZ Bank Account Number""" + NZ_DRIVERS_LICENSE_NUMBER = "NZDriversLicenseNumber" + """NZ Driver's License Number""" + NZ_INLAND_REVENUE_NUMBER = "NZInlandRevenueNumber" + """NZ Inland Revenue Number""" + NZ_MINISTRY_OF_HEALTH_NUMBER = "NZMinistryOfHealthNumber" + """NZ Ministry Of Health Number""" + NZ_SOCIAL_WELFARE_NUMBER = "NZSocialWelfareNumber" + """NZ Social Welfare Number""" + NO_IDENTITY_NUMBER = "NOIdentityNumber" + """NO Identity Number""" + PH_UNIFIED_MULTI_PURPOSE_ID_NUMBER = "PHUnifiedMultiPurposeIDNumber" + """PH Unified Multi Purpose ID Number""" + PL_IDENTITY_CARD = "PLIdentityCard" + """PL Identity Card""" + PL_NATIONAL_ID = "PLNationalID" + """PL National ID""" + PL_NATIONAL_IDV2 = "PLNationalIDV2" + """PL National ID V2""" + PL_PASSPORT_NUMBER = "PLPassportNumber" + """PL Passport Number""" + PL_TAX_IDENTIFICATION_NUMBER = "PLTaxIdentificationNumber" + """PL Tax Identification Number""" + PLREGON_NUMBER = "PLREGONNumber" + """PL REGON Number""" + PT_CITIZEN_CARD_NUMBER = "PTCitizenCardNumber" + """PT Citizen Card Number""" + PT_CITIZEN_CARD_NUMBER_V2 = "PTCitizenCardNumberV2" + """PT Citizen Card Number V2""" + PT_TAX_IDENTIFICATION_NUMBER = "PTTaxIdentificationNumber" + """PT Tax Identification Number""" + RO_PERSONAL_NUMERICAL_CODE = "ROPersonalNumericalCode" + """RO Personal Numerical Code""" + RU_PASSPORT_NUMBER_DOMESTIC = "RUPassportNumberDomestic" + """RU Passport Number Domestic""" + RU_PASSPORT_NUMBER_INTERNATIONAL = "RUPassportNumberInternational" + """RU Passport Number International""" + SA_NATIONAL_ID = "SANationalID" + """SA National ID""" + SG_NATIONAL_REGISTRATION_IDENTITY_CARD_NUMBER = "SGNationalRegistrationIdentityCardNumber" + """SG National Registration Identity Card Number""" + SK_PERSONAL_NUMBER = "SKPersonalNumber" + """SK Personal Number""" + SI_TAX_IDENTIFICATION_NUMBER = "SITaxIdentificationNumber" + """SI Tax Identification Number""" + SI_UNIQUE_MASTER_CITIZEN_NUMBER = "SIUniqueMasterCitizenNumber" + """SI Unique Master Citizen Number""" + ZA_IDENTIFICATION_NUMBER = "ZAIdentificationNumber" + """ZA Identification Number""" + KR_RESIDENT_REGISTRATION_NUMBER = "KRResidentRegistrationNumber" + """KR Resident Registration Number""" + ESDNI = "ESDNI" + """ES DNI""" + ES_SOCIAL_SECURITY_NUMBER = "ESSocialSecurityNumber" + """ES Social Security Number""" + ES_TAX_IDENTIFICATION_NUMBER = "ESTaxIdentificationNumber" + """ES Tax Identification Number""" + SQL_SERVER_CONNECTION_STRING = "SQLServerConnectionString" + """SQL Server Connection String""" + SE_NATIONAL_ID = "SENationalID" + """SE National ID""" + SE_NATIONAL_IDV2 = 
"SENationalIDV2" + """SE National ID V2""" + SE_PASSPORT_NUMBER = "SEPassportNumber" + """SE Passport Number""" + SE_TAX_IDENTIFICATION_NUMBER = "SETaxIdentificationNumber" + """SE Tax Identification Number""" + SWIFT_CODE = "SWIFTCode" + """SWIFT Code""" + CH_SOCIAL_SECURITY_NUMBER = "CHSocialSecurityNumber" + """CH Social Security Number""" + TW_NATIONAL_ID = "TWNationalID" + """TW National ID""" + TW_PASSPORT_NUMBER = "TWPassportNumber" + """TW Passport Number""" + TW_RESIDENT_CERTIFICATE = "TWResidentCertificate" + """TW Resident Certificate""" + TH_POPULATION_IDENTIFICATION_CODE = "THPopulationIdentificationCode" + """TH Population Identification Code""" + TR_NATIONAL_IDENTIFICATION_NUMBER = "TRNationalIdentificationNumber" + """TR National Identification Number""" + UK_DRIVERS_LICENSE_NUMBER = "UKDriversLicenseNumber" + """UK Driver's License Number""" + UK_ELECTORAL_ROLL_NUMBER = "UKElectoralRollNumber" + """UK Electoral Roll Number""" + UK_NATIONAL_HEALTH_NUMBER = "UKNationalHealthNumber" + """UK National Health Number""" + UK_NATIONAL_INSURANCE_NUMBER = "UKNationalInsuranceNumber" + """UK National Insurance Number""" + UK_UNIQUE_TAXPAYER_NUMBER = "UKUniqueTaxpayerNumber" + """UK Unique Taxpayer Number""" + USUK_PASSPORT_NUMBER = "USUKPassportNumber" + """US UK Passport Number""" + US_BANK_ACCOUNT_NUMBER = "USBankAccountNumber" + """US Bank Account Number""" + US_DRIVERS_LICENSE_NUMBER = "USDriversLicenseNumber" + """US Driver's License Number""" + US_INDIVIDUAL_TAXPAYER_IDENTIFICATION = "USIndividualTaxpayerIdentification" + """US Individual Taxpayer Identification""" + US_SOCIAL_SECURITY_NUMBER = "USSocialSecurityNumber" + """US Social Security Number""" + UA_PASSPORT_NUMBER_DOMESTIC = "UAPassportNumberDomestic" + """UA Passport Number Domestic""" + UA_PASSPORT_NUMBER_INTERNATIONAL = "UAPassportNumberInternational" + """UA Passport Number International""" + ORGANIZATION = "Organization" + """Organization""" + EMAIL = "Email" + """Email""" + URL = "URL" + """URL""" + AGE = "Age" + """Age""" + PHONE_NUMBER = "PhoneNumber" + """Phone Number""" + IP_ADDRESS = "IPAddress" + """IP Address""" + DATE = "Date" + """Date""" + PERSON = "Person" + """Person""" + ADDRESS = "Address" + """Address""" + DATE_OF_BIRTH = "DateOfBirth" + """Date Of Birth""" + BANK_ACCOUNT_NUMBER = "BankAccountNumber" + """Bank Account Number""" + PASSPORT_NUMBER = "PassportNumber" + """Passport Number""" + DRIVERS_LICENSE_NUMBER = "DriversLicenseNumber" + """Drivers License Number""" + NEIGHBORHOOD = "Neighborhood" + """Neighborhood""" + SORT_CODE = "SortCode" + """Sort Code. 
6-digit number used in the UK to identify a specific bank and branch where a bank + account is held.""" + PIN = "PIN" + """PIN""" + VIN = "VIN" + """VIN""" + LICENSE_PLATE = "LicensePlate" + """License Plate""" + ALL = "All" + """All PII categories.""" + DEFAULT = "Default" + """Default PII categories for the language.""" + + +class PiiDomain(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """PII domains.""" + + PHI = "phi" + """Indicates that entities in the Personal Health Information domain should be redacted.""" + NONE = "none" + """Indicates that no domain is specified.""" + + +class PolicyKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Kinds of overlap policies supported.""" + + MATCH_LONGEST = "matchLongest" + """Represents MatchLongestEntityPolicyType""" + ALLOW_OVERLAP = "allowOverlap" + """Represents AllowOverlapEntityPolicyType""" + + +class RangeInclusivity(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The range inclusiveness of this property.""" + + NONE_INCLUSIVE = "NoneInclusive" + """No inclusivity""" + LEFT_INCLUSIVE = "LeftInclusive" + """Left side inclusive""" + RIGHT_INCLUSIVE = "RightInclusive" + """Right side inclusive""" + LEFT_RIGHT_INCLUSIVE = "LeftRightInclusive" + """Both sides inclusive""" + + +class RangeKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The kind of the number range entity.""" + + NUMBER = "Number" + """Number range""" + SPEED = "Speed" + """Speed range""" + WEIGHT = "Weight" + """Weight range""" + LENGTH = "Length" + """Length range""" + VOLUME = "Volume" + """Volume range""" + AREA = "Area" + """Area range""" + AGE = "Age" + """Age range""" + INFORMATION = "Information" + """Information range""" + TEMPERATURE = "Temperature" + """Temperature range""" + CURRENCY = "Currency" + """Currency range""" + + +class RedactionCharacter(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Optional parameter to specify a custom character for redaction in PII responses. The default + character is *. Only specific ASCII characters are allowed. + """ + + EXCLAMATION_POINT = "!" + """Exclamation point character""" + NUMBER_SIGN = "#" + """Number sign character""" + DOLLAR = "$" + """Dollar sign character""" + PER_CENT = "%" + """Percent sign character""" + AMPERSAND = "&" + """Ampersand character""" + ASTERISK = "*" + """Asterisk character""" + PLUS = "+" + """Plus sign character""" + MINUS = "-" + """Minus sign character""" + EQUALS = "=" + """Equals sign character""" + QUESTION_MARK = "?" + """Question mark character""" + AT_SIGN = "@" + """At sign character""" + CARET = "^" + """Caret character""" + UNDERSCORE = "_" + """Underscore character""" + TILDE = "~" + """Tilde character""" + + +class RedactionPolicyKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Kinds of redaction policies supported.""" + + NO_MASK = "noMask" + """Do not redact detected entities.""" + CHARACTER_MASK = "characterMask" + """Redact detected entities with a redaction character.""" + ENTITY_MASK = "entityMask" + """Redact detected entities with the entity type."""
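The redaction policy kinds above pair with the policy models exported from this package (`NoMaskPolicyType`, `CharacterMaskPolicyType`, `EntityMaskPolicyType`). A short sketch of choosing a masking character; the `redaction_character` keyword is an assumption inferred from the REST property `redactionCharacter`, so verify it against the generated `_models.py` before relying on it:

from azure.ai.language.text.models import CharacterMaskPolicyType, RedactionCharacter

# Enum members carry the wire value; the asterisk is the service default.
assert RedactionCharacter.ASTERISK.value == "*"

# Hypothetical keyword: `redaction_character` is assumed from the REST property
# `redactionCharacter` and may differ in the generated model.
policy = CharacterMaskPolicyType(redaction_character=RedactionCharacter.ASTERISK)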
+ + +class RelationType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Type of relation. Examples include ``DosageOfMedication``, ``FrequencyOfMedication``, etc.""" + + ABBREVIATION = "Abbreviation" + """Abbreviation""" + DIRECTION_OF_BODY_STRUCTURE = "DirectionOfBodyStructure" + """Direction of body structure""" + DIRECTION_OF_CONDITION = "DirectionOfCondition" + """Direction of condition""" + DIRECTION_OF_EXAMINATION = "DirectionOfExamination" + """Direction of examination""" + DIRECTION_OF_TREATMENT = "DirectionOfTreatment" + """Direction of treatment""" + DOSAGE_OF_MEDICATION = "DosageOfMedication" + """Dosage of medication""" + FORM_OF_MEDICATION = "FormOfMedication" + """Form of medication""" + FREQUENCY_OF_MEDICATION = "FrequencyOfMedication" + """Frequency of medication""" + FREQUENCY_OF_TREATMENT = "FrequencyOfTreatment" + """Frequency of treatment""" + QUALIFIER_OF_CONDITION = "QualifierOfCondition" + """Qualifier of condition""" + RELATION_OF_EXAMINATION = "RelationOfExamination" + """Relation of examination""" + ROUTE_OF_MEDICATION = "RouteOfMedication" + """Route of medication""" + TIME_OF_CONDITION = "TimeOfCondition" + """Time of condition""" + TIME_OF_EVENT = "TimeOfEvent" + """Time of event""" + TIME_OF_EXAMINATION = "TimeOfExamination" + """Time of examination""" + TIME_OF_MEDICATION = "TimeOfMedication" + """Time of medication""" + TIME_OF_TREATMENT = "TimeOfTreatment" + """Time of treatment""" + UNIT_OF_CONDITION = "UnitOfCondition" + """Unit of condition""" + UNIT_OF_EXAMINATION = "UnitOfExamination" + """Unit of examination""" + VALUE_OF_CONDITION = "ValueOfCondition" + """Value of condition""" + VALUE_OF_EXAMINATION = "ValueOfExamination" + """Value of examination""" + BODY_SITE_OF_CONDITION = "BodySiteOfCondition" + """Body site of condition""" + BODY_SITE_OF_TREATMENT = "BodySiteOfTreatment" + """Body site of treatment""" + COURSE_OF_CONDITION = "CourseOfCondition" + """Course of condition""" + COURSE_OF_EXAMINATION = "CourseOfExamination" + """Course of examination""" + COURSE_OF_MEDICATION = "CourseOfMedication" + """Course of medication""" + COURSE_OF_TREATMENT = "CourseOfTreatment" + """Course of treatment""" + EXAMINATION_FINDS_CONDITION = "ExaminationFindsCondition" + """Examination finds condition""" + EXPRESSION_OF_GENE = "ExpressionOfGene" + """Expression of gene""" + EXPRESSION_OF_VARIANT = "ExpressionOfVariant" + """Expression of variant""" + FREQUENCY_OF_CONDITION = "FrequencyOfCondition" + """Frequency of condition""" + MUTATION_TYPE_OF_GENE = "MutationTypeOfGene" + """Mutation type of gene""" + MUTATION_TYPE_OF_VARIANT = "MutationTypeOfVariant" + """Mutation type of variant""" + SCALE_OF_CONDITION = "ScaleOfCondition" + """Scale of condition""" + VARIANT_OF_GENE = "VariantOfGene" + """Variant of gene""" + + +class RelativeTo(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The reference point that the ordinal number denotes.""" + + CURRENT = "Current" + """Current state or position""" + END = "End" + """End state or position""" + START = "Start" + """Start state or position""" + + +class ScriptCode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Identifies the script of the input document.
Maps to the ISO 15924 standard script code.""" + + ARAB = "Arab" + """Script code for the Arabic script.""" + ARMN = "Armn" + """Script code for the Armenian script.""" + BENG = "Beng" + """Script code for the Bangla script.""" + CANS = "Cans" + """Script code for the UnifiedCanadianAboriginalSyllabics script.""" + CYRL = "Cyrl" + """Script code for the Cyrillic script.""" + DEVA = "Deva" + """Script code for the Devanagari script.""" + ETHI = "Ethi" + """Script code for the Ethiopic script.""" + GEOR = "Geor" + """Script code for the Georgian script.""" + GREK = "Grek" + """Script code for the Greek script.""" + GUJR = "Gujr" + """Script code for the Gujarati script.""" + GURU = "Guru" + """Script code for the Gurmukhi script.""" + HANG = "Hang" + """Script code for the Hangul script.""" + HANI = "Hani" + """Script code for the HanLiteral script.""" + HANS = "Hans" + """Script code for the HanSimplified script.""" + HANT = "Hant" + """Script code for the HanTraditional script.""" + HEBR = "Hebr" + """Script code for the Hebrew script.""" + JPAN = "Jpan" + """Script code for the Japanese script.""" + KHMR = "Khmr" + """Script code for the Khmer script.""" + KNDA = "Knda" + """Script code for the Kannada script.""" + LAOO = "Laoo" + """Script code for the Lao script.""" + LATN = "Latn" + """Script code for the Latin script.""" + MLYM = "Mlym" + """Script code for the Malayalam script.""" + MONG = "Mong" + """Script code for the Mongolian script.""" + MTEI = "Mtei" + """Script code for the Meitei script.""" + MYMR = "Mymr" + """Script code for the Myanmar script.""" + OLCK = "Olck" + """Script code for the Santali script.""" + ORYA = "Orya" + """Script code for the Odia script.""" + SINH = "Sinh" + """Script code for the Sinhala script.""" + SHRD = "Shrd" + """Script code for the Sharada script.""" + TAML = "Taml" + """Script code for the Tamil script.""" + TELU = "Telu" + """Script code for the Telugu script.""" + THAA = "Thaa" + """Script code for the Thaana script.""" + THAI = "Thai" + """Script code for the Thai script.""" + TIBT = "Tibt" + """Script code for the Tibetan script.""" + + +class ScriptKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Identifies the script of the input document. 
Maps to the ISO 15924 standard formal name.""" + + ARABIC = "Arabic" + """Script name for the Arabic script.""" + ARMENIAN = "Armenian" + """Script name for the Armenian script.""" + BANGLA = "Bangla" + """Script name for the Bangla script.""" + UNIFIED_CANADIAN_ABORIGINAL_SYLLABICS = "UnifiedCanadianAboriginalSyllabics" + """Script name for the UnifiedCanadianAboriginalSyllabics script.""" + CYRILLIC = "Cyrillic" + """Script name for the Cyrillic script.""" + DEVANAGARI = "Devanagari" + """Script name for the Devanagari script.""" + ETHIOPIC = "Ethiopic" + """Script name for the Ethiopic script.""" + GEORGIAN = "Georgian" + """Script name for the Georgian script.""" + GREEK = "Greek" + """Script name for the Greek script.""" + GUJARATI = "Gujarati" + """Script name for the Gujarati script.""" + GURMUKHI = "Gurmukhi" + """Script name for the Gurmukhi script.""" + HANGUL = "Hangul" + """Script name for the Hangul script.""" + HAN_LITERAL = "HanLiteral" + """Script name for the HanLiteral script.""" + HAN_SIMPLIFIED = "HanSimplified" + """Script name for the HanSimplified script.""" + HAN_TRADITIONAL = "HanTraditional" + """Script name for the HanTraditional script.""" + HEBREW = "Hebrew" + """Script name for the Hebrew script.""" + JAPANESE = "Japanese" + """Script name for the Japanese script.""" + KHMER = "Khmer" + """Script name for the Khmer script.""" + KANNADA = "Kannada" + """Script name for the Kannada script.""" + LAO = "Lao" + """Script name for the Lao script.""" + LATIN = "Latin" + """Script name for the Latin script.""" + MALAYALAM = "Malayalam" + """Script name for the Malayalam script.""" + MEITEI = "Meitei" + """Script name for the Meitei script.""" + MONGOLIAN = "Mongolian" + """Script name for the Mongolian script.""" + MYANMAR = "Myanmar" + """Script name for the Myanmar script.""" + ODIA = "Odia" + """Script name for the Odia script.""" + SANTALI = "Santali" + """Script name for the Santali script.""" + SHARADA = "Sharada" + """Script name for the Sharada script.""" + SINHALA = "Sinhala" + """Script name for the Sinhala script.""" + TAMIL = "Tamil" + """Script name for the Tamil script.""" + TELUGU = "Telugu" + """Script name for the Telugu script.""" + THAANA = "Thaana" + """Script name for the Thaana script.""" + THAI = "Thai" + """Script name for the Thai script.""" + TIBETAN = "Tibetan" + """Script name for the Tibetan script.""" + + +class SentenceSentimentValue(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The predicted Sentiment for the sentence.""" + + POSITIVE = "positive" + """Positive sentiment""" + NEUTRAL = "neutral" + """Neutral sentiment""" + NEGATIVE = "negative" + """Negative sentiment""" + + +class SpeedUnit(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The speed Unit of measurement.""" + + UNSPECIFIED = "Unspecified" + """Unspecified speed unit.""" + METERS_PER_SECOND = "MetersPerSecond" + """Speed unit in meters per second.""" + KILOMETERS_PER_HOUR = "KilometersPerHour" + """Speed unit in kilometers per hour.""" + KILOMETERS_PER_MINUTE = "KilometersPerMinute" + """Speed unit in kilometers per minute.""" + KILOMETERS_PER_SECOND = "KilometersPerSecond" + """Speed unit in kilometers per second.""" + MILES_PER_HOUR = "MilesPerHour" + """Speed unit in miles per hour.""" + KNOTS = "Knots" + """Speed unit in knots.""" + FEET_PER_SECOND = "FeetPerSecond" + """Speed unit in feet per second.""" + FEET_PER_MINUTE = "FeetPerMinute" + """Speed unit in feet per minute.""" + YARDS_PER_MINUTE = "YardsPerMinute" + """Speed unit in yards per minute.""" + 
YARDS_PER_SECOND = "YardsPerSecond" + """Speed unit in yards per second.""" + METERS_PER_MILLISECOND = "MetersPerMillisecond" + """Speed unit in meters per millisecond.""" + CENTIMETERS_PER_MILLISECOND = "CentimetersPerMillisecond" + """Speed unit in centimeters per millisecond.""" + KILOMETERS_PER_MILLISECOND = "KilometersPerMillisecond" + """Speed unit in kilometers per millisecond.""" + + +class State(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The status of the task at the mentioned last update time.""" + + NOT_STARTED = "notStarted" + """Not started status""" + RUNNING = "running" + """Running status""" + SUCCEEDED = "succeeded" + """Succeeded status""" + PARTIALLY_COMPLETED = "partiallyCompleted" + """Partially completed status""" + FAILED = "failed" + """Failed status""" + CANCELLED = "cancelled" + """Cancelled status""" + CANCELLING = "cancelling" + """Cancelling status""" + + +class StringIndexType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Specifies the method used to interpret string offsets. Defaults to Text Elements (Graphemes) + according to Unicode v8.0.0. For additional information see + `https://aka.ms/text-analytics-offsets <https://aka.ms/text-analytics-offsets>`_. + """ + + TEXT_ELEMENTS_V8 = "TextElements_v8" + """Returned offset and length values will correspond to TextElements (Graphemes and Grapheme + clusters) conforming to the Unicode 8.0.0 standard. Use this option if your application is + written in .Net Framework or .Net Core and you will be using StringInfo.""" + UNICODE_CODE_POINT = "UnicodeCodePoint" + """Returned offset and length values will correspond to Unicode code points. Use this option if + your application is written in a language that supports Unicode, for example Python.""" + UTF16_CODE_UNIT = "Utf16CodeUnit" + """Returned offset and length values will correspond to UTF-16 code units.
Use this option if your + application is written in a language that supports Unicode, for example Java or JavaScript.""" + + +class SummaryLengthBucket(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Enum that defines the length of the output summaries.""" + + SHORT = "short" + """Instructs model to generate shorter length summaries.""" + MEDIUM = "medium" + """Instructs model to generate medium length summaries.""" + LONG = "long" + """Instructs model to generate longer length summaries.""" + + +class TargetRelationType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The type related to the target.""" + + ASSESSMENT = "assessment" + """Assessment relation.""" + TARGET = "target" + """Target relation.""" + + +class TemperatureUnit(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The temperature Unit of measurement.""" + + UNSPECIFIED = "Unspecified" + """Unspecified temperature unit""" + FAHRENHEIT = "Fahrenheit" + """Temperature unit in Fahrenheit""" + KELVIN = "Kelvin" + """Temperature unit in Kelvin""" + RANKINE = "Rankine" + """Temperature unit in Rankine""" + CELSIUS = "Celsius" + """Temperature unit in Celsius""" + + +class Temporality(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Describes temporal information regarding the entity.""" + + CURRENT = "current" + """Current temporality""" + PAST = "past" + """Past temporality""" + FUTURE = "future" + """Future temporality""" + + +class TemporalModifier(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """An optional modifier of a date/time instance.""" + + AFTER_APPROX = "AfterApprox" + """After an approximate time""" + BEFORE = "Before" + """Before a specific time""" + BEFORE_START = "BeforeStart" + """Before the start of a time period""" + APPROX = "Approx" + """Approximately at a specific time""" + REFERENCE_UNDEFINED = "ReferenceUndefined" + """Reference to an undefined time""" + SINCE_END = "SinceEnd" + """Since the end of a time period""" + AFTER_MID = "AfterMid" + """After the middle of a time period""" + START = "Start" + """At the start of a time period""" + AFTER = "After" + """After a specific time""" + BEFORE_END = "BeforeEnd" + """Before the end of a time period""" + UNTIL = "Until" + """Until a specific time""" + END = "End" + """At the end of a time period""" + LESS = "Less" + """Less than a specific time""" + SINCE = "Since" + """Since a specific time""" + AFTER_START = "AfterStart" + """After the start of a time period""" + BEFORE_APPROX = "BeforeApprox" + """Before an approximate time""" + MID = "Mid" + """In the middle of a time period""" + MORE = "More" + """More than a specific time""" + + +class TokenSentimentValue(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The predicted Sentiment for the token.""" + + POSITIVE = "positive" + """Positive sentiment""" + MIXED = "mixed" + """Mixed sentiment""" + NEGATIVE = "negative" + """Negative sentiment""" + + +class VolumeUnit(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The Volume Unit of measurement.""" + + UNSPECIFIED = "Unspecified" + """Unspecified volume unit.""" + CUBIC_METER = "CubicMeter" + """Volume unit in cubic meters.""" + CUBIC_CENTIMETER = "CubicCentimeter" + """Volume unit in cubic centimeters.""" + CUBIC_MILLIMETER = "CubicMillimeter" + """Volume unit in cubic millimeters.""" + HECTOLITER = "Hectoliter" + """Volume unit in hectoliters.""" + DECALITER = "Decaliter" + """Volume unit in decaliters.""" + LITER = "Liter" + """Volume unit in liters.""" + CENTILITER = "Centiliter" + """Volume unit in centiliters.""" + MILLILITER =
"Milliliter" + """Volume unit in milliliters.""" + CUBIC_YARD = "CubicYard" + """Volume unit in cubic yards.""" + CUBIC_INCH = "CubicInch" + """Volume unit in cubic inches.""" + CUBIC_FOOT = "CubicFoot" + """Volume unit in cubic feet.""" + CUBIC_MILE = "CubicMile" + """Volume unit in cubic miles.""" + FLUID_OUNCE = "FluidOunce" + """Volume unit in fluid ounces.""" + TEASPOON = "Teaspoon" + """Volume unit in teaspoons.""" + TABLESPOON = "Tablespoon" + """Volume unit in tablespoons.""" + PINT = "Pint" + """Volume unit in pints.""" + QUART = "Quart" + """Volume unit in quarts.""" + CUP = "Cup" + """Volume unit in cups.""" + GILL = "Gill" + """Volume unit in gills.""" + PINCH = "Pinch" + """Volume unit in pinches.""" + FLUID_DRAM = "FluidDram" + """Volume unit in fluid drams.""" + BARREL = "Barrel" + """Volume unit in barrels.""" + MINIM = "Minim" + """Volume unit in minims.""" + CORD = "Cord" + """Volume unit in cords.""" + PECK = "Peck" + """Volume unit in pecks.""" + BUSHEL = "Bushel" + """Volume unit in bushels.""" + HOGSHEAD = "Hogshead" + """Volume unit in hogsheads.""" + + +class WarningCodeValue(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Defines the list of the warning codes.""" + + LONG_WORDS_IN_DOCUMENT = "LongWordsInDocument" + """Long words in document warning""" + DOCUMENT_TRUNCATED = "DocumentTruncated" + """Document truncated warning""" + + +class WeightUnit(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The weight Unit of measurement.""" + + UNSPECIFIED = "Unspecified" + """Unspecified weight unit""" + KILOGRAM = "Kilogram" + """Weight unit in kilograms""" + GRAM = "Gram" + """Weight unit in grams""" + MILLIGRAM = "Milligram" + """Weight unit in milligrams""" + GALLON = "Gallon" + """Volume unit in gallons""" + METRIC_TON = "MetricTon" + """Weight unit in metric tons""" + TON = "Ton" + """Weight unit in tons""" + POUND = "Pound" + """Weight unit in pounds""" + OUNCE = "Ounce" + """Weight unit in ounces""" + GRAIN = "Grain" + """Weight unit in grains""" + PENNY_WEIGHT = "PennyWeight" + """Weight unit in pennyweights""" + LONG_TON_BRITISH = "LongTonBritish" + """Weight unit in long tons (British)""" + SHORT_TON_US = "ShortTonUS" + """Weight unit in short tons (US)""" + SHORT_HUNDRED_WEIGHT_US = "ShortHundredWeightUS" + """Weight unit in short hundredweights (US)""" + STONE = "Stone" + """Weight unit in stones""" + DRAM = "Dram" + """Weight unit in drams""" diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/models/_models.py b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/models/_models.py new file mode 100644 index 000000000000..274ab85c527b --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/models/_models.py @@ -0,0 +1,6624 @@ +# pylint: disable=line-too-long,useless-suppression,too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +# pylint: disable=useless-super-delegation + +import datetime +from typing import Any, Dict, List, Literal, Mapping, Optional, TYPE_CHECKING, Union, overload + +from .._utils.model_base import Model as _Model, rest_discriminator, rest_field +from ._enums import ( + AnalyzeTextLROResultsKind, + AnalyzeTextLROTaskKind, + AnalyzeTextTaskKind, + AnalyzeTextTaskResultsKind, + MetadataKind, + PolicyKind, + RedactionPolicyKind, +) + +if TYPE_CHECKING: + from .. import models as _models + + +class AnalyzeTextLROResult(_Model): + """Contains the AnalyzeText long running operation result object. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + AbstractiveSummarizationLROResult, CustomEntityRecognitionLROResult, + CustomMultiLabelClassificationLROResult, CustomSingleLabelClassificationLROResult, + EntityLinkingLROResult, EntityRecognitionLROResult, ExtractiveSummarizationLROResult, + HealthcareLROResult, KeyPhraseExtractionLROResult, PiiEntityRecognitionLROResult, + SentimentLROResult + + :ivar last_update_date_time: The last updated time in UTC for the task. Required. + :vartype last_update_date_time: ~datetime.datetime + :ivar status: The status of the task at the mentioned last update time. Required. Known values + are: "notStarted", "running", "succeeded", "partiallyCompleted", "failed", "cancelled", and + "cancelling". + :vartype status: str or ~azure.ai.language.text.models.State + :ivar task_name: task name. + :vartype task_name: str + :ivar kind: Kind of the task. Required. Known values are: "SentimentAnalysisLROResults", + "EntityRecognitionLROResults", "PiiEntityRecognitionLROResults", + "KeyPhraseExtractionLROResults", "EntityLinkingLROResults", "HealthcareLROResults", + "CustomEntityRecognitionLROResults", "CustomSingleLabelClassificationLROResults", + "CustomMultiLabelClassificationLROResults", "ExtractiveSummarizationLROResults", and + "AbstractiveSummarizationLROResults". + :vartype kind: str or ~azure.ai.language.text.models.AnalyzeTextLROResultsKind + """ + + __mapping__: Dict[str, _Model] = {} + last_update_date_time: datetime.datetime = rest_field( + name="lastUpdateDateTime", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) + """The last updated time in UTC for the task. Required.""" + status: Union[str, "_models.State"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The status of the task at the mentioned last update time. Required. Known values are: + \"notStarted\", \"running\", \"succeeded\", \"partiallyCompleted\", \"failed\", \"cancelled\", + and \"cancelling\".""" + task_name: Optional[str] = rest_field(name="taskName", visibility=["read", "create", "update", "delete", "query"]) + """task name.""" + kind: str = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) + """Kind of the task. Required. 
Known values are: \"SentimentAnalysisLROResults\", + \"EntityRecognitionLROResults\", \"PiiEntityRecognitionLROResults\", + \"KeyPhraseExtractionLROResults\", \"EntityLinkingLROResults\", \"HealthcareLROResults\", + \"CustomEntityRecognitionLROResults\", \"CustomSingleLabelClassificationLROResults\", + \"CustomMultiLabelClassificationLROResults\", \"ExtractiveSummarizationLROResults\", and + \"AbstractiveSummarizationLROResults\".""" + + @overload + def __init__( + self, + *, + last_update_date_time: datetime.datetime, + status: Union[str, "_models.State"], + kind: str, + task_name: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AbstractiveSummarizationLROResult(AnalyzeTextLROResult, discriminator="AbstractiveSummarizationLROResults"): + """An object representing the results for an Abstractive Summarization task. + + :ivar last_update_date_time: The last updated time in UTC for the task. Required. + :vartype last_update_date_time: ~datetime.datetime + :ivar status: The status of the task at the mentioned last update time. Required. Known values + are: "notStarted", "running", "succeeded", "partiallyCompleted", "failed", "cancelled", and + "cancelling". + :vartype status: str or ~azure.ai.language.text.models.State + :ivar task_name: task name. + :vartype task_name: str + :ivar kind: Kind of the task. Required. Abstractive summarization LRO results + :vartype kind: str or ~azure.ai.language.text.models.ABSTRACTIVE_SUMMARIZATION_LRO_RESULTS + :ivar results: Results of the task. Required. + :vartype results: ~azure.ai.language.text.models.AbstractiveSummarizationResult + """ + + kind: Literal[AnalyzeTextLROResultsKind.ABSTRACTIVE_SUMMARIZATION_LRO_RESULTS] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the task. Required. Abstractive summarization LRO results""" + results: "_models.AbstractiveSummarizationResult" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Results of the task. Required.""" + + @overload + def __init__( + self, + *, + last_update_date_time: datetime.datetime, + status: Union[str, "_models.State"], + results: "_models.AbstractiveSummarizationResult", + task_name: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=AnalyzeTextLROResultsKind.ABSTRACTIVE_SUMMARIZATION_LRO_RESULTS, **kwargs) + + +class AnalyzeTextLROTask(_Model): + """The long running task to be performed by the service on the input documents. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + AbstractiveSummarizationLROTask, CustomEntitiesLROTask, CustomMultiLabelClassificationLROTask, + CustomSingleLabelClassificationLROTask, EntityLinkingLROTask, EntitiesLROTask, + ExtractiveSummarizationLROTask, HealthcareLROTask, KeyPhraseLROTask, PiiLROTask, + SentimentAnalysisLROTask + + :ivar task_name: task name. + :vartype task_name: str + :ivar kind: The kind of task to perform. Required. 
Known values are: "SentimentAnalysis", + "EntityRecognition", "PiiEntityRecognition", "KeyPhraseExtraction", "EntityLinking", + "Healthcare", "CustomEntityRecognition", "CustomSingleLabelClassification", + "CustomMultiLabelClassification", "ExtractiveSummarization", and "AbstractiveSummarization". + :vartype kind: str or ~azure.ai.language.text.models.AnalyzeTextLROTaskKind + """ + + __mapping__: Dict[str, _Model] = {} + task_name: Optional[str] = rest_field(name="taskName", visibility=["read", "create", "update", "delete", "query"]) + """task name.""" + kind: str = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) + """The kind of task to perform. Required. Known values are: \"SentimentAnalysis\", + \"EntityRecognition\", \"PiiEntityRecognition\", \"KeyPhraseExtraction\", \"EntityLinking\", + \"Healthcare\", \"CustomEntityRecognition\", \"CustomSingleLabelClassification\", + \"CustomMultiLabelClassification\", \"ExtractiveSummarization\", and + \"AbstractiveSummarization\".""" + + @overload + def __init__( + self, + *, + kind: str, + task_name: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AbstractiveSummarizationLROTask(AnalyzeTextLROTask, discriminator="AbstractiveSummarization"): + """An object representing the task definition for an Abstractive Summarization task. + + :ivar task_name: task name. + :vartype task_name: str + :ivar kind: The Abstractive Summarization kind of the long running task. Required. Abstractive + summarization task + :vartype kind: str or ~azure.ai.language.text.models.ABSTRACTIVE_SUMMARIZATION + :ivar parameters: Parameters for the Abstractive Summarization task. + :vartype parameters: ~azure.ai.language.text.models.AbstractiveSummarizationTaskParameters + """ + + kind: Literal[AnalyzeTextLROTaskKind.ABSTRACTIVE_SUMMARIZATION] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The Abstractive Summarization kind of the long running task. Required. Abstractive + summarization task""" + parameters: Optional["_models.AbstractiveSummarizationTaskParameters"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Parameters for the Abstractive Summarization task.""" + + @overload + def __init__( + self, + *, + task_name: Optional[str] = None, + parameters: Optional["_models.AbstractiveSummarizationTaskParameters"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=AnalyzeTextLROTaskKind.ABSTRACTIVE_SUMMARIZATION, **kwargs) + + +class AbstractiveSummarizationResult(_Model): + """An object representing the pre-built Abstractive Summarization results of each document. + + :ivar errors: Errors by document id. Required. + :vartype errors: list[~azure.ai.language.text.models.DocumentError] + :ivar statistics: if showStats=true was specified in the request this field will contain + information about the request payload. 
+ :vartype statistics: ~azure.ai.language.text.models.RequestStatistics + :ivar model_version: This field indicates which model is used for scoring. Required. + :vartype model_version: str + :ivar documents: Response by document. Required. + :vartype documents: + list[~azure.ai.language.text.models.AbstractiveSummaryDocumentResultWithDetectedLanguage] + """ + + errors: List["_models.DocumentError"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Errors by document id. Required.""" + statistics: Optional["_models.RequestStatistics"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """if showStats=true was specified in the request this field will contain information about the + request payload.""" + model_version: str = rest_field(name="modelVersion", visibility=["read", "create", "update", "delete", "query"]) + """This field indicates which model is used for scoring. Required.""" + documents: List["_models.AbstractiveSummaryDocumentResultWithDetectedLanguage"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Response by document. Required.""" + + @overload + def __init__( + self, + *, + errors: List["_models.DocumentError"], + model_version: str, + documents: List["_models.AbstractiveSummaryDocumentResultWithDetectedLanguage"], + statistics: Optional["_models.RequestStatistics"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AbstractiveSummarizationTaskParameters(_Model): + """Supported parameters for the pre-built Abstractive Summarization task. + + :ivar logging_opt_out: logging opt out. + :vartype logging_opt_out: bool + :ivar model_version: model version. + :vartype model_version: str + :ivar sentence_count: Controls the approximate number of sentences in the output summaries. + :vartype sentence_count: int + :ivar string_index_type: String index type. Known values are: "TextElements_v8", + "UnicodeCodePoint", and "Utf16CodeUnit". + :vartype string_index_type: str or ~azure.ai.language.text.models.StringIndexType + :ivar summary_length: (NOTE: Recommended to use summaryLength over sentenceCount) Controls the + approximate length of the output summaries. Known values are: "short", "medium", and "long". + :vartype summary_length: str or ~azure.ai.language.text.models.SummaryLengthBucket + :ivar instruction: (Optional) If provided, the query will be used to generate the summary. + :vartype instruction: str + """ + + logging_opt_out: Optional[bool] = rest_field( + name="loggingOptOut", visibility=["read", "create", "update", "delete", "query"] + ) + """logging opt out.""" + model_version: Optional[str] = rest_field( + name="modelVersion", visibility=["read", "create", "update", "delete", "query"] + ) + """model version.""" + sentence_count: Optional[int] = rest_field( + name="sentenceCount", visibility=["read", "create", "update", "delete", "query"] + ) + """Controls the approximate number of sentences in the output summaries.""" + string_index_type: Optional[Union[str, "_models.StringIndexType"]] = rest_field( + name="stringIndexType", visibility=["read", "create", "update", "delete", "query"] + ) + """String index type. 
Known values are: \"TextElements_v8\", \"UnicodeCodePoint\", and + \"Utf16CodeUnit\".""" + summary_length: Optional[Union[str, "_models.SummaryLengthBucket"]] = rest_field( + name="summaryLength", visibility=["read", "create", "update", "delete", "query"] + ) + """(NOTE: Recommended to use summaryLength over sentenceCount) Controls the approximate length of + the output summaries. Known values are: \"short\", \"medium\", and \"long\".""" + instruction: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """(Optional) If provided, the query will be used to generate the summary.""" + + @overload + def __init__( + self, + *, + logging_opt_out: Optional[bool] = None, + model_version: Optional[str] = None, + sentence_count: Optional[int] = None, + string_index_type: Optional[Union[str, "_models.StringIndexType"]] = None, + summary_length: Optional[Union[str, "_models.SummaryLengthBucket"]] = None, + instruction: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AbstractiveSummary(_Model): + """An object representing a single summary with context for given document. + + :ivar text: The text of the summary. Required. + :vartype text: str + :ivar contexts: The context list of the summary. + :vartype contexts: list[~azure.ai.language.text.models.SummaryContext] + """ + + text: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The text of the summary. Required.""" + contexts: Optional[List["_models.SummaryContext"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The context list of the summary.""" + + @overload + def __init__( + self, + *, + text: str, + contexts: Optional[List["_models.SummaryContext"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AbstractiveSummaryDocumentResultWithDetectedLanguage(_Model): # pylint: disable=name-too-long + """An object representing the Abstractive Summarization result of a single document with detected + language. + + :ivar id: Unique, non-empty document identifier. Required. + :vartype id: str + :ivar warnings: Warnings encountered while processing document. Required. + :vartype warnings: list[~azure.ai.language.text.models.DocumentWarning] + :ivar statistics: if showStats=true was specified in the request this field will contain + information about the document payload. + :vartype statistics: ~azure.ai.language.text.models.DocumentStatistics + :ivar summaries: A list of abstractive summaries. Required. + :vartype summaries: list[~azure.ai.language.text.models.AbstractiveSummary] + :ivar detected_language: If 'language' is set to 'auto' for the document in the request this + field will contain a 2 letter ISO 639-1 representation of the language detected for this + document. + :vartype detected_language: ~azure.ai.language.text.models.DetectedLanguage + """ + + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Unique, non-empty document identifier. 
Required.""" + warnings: List["_models.DocumentWarning"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Warnings encountered while processing document. Required.""" + statistics: Optional["_models.DocumentStatistics"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """if showStats=true was specified in the request this field will contain information about the + document payload.""" + summaries: List["_models.AbstractiveSummary"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """A list of abstractive summaries. Required.""" + detected_language: Optional["_models.DetectedLanguage"] = rest_field( + name="detectedLanguage", visibility=["read", "create", "update", "delete", "query"] + ) + """If 'language' is set to 'auto' for the document in the request this field will contain a 2 + letter ISO 639-1 representation of the language detected for this document.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + warnings: List["_models.DocumentWarning"], + summaries: List["_models.AbstractiveSummary"], + statistics: Optional["_models.DocumentStatistics"] = None, + detected_language: Optional["_models.DetectedLanguage"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class BaseMetadata(_Model): + """The abstract base class for entity Metadata. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + AgeMetadata, AreaMetadata, CurrencyMetadata, DateMetadata, DateTimeMetadata, + InformationMetadata, LengthMetadata, NumberMetadata, NumericRangeMetadata, OrdinalMetadata, + SpeedMetadata, TemperatureMetadata, TemporalSetMetadata, TemporalSpanMetadata, TimeMetadata, + VolumeMetadata, WeightMetadata + + :ivar metadata_kind: The entity Metadata object kind. Required. Known values are: + "DateMetadata", "DateTimeMetadata", "TimeMetadata", "TemporalSetMetadata", "NumberMetadata", + "OrdinalMetadata", "SpeedMetadata", "WeightMetadata", "LengthMetadata", "VolumeMetadata", + "AreaMetadata", "AgeMetadata", "InformationMetadata", "TemperatureMetadata", + "CurrencyMetadata", "NumericRangeMetadata", and "TemporalSpanMetadata". + :vartype metadata_kind: str or ~azure.ai.language.text.models.MetadataKind + """ + + __mapping__: Dict[str, _Model] = {} + metadata_kind: str = rest_discriminator( + name="metadataKind", visibility=["read", "create", "update", "delete", "query"] + ) + """The entity Metadata object kind. Required. Known values are: \"DateMetadata\", + \"DateTimeMetadata\", \"TimeMetadata\", \"TemporalSetMetadata\", \"NumberMetadata\", + \"OrdinalMetadata\", \"SpeedMetadata\", \"WeightMetadata\", \"LengthMetadata\", + \"VolumeMetadata\", \"AreaMetadata\", \"AgeMetadata\", \"InformationMetadata\", + \"TemperatureMetadata\", \"CurrencyMetadata\", \"NumericRangeMetadata\", and + \"TemporalSpanMetadata\".""" + + @overload + def __init__( + self, + *, + metadata_kind: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AgeMetadata(BaseMetadata, discriminator="AgeMetadata"): + """Represents the Age entity Metadata model. + + :ivar value: The numeric value that the extracted text denotes. Required. + :vartype value: float + :ivar metadata_kind: Kind of the metadata. Required. Metadata for age-related values. + :vartype metadata_kind: str or ~azure.ai.language.text.models.AGE_METADATA + :ivar unit: Unit of measure for age. Required. Known values are: "Unspecified", "Year", + "Month", "Week", and "Day". + :vartype unit: str or ~azure.ai.language.text.models.AgeUnit + """ + + value: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The numeric value that the extracted text denotes. Required.""" + metadata_kind: Literal[MetadataKind.AGE_METADATA] = rest_discriminator(name="metadataKind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the metadata. Required. Metadata for age-related values.""" + unit: Union[str, "_models.AgeUnit"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Unit of measure for age. Required. Known values are: \"Unspecified\", \"Year\", \"Month\", + \"Week\", and \"Day\".""" + + @overload + def __init__( + self, + *, + value: float, + unit: Union[str, "_models.AgeUnit"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, metadata_kind=MetadataKind.AGE_METADATA, **kwargs) + + +class BaseEntityOverlapPolicy(_Model): + """The abstract base class for entity OverlapPolicy. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + AllowOverlapEntityPolicyType, MatchLongestEntityPolicyType + + :ivar policy_kind: The entity OverlapPolicy object kind. Required. Known values are: + "matchLongest" and "allowOverlap". + :vartype policy_kind: str or ~azure.ai.language.text.models.PolicyKind + """ + + __mapping__: Dict[str, _Model] = {} + policy_kind: str = rest_discriminator(name="policyKind", visibility=["read", "create", "update", "delete", "query"]) + """The entity OverlapPolicy object kind. Required. Known values are: \"matchLongest\" and + \"allowOverlap\".""" + + @overload + def __init__( + self, + *, + policy_kind: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AllowOverlapEntityPolicyType(BaseEntityOverlapPolicy, discriminator="allowOverlap"): + """Represents the allow overlap policy. Will apply no post processing logic for the entities. + Whatever the model predicts is what will be returned to the user. This allows the user to get a + full view of every single model's possible values and apply their own custom logic on entity + selection. + + :ivar policy_kind: The entity OverlapPolicy object kind. Required. 
Represents + AllowOverlapEntityPolicyType + :vartype policy_kind: str or ~azure.ai.language.text.models.ALLOW_OVERLAP + """ + + policy_kind: Literal[PolicyKind.ALLOW_OVERLAP] = rest_discriminator(name="policyKind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The entity OverlapPolicy object kind. Required. Represents AllowOverlapEntityPolicyType""" + + @overload + def __init__( + self, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, policy_kind=PolicyKind.ALLOW_OVERLAP, **kwargs) + + +class AnalyzeTextTask(_Model): + """Collection of documents to analyze and a single task to execute. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + AnalyzeTextEntityLinkingInput, AnalyzeTextEntityRecognitionInput, + AnalyzeTextKeyPhraseExtractionInput, AnalyzeTextLanguageDetectionInput, + AnalyzeTextPiiEntitiesRecognitionInput, AnalyzeTextSentimentAnalysisInput + + :ivar kind: The kind of task to perform. Required. Known values are: "SentimentAnalysis", + "EntityRecognition", "PiiEntityRecognition", "KeyPhraseExtraction", "LanguageDetection", and + "EntityLinking". + :vartype kind: str or ~azure.ai.language.text.models.AnalyzeTextTaskKind + """ + + __mapping__: Dict[str, _Model] = {} + kind: str = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) + """The kind of task to perform. Required. Known values are: \"SentimentAnalysis\", + \"EntityRecognition\", \"PiiEntityRecognition\", \"KeyPhraseExtraction\", + \"LanguageDetection\", and \"EntityLinking\".""" + + @overload + def __init__( + self, + *, + kind: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AnalyzeTextEntityLinkingInput(AnalyzeTextTask, discriminator="EntityLinking"): + """Contains the analyze text Entity linking input. + + :ivar kind: Kind for Entity linking input. Required. Entity linking task + :vartype kind: str or ~azure.ai.language.text.models.ENTITY_LINKING + :ivar analysis_input: Contains the analysis input to be handled by the service. + :vartype analysis_input: ~azure.ai.language.text.models.MultiLanguageAnalysisInput + :ivar parameters: Task parameters. + :vartype parameters: ~azure.ai.language.text.models.EntityLinkingTaskParameters + """ + + kind: Literal[AnalyzeTextTaskKind.ENTITY_LINKING] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind for Entity linking input. Required. 
Entity linking task""" + analysis_input: Optional["_models.MultiLanguageAnalysisInput"] = rest_field( + name="analysisInput", visibility=["read", "create", "update", "delete", "query"] + ) + """Contains the analysis input to be handled by the service.""" + parameters: Optional["_models.EntityLinkingTaskParameters"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Task parameters.""" + + @overload + def __init__( + self, + *, + analysis_input: Optional["_models.MultiLanguageAnalysisInput"] = None, + parameters: Optional["_models.EntityLinkingTaskParameters"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=AnalyzeTextTaskKind.ENTITY_LINKING, **kwargs) + + +class AnalyzeTextEntityRecognitionInput(AnalyzeTextTask, discriminator="EntityRecognition"): + """The entity recognition analyze text input task request. + + :ivar kind: The kind of task. Required. Entity recognition task + :vartype kind: str or ~azure.ai.language.text.models.ENTITY_RECOGNITION + :ivar analysis_input: The input to be analyzed. + :vartype analysis_input: ~azure.ai.language.text.models.MultiLanguageAnalysisInput + :ivar parameters: Task parameters. + :vartype parameters: ~azure.ai.language.text.models.EntitiesTaskParameters + """ + + kind: Literal[AnalyzeTextTaskKind.ENTITY_RECOGNITION] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The kind of task. Required. Entity recognition task""" + analysis_input: Optional["_models.MultiLanguageAnalysisInput"] = rest_field( + name="analysisInput", visibility=["read", "create", "update", "delete", "query"] + ) + """The input to be analyzed.""" + parameters: Optional["_models.EntitiesTaskParameters"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Task parameters.""" + + @overload + def __init__( + self, + *, + analysis_input: Optional["_models.MultiLanguageAnalysisInput"] = None, + parameters: Optional["_models.EntitiesTaskParameters"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=AnalyzeTextTaskKind.ENTITY_RECOGNITION, **kwargs) + + +class AnalyzeTextJobState(_Model): + """The object containing the analyze job LRO job state. + + :ivar display_name: display name. + :vartype display_name: str + :ivar created_date_time: Date and time job created. Required. + :vartype created_date_time: ~datetime.datetime + :ivar expiration_date_time: Date and time job expires. + :vartype expiration_date_time: ~datetime.datetime + :ivar job_id: job ID. Required. + :vartype job_id: str + :ivar last_updated_date_time: last updated date and time. Required. + :vartype last_updated_date_time: ~datetime.datetime + :ivar status: status. Required. Known values are: "notStarted", "running", "succeeded", + "partiallyCompleted", "failed", "cancelled", and "cancelling". + :vartype status: str or ~azure.ai.language.text.models.State + :ivar errors: errors. + :vartype errors: list[~azure.ai.language.text.models.Error] + :ivar next_link: next link. + :vartype next_link: str + :ivar tasks: List of tasks. Required. 
+ :vartype tasks: ~azure.ai.language.text.models.Tasks + :ivar statistics: if showStats=true was specified in the request this field will contain + information about the request payload. + :vartype statistics: ~azure.ai.language.text.models.RequestStatistics + """ + + display_name: Optional[str] = rest_field( + name="displayName", visibility=["read", "create", "update", "delete", "query"] + ) + """display name.""" + created_date_time: datetime.datetime = rest_field( + name="createdDateTime", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) + """Date and time job created. Required.""" + expiration_date_time: Optional[datetime.datetime] = rest_field( + name="expirationDateTime", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) + """Date and time job expires.""" + job_id: str = rest_field(name="jobId", visibility=["read"]) + """job ID. Required.""" + last_updated_date_time: datetime.datetime = rest_field( + name="lastUpdatedDateTime", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) + """last updated date and time. Required.""" + status: Union[str, "_models.State"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """status. Required. Known values are: \"notStarted\", \"running\", \"succeeded\", + \"partiallyCompleted\", \"failed\", \"cancelled\", and \"cancelling\".""" + errors: Optional[List["_models.Error"]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """errors.""" + next_link: Optional[str] = rest_field(name="nextLink", visibility=["read", "create", "update", "delete", "query"]) + """next link.""" + tasks: "_models.Tasks" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """List of tasks. Required.""" + statistics: Optional["_models.RequestStatistics"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """if showStats=true was specified in the request this field will contain information about the + request payload.""" + + @overload + def __init__( + self, + *, + created_date_time: datetime.datetime, + last_updated_date_time: datetime.datetime, + status: Union[str, "_models.State"], + tasks: "_models.Tasks", + display_name: Optional[str] = None, + expiration_date_time: Optional[datetime.datetime] = None, + errors: Optional[List["_models.Error"]] = None, + next_link: Optional[str] = None, + statistics: Optional["_models.RequestStatistics"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AnalyzeTextKeyPhraseExtractionInput(AnalyzeTextTask, discriminator="KeyPhraseExtraction"): + """Contains the analyze text KeyPhraseExtraction task input. + + :ivar kind: Kind of the task. Required. Key phrase extraction task + :vartype kind: str or ~azure.ai.language.text.models.KEY_PHRASE_EXTRACTION + :ivar analysis_input: Contains the input documents. + :vartype analysis_input: ~azure.ai.language.text.models.MultiLanguageAnalysisInput + :ivar parameters: Key phrase extraction task parameters. 
+ :vartype parameters: ~azure.ai.language.text.models.KeyPhraseTaskParameters + """ + + kind: Literal[AnalyzeTextTaskKind.KEY_PHRASE_EXTRACTION] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the task. Required. Key phrase extraction task""" + analysis_input: Optional["_models.MultiLanguageAnalysisInput"] = rest_field( + name="analysisInput", visibility=["read", "create", "update", "delete", "query"] + ) + """Contains the input documents.""" + parameters: Optional["_models.KeyPhraseTaskParameters"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Key phrase extraction task parameters.""" + + @overload + def __init__( + self, + *, + analysis_input: Optional["_models.MultiLanguageAnalysisInput"] = None, + parameters: Optional["_models.KeyPhraseTaskParameters"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=AnalyzeTextTaskKind.KEY_PHRASE_EXTRACTION, **kwargs) + + +class AnalyzeTextLanguageDetectionInput(AnalyzeTextTask, discriminator="LanguageDetection"): + """Contains the language detection document analysis task input. + + :ivar kind: Kind of the task. Required. Language detection task + :vartype kind: str or ~azure.ai.language.text.models.LANGUAGE_DETECTION + :ivar analysis_input: Documents to be analyzed. + :vartype analysis_input: ~azure.ai.language.text.models.LanguageDetectionAnalysisInput + :ivar parameters: task parameters. + :vartype parameters: ~azure.ai.language.text.models.LanguageDetectionTaskParameters + """ + + kind: Literal[AnalyzeTextTaskKind.LANGUAGE_DETECTION] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the task. Required. Language detection task""" + analysis_input: Optional["_models.LanguageDetectionAnalysisInput"] = rest_field( + name="analysisInput", visibility=["read", "create", "update", "delete", "query"] + ) + """Documents to be analyzed.""" + parameters: Optional["_models.LanguageDetectionTaskParameters"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """task parameters.""" + + @overload + def __init__( + self, + *, + analysis_input: Optional["_models.LanguageDetectionAnalysisInput"] = None, + parameters: Optional["_models.LanguageDetectionTaskParameters"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=AnalyzeTextTaskKind.LANGUAGE_DETECTION, **kwargs) + + +class AnalyzeTextPiiEntitiesRecognitionInput(AnalyzeTextTask, discriminator="PiiEntityRecognition"): + """Contains the analyze text PIIEntityRecognition task input. + + :ivar kind: Kind of the task. Required. PII entity recognition task + :vartype kind: str or ~azure.ai.language.text.models.PII_ENTITY_RECOGNITION + :ivar analysis_input: Contains the input documents. + :vartype analysis_input: ~azure.ai.language.text.models.MultiLanguageAnalysisInput + :ivar parameters: Pii task parameters. 
+ :vartype parameters: ~azure.ai.language.text.models.PiiTaskParameters + """ + + kind: Literal[AnalyzeTextTaskKind.PII_ENTITY_RECOGNITION] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the task. Required. PII entity recognition task""" + analysis_input: Optional["_models.MultiLanguageAnalysisInput"] = rest_field( + name="analysisInput", visibility=["read", "create", "update", "delete", "query"] + ) + """Contains the input documents.""" + parameters: Optional["_models.PiiTaskParameters"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Pii task parameters.""" + + @overload + def __init__( + self, + *, + analysis_input: Optional["_models.MultiLanguageAnalysisInput"] = None, + parameters: Optional["_models.PiiTaskParameters"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=AnalyzeTextTaskKind.PII_ENTITY_RECOGNITION, **kwargs) + + +class AnalyzeTextSentimentAnalysisInput(AnalyzeTextTask, discriminator="SentimentAnalysis"): + """Contains the analyze text SentimentAnalysis task input. + + :ivar kind: Kind of the task. Required. Sentiment analysis task + :vartype kind: str or ~azure.ai.language.text.models.SENTIMENT_ANALYSIS + :ivar analysis_input: Contains the input documents. + :vartype analysis_input: ~azure.ai.language.text.models.MultiLanguageAnalysisInput + :ivar parameters: Sentiment Analysis task parameters. + :vartype parameters: ~azure.ai.language.text.models.SentimentAnalysisTaskParameters + """ + + kind: Literal[AnalyzeTextTaskKind.SENTIMENT_ANALYSIS] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the task. Required. Sentiment analysis task""" + analysis_input: Optional["_models.MultiLanguageAnalysisInput"] = rest_field( + name="analysisInput", visibility=["read", "create", "update", "delete", "query"] + ) + """Contains the input documents.""" + parameters: Optional["_models.SentimentAnalysisTaskParameters"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Sentiment Analysis task parameters.""" + + @overload + def __init__( + self, + *, + analysis_input: Optional["_models.MultiLanguageAnalysisInput"] = None, + parameters: Optional["_models.SentimentAnalysisTaskParameters"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=AnalyzeTextTaskKind.SENTIMENT_ANALYSIS, **kwargs) + + +class AnalyzeTextTaskResult(_Model): + """The result object for the analyze task. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + EntityLinkingTaskResult, EntitiesTaskResult, KeyPhraseTaskResult, LanguageDetectionTaskResult, + PiiTaskResult, SentimentTaskResult + + :ivar kind: The kind of task result. Required. Known values are: "SentimentAnalysisResults", + "EntityRecognitionResults", "PiiEntityRecognitionResults", "KeyPhraseExtractionResults", + "LanguageDetectionResults", and "EntityLinkingResults". 
+ :vartype kind: str or ~azure.ai.language.text.models.AnalyzeTextTaskResultsKind + """ + + __mapping__: Dict[str, _Model] = {} + kind: str = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) + """The kind of task result. Required. Known values are: \"SentimentAnalysisResults\", + \"EntityRecognitionResults\", \"PiiEntityRecognitionResults\", \"KeyPhraseExtractionResults\", + \"LanguageDetectionResults\", and \"EntityLinkingResults\".""" + + @overload + def __init__( + self, + *, + kind: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AreaMetadata(BaseMetadata, discriminator="AreaMetadata"): + """Represents the Area entity Metadata model. + + :ivar value: The numeric value that the extracted text denotes. Required. + :vartype value: float + :ivar metadata_kind: Kind of the metadata. Required. Metadata for area-related values. + :vartype metadata_kind: str or ~azure.ai.language.text.models.AREA_METADATA + :ivar unit: Unit of measure for area. Required. Known values are: "Unspecified", + "SquareKilometer", "SquareHectometer", "SquareDecameter", "SquareDecimeter", "SquareMeter", + "SquareCentimeter", "SquareMillimeter", "SquareInch", "SquareFoot", "SquareMile", "SquareYard", + and "Acre". + :vartype unit: str or ~azure.ai.language.text.models.AreaUnit + """ + + value: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The numeric value that the extracted text denotes. Required.""" + metadata_kind: Literal[MetadataKind.AREA_METADATA] = rest_discriminator(name="metadataKind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the metadata. Required. Metadata for area-related values.""" + unit: Union[str, "_models.AreaUnit"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Unit of measure for area. Required. Known values are: \"Unspecified\", \"SquareKilometer\", + \"SquareHectometer\", \"SquareDecameter\", \"SquareDecimeter\", \"SquareMeter\", + \"SquareCentimeter\", \"SquareMillimeter\", \"SquareInch\", \"SquareFoot\", \"SquareMile\", + \"SquareYard\", and \"Acre\".""" + + @overload + def __init__( + self, + *, + value: float, + unit: Union[str, "_models.AreaUnit"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, metadata_kind=MetadataKind.AREA_METADATA, **kwargs) + + +class BaseRedactionPolicy(_Model): + """The abstract base class for RedactionPolicy. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + CharacterMaskPolicyType, EntityMaskPolicyType, NoMaskPolicyType + + :ivar policy_kind: The entity RedactionPolicy object kind. Required. Known values are: + "noMask", "characterMask", and "entityMask". + :vartype policy_kind: str or ~azure.ai.language.text.models.RedactionPolicyKind + """ + + __mapping__: Dict[str, _Model] = {} + policy_kind: str = rest_discriminator(name="policyKind", visibility=["read", "create", "update", "delete", "query"]) + """The entity RedactionPolicy object kind. Required. 
Known values are: \"noMask\", + \"characterMask\", and \"entityMask\".""" + + @overload + def __init__( + self, + *, + policy_kind: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class CharacterMaskPolicyType(BaseRedactionPolicy, discriminator="characterMask"): + """Represents the policy of redacting with a redaction character. + + :ivar policy_kind: The entity RedactionPolicy object kind. Required. React detected entities + with redaction character. + :vartype policy_kind: str or ~azure.ai.language.text.models.CHARACTER_MASK + :ivar redaction_character: Optional parameter to use a Custom Character to be used for + redaction in PII responses. Default character will bce * as before. We allow specific ascii + characters for redaction. Known values are: "!", "#", "$", "%", "&", "*", "+", "-", "=", "?", + "@", "^", "_", and "~". + :vartype redaction_character: str or ~azure.ai.language.text.models.RedactionCharacter + """ + + policy_kind: Literal[RedactionPolicyKind.CHARACTER_MASK] = rest_discriminator(name="policyKind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The entity RedactionPolicy object kind. Required. React detected entities with redaction + character.""" + redaction_character: Optional[Union[str, "_models.RedactionCharacter"]] = rest_field( + name="redactionCharacter", visibility=["read", "create", "update", "delete", "query"] + ) + """Optional parameter to use a Custom Character to be used for redaction in PII responses. Default + character will bce * as before. We allow specific ascii characters for redaction. Known values + are: \"!\", \"#\", \"$\", \"%\", \"&\", \"*\", \"+\", \"-\", \"=\", \"?\", \"@\", \"^\", \"_\", + and \"~\".""" + + @overload + def __init__( + self, + *, + redaction_character: Optional[Union[str, "_models.RedactionCharacter"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, policy_kind=RedactionPolicyKind.CHARACTER_MASK, **kwargs) + + +class ClassificationDocumentResultWithDetectedLanguage(_Model): # pylint: disable=name-too-long + """Contains the classification doc result for the task with detected language. + + :ivar id: Unique, non-empty document identifier. Required. + :vartype id: str + :ivar warnings: Warnings encountered while processing document. Required. + :vartype warnings: list[~azure.ai.language.text.models.DocumentWarning] + :ivar statistics: if showStats=true was specified in the request this field will contain + information about the document payload. + :vartype statistics: ~azure.ai.language.text.models.DocumentStatistics + :ivar class_property: Contains the classification doc results for all docs. Required. + :vartype class_property: list[~azure.ai.language.text.models.ClassificationResult] + :ivar detected_language: If 'language' is set to 'auto' for the document in the request this + field will contain a 2 letter ISO 639-1 representation of the language detected for this + document. 
+ :vartype detected_language: ~azure.ai.language.text.models.DetectedLanguage + """ + + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Unique, non-empty document identifier. Required.""" + warnings: List["_models.DocumentWarning"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Warnings encountered while processing document. Required.""" + statistics: Optional["_models.DocumentStatistics"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """if showStats=true was specified in the request this field will contain information about the + document payload.""" + class_property: List["_models.ClassificationResult"] = rest_field( + name="class", visibility=["read", "create", "update", "delete", "query"] + ) + """Contains the classification doc results for all docs. Required.""" + detected_language: Optional["_models.DetectedLanguage"] = rest_field( + name="detectedLanguage", visibility=["read", "create", "update", "delete", "query"] + ) + """If 'language' is set to 'auto' for the document in the request this field will contain a 2 + letter ISO 639-1 representation of the language detected for this document.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + warnings: List["_models.DocumentWarning"], + class_property: List["_models.ClassificationResult"], + statistics: Optional["_models.DocumentStatistics"] = None, + detected_language: Optional["_models.DetectedLanguage"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ClassificationResult(_Model): + """Contains the classification result. + + :ivar category: Classification type. Required. + :vartype category: str + :ivar confidence_score: Confidence score between 0 and 1 of the recognized class. Required. + :vartype confidence_score: float + """ + + category: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Classification type. Required.""" + confidence_score: float = rest_field( + name="confidenceScore", visibility=["read", "create", "update", "delete", "query"] + ) + """Confidence score between 0 and 1 of the recognized class. Required.""" + + @overload + def __init__( + self, + *, + category: str, + confidence_score: float, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class CurrencyMetadata(BaseMetadata, discriminator="CurrencyMetadata"): + """Represents the Currency ) entity Metadata model. + + :ivar value: The numeric value that the extracted text denotes. Required. + :vartype value: float + :ivar metadata_kind: Kind of the metadata. Required. Metadata for currency-related values. + :vartype metadata_kind: str or ~azure.ai.language.text.models.CURRENCY_METADATA + :ivar unit: Currency unit. Required. + :vartype unit: str + :ivar iso4217: The alphabetic code based on another ISO standard, ISO 3166, which lists the + codes for country names. 
The first two letters of the ISO 4217 three-letter code are the same + as the code for the country name, and, where possible, the third letter corresponds to the + first letter of the currency name. + :vartype iso4217: str + """ + + value: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The numeric value that the extracted text denotes. Required.""" + metadata_kind: Literal[MetadataKind.CURRENCY_METADATA] = rest_discriminator(name="metadataKind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the metadata. Required. Metadata for currency-related values.""" + unit: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Currency unit. Required.""" + iso4217: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The alphabetic code based on another ISO standard, ISO 3166, which lists the codes for country + names. The first two letters of the ISO 4217 three-letter code are the same as the code for the + country name, and, where possible, the third letter corresponds to the first letter of the + currency name.""" + + @overload + def __init__( + self, + *, + value: float, + unit: str, + iso4217: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, metadata_kind=MetadataKind.CURRENCY_METADATA, **kwargs) + + +class CustomEntitiesLROTask(AnalyzeTextLROTask, discriminator="CustomEntityRecognition"): + """Contains the custom text LRO task. + + :ivar task_name: task name. + :vartype task_name: str + :ivar kind: Kind of the task. Required. Custom entity recognition task + :vartype kind: str or ~azure.ai.language.text.models.CUSTOM_ENTITY_RECOGNITION + :ivar parameters: task parameters. + :vartype parameters: ~azure.ai.language.text.models.CustomEntitiesTaskParameters + """ + + kind: Literal[AnalyzeTextLROTaskKind.CUSTOM_ENTITY_RECOGNITION] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the task. Required. Custom entity recognition task""" + parameters: Optional["_models.CustomEntitiesTaskParameters"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """task parameters.""" + + @overload + def __init__( + self, + *, + task_name: Optional[str] = None, + parameters: Optional["_models.CustomEntitiesTaskParameters"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=AnalyzeTextLROTaskKind.CUSTOM_ENTITY_RECOGNITION, **kwargs) + + +class CustomEntitiesResult(_Model): + """Contains the list of detected custom entities result for the documents. + + :ivar errors: Errors by document id. Required. + :vartype errors: list[~azure.ai.language.text.models.DocumentError] + :ivar statistics: if showStats=true was specified in the request this field will contain + information about the request payload. + :vartype statistics: ~azure.ai.language.text.models.RequestStatistics + :ivar project_name: This field indicates the project name for the model. Required. 
+ :vartype project_name: str + :ivar deployment_name: This field indicates the deployment name for the model. Required. + :vartype deployment_name: str + :ivar documents: Enumeration of the document results. Required. + :vartype documents: + list[~azure.ai.language.text.models.EntitiesDocumentResultWithDetectedLanguage] + """ + + errors: List["_models.DocumentError"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Errors by document id. Required.""" + statistics: Optional["_models.RequestStatistics"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """if showStats=true was specified in the request this field will contain information about the + request payload.""" + project_name: str = rest_field(name="projectName", visibility=["read", "create", "update", "delete", "query"]) + """This field indicates the project name for the model. Required.""" + deployment_name: str = rest_field(name="deploymentName", visibility=["read", "create", "update", "delete", "query"]) + """This field indicates the deployment name for the model. Required.""" + documents: List["_models.EntitiesDocumentResultWithDetectedLanguage"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Enumeration of the document results. Required.""" + + @overload + def __init__( + self, + *, + errors: List["_models.DocumentError"], + project_name: str, + deployment_name: str, + documents: List["_models.EntitiesDocumentResultWithDetectedLanguage"], + statistics: Optional["_models.RequestStatistics"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class CustomEntitiesTaskParameters(_Model): + """Supported parameters for a Custom Entities task. + + :ivar logging_opt_out: logging opt out. + :vartype logging_opt_out: bool + :ivar project_name: This field indicates the project name for the model. Required. + :vartype project_name: str + :ivar deployment_name: This field indicates the deployment name for the model. Required. + :vartype deployment_name: str + :ivar string_index_type: Optional parameter to provide the string index type used to interpret + string offsets. Defaults to TextElements (Graphemes). Known values are: "TextElements_v8", + "UnicodeCodePoint", and "Utf16CodeUnit". + :vartype string_index_type: str or ~azure.ai.language.text.models.StringIndexType + """ + + logging_opt_out: Optional[bool] = rest_field( + name="loggingOptOut", visibility=["read", "create", "update", "delete", "query"] + ) + """logging opt out.""" + project_name: str = rest_field(name="projectName", visibility=["read", "create", "update", "delete", "query"]) + """This field indicates the project name for the model. Required.""" + deployment_name: str = rest_field(name="deploymentName", visibility=["read", "create", "update", "delete", "query"]) + """This field indicates the deployment name for the model. Required.""" + string_index_type: Optional[Union[str, "_models.StringIndexType"]] = rest_field( + name="stringIndexType", visibility=["read", "create", "update", "delete", "query"] + ) + """Optional parameter to provide the string index type used to interpret string offsets. Defaults + to TextElements (Graphemes). 
Known values are: \"TextElements_v8\", \"UnicodeCodePoint\", and + \"Utf16CodeUnit\".""" + + @overload + def __init__( + self, + *, + project_name: str, + deployment_name: str, + logging_opt_out: Optional[bool] = None, + string_index_type: Optional[Union[str, "_models.StringIndexType"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class CustomEntityRecognitionLROResult(AnalyzeTextLROResult, discriminator="CustomEntityRecognitionLROResults"): + """Contains the custom entity recognition job result. + + :ivar last_update_date_time: The last updated time in UTC for the task. Required. + :vartype last_update_date_time: ~datetime.datetime + :ivar status: The status of the task at the mentioned last update time. Required. Known values + are: "notStarted", "running", "succeeded", "partiallyCompleted", "failed", "cancelled", and + "cancelling". + :vartype status: str or ~azure.ai.language.text.models.State + :ivar task_name: task name. + :vartype task_name: str + :ivar kind: Kind of the task. Required. Custom entity recognition LRO results + :vartype kind: str or ~azure.ai.language.text.models.CUSTOM_ENTITY_RECOGNITION_LRO_RESULTS + :ivar results: List of results. Required. + :vartype results: ~azure.ai.language.text.models.CustomEntitiesResult + """ + + kind: Literal[AnalyzeTextLROResultsKind.CUSTOM_ENTITY_RECOGNITION_LRO_RESULTS] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the task. Required. Custom entity recognition LRO results""" + results: "_models.CustomEntitiesResult" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """List of results. Required.""" + + @overload + def __init__( + self, + *, + last_update_date_time: datetime.datetime, + status: Union[str, "_models.State"], + results: "_models.CustomEntitiesResult", + task_name: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=AnalyzeTextLROResultsKind.CUSTOM_ENTITY_RECOGNITION_LRO_RESULTS, **kwargs) + + +class CustomLabelClassificationResult(_Model): + """Contains the custom label classification results. + + :ivar errors: Errors by document id. Required. + :vartype errors: list[~azure.ai.language.text.models.DocumentError] + :ivar statistics: if showStats=true was specified in the request this field will contain + information about the request payload. + :vartype statistics: ~azure.ai.language.text.models.RequestStatistics + :ivar project_name: This field indicates the project name for the model. Required. + :vartype project_name: str + :ivar deployment_name: This field indicates the deployment name for the model. Required. + :vartype deployment_name: str + :ivar documents: Response by document. Required. + :vartype documents: + list[~azure.ai.language.text.models.ClassificationDocumentResultWithDetectedLanguage] + """ + + errors: List["_models.DocumentError"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Errors by document id. 
Required.""" + statistics: Optional["_models.RequestStatistics"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """if showStats=true was specified in the request this field will contain information about the + request payload.""" + project_name: str = rest_field(name="projectName", visibility=["read", "create", "update", "delete", "query"]) + """This field indicates the project name for the model. Required.""" + deployment_name: str = rest_field(name="deploymentName", visibility=["read", "create", "update", "delete", "query"]) + """This field indicates the deployment name for the model. Required.""" + documents: List["_models.ClassificationDocumentResultWithDetectedLanguage"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Response by document. Required.""" + + @overload + def __init__( + self, + *, + errors: List["_models.DocumentError"], + project_name: str, + deployment_name: str, + documents: List["_models.ClassificationDocumentResultWithDetectedLanguage"], + statistics: Optional["_models.RequestStatistics"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class CustomMultiLabelClassificationLROResult( + AnalyzeTextLROResult, discriminator="CustomMultiLabelClassificationLROResults" +): + """Contains the custom multi label classification job result. + + :ivar last_update_date_time: The last updated time in UTC for the task. Required. + :vartype last_update_date_time: ~datetime.datetime + :ivar status: The status of the task at the mentioned last update time. Required. Known values + are: "notStarted", "running", "succeeded", "partiallyCompleted", "failed", "cancelled", and + "cancelling". + :vartype status: str or ~azure.ai.language.text.models.State + :ivar task_name: task name. + :vartype task_name: str + :ivar kind: Kind of the task. Required. Custom multi label classification LRO results + :vartype kind: str or + ~azure.ai.language.text.models.CUSTOM_MULTI_LABEL_CLASSIFICATION_LRO_RESULTS + :ivar results: List of results. Required. + :vartype results: ~azure.ai.language.text.models.CustomLabelClassificationResult + """ + + kind: Literal[AnalyzeTextLROResultsKind.CUSTOM_MULTI_LABEL_CLASSIFICATION_LRO_RESULTS] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the task. Required. Custom multi label classification LRO results""" + results: "_models.CustomLabelClassificationResult" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """List of results. Required.""" + + @overload + def __init__( + self, + *, + last_update_date_time: datetime.datetime, + status: Union[str, "_models.State"], + results: "_models.CustomLabelClassificationResult", + task_name: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=AnalyzeTextLROResultsKind.CUSTOM_MULTI_LABEL_CLASSIFICATION_LRO_RESULTS, **kwargs) + + +class CustomMultiLabelClassificationLROTask(AnalyzeTextLROTask, discriminator="CustomMultiLabelClassification"): + """Use custom models to classify text into multi label taxonomy. + + :ivar task_name: task name. + :vartype task_name: str + :ivar kind: Kind of the task. Required. Custom multi label classification task + :vartype kind: str or ~azure.ai.language.text.models.CUSTOM_MULTI_LABEL_CLASSIFICATION + :ivar parameters: Task parameters. + :vartype parameters: + ~azure.ai.language.text.models.CustomMultiLabelClassificationTaskParameters + """ + + kind: Literal[AnalyzeTextLROTaskKind.CUSTOM_MULTI_LABEL_CLASSIFICATION] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the task. Required. Custom multi label classification task""" + parameters: Optional["_models.CustomMultiLabelClassificationTaskParameters"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Task parameters.""" + + @overload + def __init__( + self, + *, + task_name: Optional[str] = None, + parameters: Optional["_models.CustomMultiLabelClassificationTaskParameters"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=AnalyzeTextLROTaskKind.CUSTOM_MULTI_LABEL_CLASSIFICATION, **kwargs) + + +class CustomMultiLabelClassificationTaskParameters(_Model): # pylint: disable=name-too-long + """Supported parameters for a Custom Multi Classification task. + + :ivar logging_opt_out: logging opt out. + :vartype logging_opt_out: bool + :ivar project_name: This field indicates the project name for the model. Required. + :vartype project_name: str + :ivar deployment_name: This field indicates the deployment name for the model. Required. + :vartype deployment_name: str + """ + + logging_opt_out: Optional[bool] = rest_field( + name="loggingOptOut", visibility=["read", "create", "update", "delete", "query"] + ) + """logging opt out.""" + project_name: str = rest_field(name="projectName", visibility=["read", "create", "update", "delete", "query"]) + """This field indicates the project name for the model. Required.""" + deployment_name: str = rest_field(name="deploymentName", visibility=["read", "create", "update", "delete", "query"]) + """This field indicates the deployment name for the model. Required.""" + + @overload + def __init__( + self, + *, + project_name: str, + deployment_name: str, + logging_opt_out: Optional[bool] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class CustomSingleLabelClassificationLROResult( + AnalyzeTextLROResult, discriminator="CustomSingleLabelClassificationLROResults" +): + """Contains the custom single label classification job result. + + :ivar last_update_date_time: The last updated time in UTC for the task. Required. 
+ :vartype last_update_date_time: ~datetime.datetime + :ivar status: The status of the task at the mentioned last update time. Required. Known values + are: "notStarted", "running", "succeeded", "partiallyCompleted", "failed", "cancelled", and + "cancelling". + :vartype status: str or ~azure.ai.language.text.models.State + :ivar task_name: task name. + :vartype task_name: str + :ivar kind: Kind of the task. Required. Custom single label classification LRO results + :vartype kind: str or + ~azure.ai.language.text.models.CUSTOM_SINGLE_LABEL_CLASSIFICATION_LRO_RESULTS + :ivar results: List of results. Required. + :vartype results: ~azure.ai.language.text.models.CustomLabelClassificationResult + """ + + kind: Literal[AnalyzeTextLROResultsKind.CUSTOM_SINGLE_LABEL_CLASSIFICATION_LRO_RESULTS] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the task. Required. Custom single label classification LRO results""" + results: "_models.CustomLabelClassificationResult" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """List of results. Required.""" + + @overload + def __init__( + self, + *, + last_update_date_time: datetime.datetime, + status: Union[str, "_models.State"], + results: "_models.CustomLabelClassificationResult", + task_name: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=AnalyzeTextLROResultsKind.CUSTOM_SINGLE_LABEL_CLASSIFICATION_LRO_RESULTS, **kwargs) + + +class CustomSingleLabelClassificationLROTask(AnalyzeTextLROTask, discriminator="CustomSingleLabelClassification"): + """Use custom models to classify text into single label taxonomy. + + :ivar task_name: task name. + :vartype task_name: str + :ivar kind: Kind of the task. Required. Custom single label classification task + :vartype kind: str or ~azure.ai.language.text.models.CUSTOM_SINGLE_LABEL_CLASSIFICATION + :ivar parameters: Task parameters. + :vartype parameters: + ~azure.ai.language.text.models.CustomSingleLabelClassificationTaskParameters + """ + + kind: Literal[AnalyzeTextLROTaskKind.CUSTOM_SINGLE_LABEL_CLASSIFICATION] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the task. Required. Custom single label classification task""" + parameters: Optional["_models.CustomSingleLabelClassificationTaskParameters"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Task parameters.""" + + @overload + def __init__( + self, + *, + task_name: Optional[str] = None, + parameters: Optional["_models.CustomSingleLabelClassificationTaskParameters"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=AnalyzeTextLROTaskKind.CUSTOM_SINGLE_LABEL_CLASSIFICATION, **kwargs) + + +class CustomSingleLabelClassificationTaskParameters(_Model): # pylint: disable=name-too-long + """Supported parameters for a Custom Single Classification task. + + :ivar logging_opt_out: logging opt out. 
+ :vartype logging_opt_out: bool + :ivar project_name: This field indicates the project name for the model. Required. + :vartype project_name: str + :ivar deployment_name: This field indicates the deployment name for the model. Required. + :vartype deployment_name: str + """ + + logging_opt_out: Optional[bool] = rest_field( + name="loggingOptOut", visibility=["read", "create", "update", "delete", "query"] + ) + """logging opt out.""" + project_name: str = rest_field(name="projectName", visibility=["read", "create", "update", "delete", "query"]) + """This field indicates the project name for the model. Required.""" + deployment_name: str = rest_field(name="deploymentName", visibility=["read", "create", "update", "delete", "query"]) + """This field indicates the deployment name for the model. Required.""" + + @overload + def __init__( + self, + *, + project_name: str, + deployment_name: str, + logging_opt_out: Optional[bool] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class DateMetadata(BaseMetadata, discriminator="DateMetadata"): + """A Metadata for date entity instances. + + :ivar date_values: List of date values. + :vartype date_values: list[~azure.ai.language.text.models.DateValue] + :ivar metadata_kind: Kind of the metadata. Required. Metadata for date-related values. + :vartype metadata_kind: str or ~azure.ai.language.text.models.DATE_METADATA + """ + + date_values: Optional[List["_models.DateValue"]] = rest_field( + name="dateValues", visibility=["read", "create", "update", "delete", "query"] + ) + """List of date values.""" + metadata_kind: Literal[MetadataKind.DATE_METADATA] = rest_discriminator(name="metadataKind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the metadata. Required. Metadata for date-related values.""" + + @overload + def __init__( + self, + *, + date_values: Optional[List["_models.DateValue"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, metadata_kind=MetadataKind.DATE_METADATA, **kwargs) + + +class DateTimeMetadata(BaseMetadata, discriminator="DateTimeMetadata"): + """A Metadata for datetime entity instances. + + :ivar date_values: List of date values. + :vartype date_values: list[~azure.ai.language.text.models.DateValue] + :ivar metadata_kind: Kind of the metadata. Required. Metadata for date and time-related values. + :vartype metadata_kind: str or ~azure.ai.language.text.models.DATE_TIME_METADATA + """ + + date_values: Optional[List["_models.DateValue"]] = rest_field( + name="dateValues", visibility=["read", "create", "update", "delete", "query"] + ) + """List of date values.""" + metadata_kind: Literal[MetadataKind.DATE_TIME_METADATA] = rest_discriminator(name="metadataKind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the metadata. Required. Metadata for date and time-related values.""" + + @overload + def __init__( + self, + *, + date_values: Optional[List["_models.DateValue"]] = None, + ) -> None: ... 
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, metadata_kind=MetadataKind.DATE_TIME_METADATA, **kwargs) + + +class DateValue(_Model): + """Represents the date value. + + :ivar timex: An extended ISO 8601 date/time representation as described in + (`https://github.com/Microsoft/Recognizers-Text/blob/master/Patterns/English/English-DateTime.yaml + `_). + Required. + :vartype timex: str + :ivar value: The actual time that the extracted text denote. Required. + :vartype value: str + :ivar modifier: Modifier for datetime to indicate point of reference like before, after etc. + Known values are: "AfterApprox", "Before", "BeforeStart", "Approx", "ReferenceUndefined", + "SinceEnd", "AfterMid", "Start", "After", "BeforeEnd", "Until", "End", "Less", "Since", + "AfterStart", "BeforeApprox", "Mid", and "More". + :vartype modifier: str or ~azure.ai.language.text.models.TemporalModifier + """ + + timex: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """An extended ISO 8601 date/time representation as described in + (`https://github.com/Microsoft/Recognizers-Text/blob/master/Patterns/English/English-DateTime.yaml + `_). + Required.""" + value: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The actual time that the extracted text denote. Required.""" + modifier: Optional[Union[str, "_models.TemporalModifier"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Modifier for datetime to indicate point of reference like before, after etc. Known values are: + \"AfterApprox\", \"Before\", \"BeforeStart\", \"Approx\", \"ReferenceUndefined\", \"SinceEnd\", + \"AfterMid\", \"Start\", \"After\", \"BeforeEnd\", \"Until\", \"End\", \"Less\", \"Since\", + \"AfterStart\", \"BeforeApprox\", \"Mid\", and \"More\".""" + + @overload + def __init__( + self, + *, + timex: str, + value: str, + modifier: Optional[Union[str, "_models.TemporalModifier"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class DetectedLanguage(_Model): + """Contains the details of the detected language for the text. + + :ivar name: Long name of a detected language (e.g. English, French). Required. + :vartype name: str + :ivar iso6391_name: A two letter representation of the detected language according to the ISO + 639-1 standard (e.g. en, fr). Required. + :vartype iso6391_name: str + :ivar confidence_score: A confidence score between 0 and 1. Scores close to 1 indicate 100% + certainty that the identified language is true. Required. + :vartype confidence_score: float + :ivar script_name: Identifies the script name of the input document according to the ISO 15924 + standard. 
Known values are: "Arabic", "Armenian", "Bangla", + "UnifiedCanadianAboriginalSyllabics", "Cyrillic", "Devanagari", "Ethiopic", "Georgian", + "Greek", "Gujarati", "Gurmukhi", "Hangul", "HanLiteral", "HanSimplified", "HanTraditional", + "Hebrew", "Japanese", "Khmer", "Kannada", "Lao", "Latin", "Malayalam", "Meitei", "Mongolian", + "Myanmar", "Odia", "Santali", "Sharada", "Sinhala", "Tamil", "Telugu", "Thaana", "Thai", and + "Tibetan". + :vartype script_name: str or ~azure.ai.language.text.models.ScriptKind + :ivar script_iso15924_code: Identifies the script code of the input document according to the + ISO 15924 standard. Known values are: "Arab", "Armn", "Beng", "Cans", "Cyrl", "Deva", "Ethi", + "Geor", "Grek", "Gujr", "Guru", "Hang", "Hani", "Hans", "Hant", "Hebr", "Jpan", "Khmr", "Knda", + "Laoo", "Latn", "Mlym", "Mong", "Mtei", "Mymr", "Olck", "Orya", "Sinh", "Shrd", "Taml", "Telu", + "Thaa", "Thai", and "Tibt". + :vartype script_iso15924_code: str or ~azure.ai.language.text.models.ScriptCode + """ + + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Long name of a detected language (e.g. English, French). Required.""" + iso6391_name: str = rest_field(name="iso6391Name", visibility=["read", "create", "update", "delete", "query"]) + """A two letter representation of the detected language according to the ISO 639-1 standard (e.g. + en, fr). Required.""" + confidence_score: float = rest_field( + name="confidenceScore", visibility=["read", "create", "update", "delete", "query"] + ) + """A confidence score between 0 and 1. Scores close to 1 indicate 100% certainty that the + identified language is true. Required.""" + script_name: Optional[Union[str, "_models.ScriptKind"]] = rest_field( + name="scriptName", visibility=["read", "create", "update", "delete", "query"] + ) + """Identifies the script name of the input document according to the ISO 15924 standard. Known + values are: \"Arabic\", \"Armenian\", \"Bangla\", \"UnifiedCanadianAboriginalSyllabics\", + \"Cyrillic\", \"Devanagari\", \"Ethiopic\", \"Georgian\", \"Greek\", \"Gujarati\", + \"Gurmukhi\", \"Hangul\", \"HanLiteral\", \"HanSimplified\", \"HanTraditional\", \"Hebrew\", + \"Japanese\", \"Khmer\", \"Kannada\", \"Lao\", \"Latin\", \"Malayalam\", \"Meitei\", + \"Mongolian\", \"Myanmar\", \"Odia\", \"Santali\", \"Sharada\", \"Sinhala\", \"Tamil\", + \"Telugu\", \"Thaana\", \"Thai\", and \"Tibetan\".""" + script_iso15924_code: Optional[Union[str, "_models.ScriptCode"]] = rest_field( + name="scriptIso15924Code", visibility=["read", "create", "update", "delete", "query"] + ) + """Identifies the script code of the input document according to the ISO 15924 standard. Known + values are: \"Arab\", \"Armn\", \"Beng\", \"Cans\", \"Cyrl\", \"Deva\", \"Ethi\", \"Geor\", + \"Grek\", \"Gujr\", \"Guru\", \"Hang\", \"Hani\", \"Hans\", \"Hant\", \"Hebr\", \"Jpan\", + \"Khmr\", \"Knda\", \"Laoo\", \"Latn\", \"Mlym\", \"Mong\", \"Mtei\", \"Mymr\", \"Olck\", + \"Orya\", \"Sinh\", \"Shrd\", \"Taml\", \"Telu\", \"Thaa\", \"Thai\", and \"Tibt\".""" + + @overload + def __init__( + self, + *, + name: str, + iso6391_name: str, + confidence_score: float, + script_name: Optional[Union[str, "_models.ScriptKind"]] = None, + script_iso15924_code: Optional[Union[str, "_models.ScriptCode"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class DocumentError(_Model): + """Contains details of errors encountered during a job execution. + + :ivar id: The ID of the input document. Required. + :vartype id: str + :ivar error: Error encountered. Required. + :vartype error: ~azure.ai.language.text.models.Error + """ + + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ID of the input document. Required.""" + error: "_models.Error" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Error encountered. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + error: "_models.Error", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class DocumentStatistics(_Model): + """if showStats=true was specified in the request this field will contain information about the + document payload. + + :ivar characters_count: Number of text elements recognized in the document. Required. + :vartype characters_count: int + :ivar transactions_count: Number of transactions for the document. Required. + :vartype transactions_count: int + """ + + characters_count: int = rest_field( + name="charactersCount", visibility=["read", "create", "update", "delete", "query"] + ) + """Number of text elements recognized in the document. Required.""" + transactions_count: int = rest_field( + name="transactionsCount", visibility=["read", "create", "update", "delete", "query"] + ) + """Number of transactions for the document. Required.""" + + @overload + def __init__( + self, + *, + characters_count: int, + transactions_count: int, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class DocumentWarning(_Model): + """Contains the warnings object with warnings encountered for the processed document. + + :ivar code: Warning code. Required. Known values are: "LongWordsInDocument" and + "DocumentTruncated". + :vartype code: str or ~azure.ai.language.text.models.WarningCodeValue + :ivar message: Warning message. Required. + :vartype message: str + :ivar target_ref: A JSON pointer reference indicating the target object. + :vartype target_ref: str + """ + + code: Union[str, "_models.WarningCodeValue"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Warning code. Required. Known values are: \"LongWordsInDocument\" and \"DocumentTruncated\".""" + message: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Warning message. Required.""" + target_ref: Optional[str] = rest_field(name="targetRef", visibility=["read", "create", "update", "delete", "query"]) + """A JSON pointer reference indicating the target object.""" + + @overload + def __init__( + self, + *, + code: Union[str, "_models.WarningCodeValue"], + message: str, + target_ref: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class EntitiesDocumentResultWithDetectedLanguage(_Model): # pylint: disable=name-too-long + """Contains the entity recognition task result for the document with detected language. + + :ivar id: Unique, non-empty document identifier. Required. + :vartype id: str + :ivar warnings: Warnings encountered while processing document. Required. + :vartype warnings: list[~azure.ai.language.text.models.DocumentWarning] + :ivar statistics: if showStats=true was specified in the request this field will contain + information about the document payload. + :vartype statistics: ~azure.ai.language.text.models.DocumentStatistics + :ivar entities: Recognized entities in the document. Required. + :vartype entities: list[~azure.ai.language.text.models.Entity] + :ivar detected_language: If 'language' is set to 'auto' for the document in the request this + field will contain a 2 letter ISO 639-1 representation of the language detected for this + document. + :vartype detected_language: ~azure.ai.language.text.models.DetectedLanguage + """ + + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Unique, non-empty document identifier. Required.""" + warnings: List["_models.DocumentWarning"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Warnings encountered while processing document. Required.""" + statistics: Optional["_models.DocumentStatistics"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """if showStats=true was specified in the request this field will contain information about the + document payload.""" + entities: List["_models.Entity"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Recognized entities in the document. Required.""" + detected_language: Optional["_models.DetectedLanguage"] = rest_field( + name="detectedLanguage", visibility=["read", "create", "update", "delete", "query"] + ) + """If 'language' is set to 'auto' for the document in the request this field will contain a 2 + letter ISO 639-1 representation of the language detected for this document.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + warnings: List["_models.DocumentWarning"], + entities: List["_models.Entity"], + statistics: Optional["_models.DocumentStatistics"] = None, + detected_language: Optional["_models.DetectedLanguage"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class EntitiesDocumentResultWithMetadata(_Model): + """Entity documents result with metadata. + + :ivar id: Unique, non-empty document identifier. Required. + :vartype id: str + :ivar warnings: Warnings encountered while processing document. Required. + :vartype warnings: list[~azure.ai.language.text.models.DocumentWarning] + :ivar statistics: if showStats=true was specified in the request this field will contain + information about the document payload. + :vartype statistics: ~azure.ai.language.text.models.DocumentStatistics + :ivar entities: Recognized entities in the document. Required. 
+ :vartype entities: list[~azure.ai.language.text.models.EntityWithMetadata] + """ + + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Unique, non-empty document identifier. Required.""" + warnings: List["_models.DocumentWarning"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Warnings encountered while processing document. Required.""" + statistics: Optional["_models.DocumentStatistics"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """if showStats=true was specified in the request this field will contain information about the + document payload.""" + entities: List["_models.EntityWithMetadata"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Recognized entities in the document. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + warnings: List["_models.DocumentWarning"], + entities: List["_models.EntityWithMetadata"], + statistics: Optional["_models.DocumentStatistics"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class EntitiesDocumentResultWithMetadataDetectedLanguage(_Model): # pylint: disable=name-too-long + """Contains the entity recognition task result for the document with metadata and detected + language. + + :ivar id: Unique, non-empty document identifier. Required. + :vartype id: str + :ivar warnings: Warnings encountered while processing document. Required. + :vartype warnings: list[~azure.ai.language.text.models.DocumentWarning] + :ivar statistics: if showStats=true was specified in the request this field will contain + information about the document payload. + :vartype statistics: ~azure.ai.language.text.models.DocumentStatistics + :ivar entities: Recognized entities in the document. Required. + :vartype entities: list[~azure.ai.language.text.models.EntityWithMetadata] + :ivar detected_language: If 'language' is set to 'auto' for the document in the request this + field will contain a 2 letter ISO 639-1 representation of the language detected for this + document. + :vartype detected_language: ~azure.ai.language.text.models.DetectedLanguage + """ + + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Unique, non-empty document identifier. Required.""" + warnings: List["_models.DocumentWarning"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Warnings encountered while processing document. Required.""" + statistics: Optional["_models.DocumentStatistics"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """if showStats=true was specified in the request this field will contain information about the + document payload.""" + entities: List["_models.EntityWithMetadata"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Recognized entities in the document. 
Required.""" + detected_language: Optional["_models.DetectedLanguage"] = rest_field( + name="detectedLanguage", visibility=["read", "create", "update", "delete", "query"] + ) + """If 'language' is set to 'auto' for the document in the request this field will contain a 2 + letter ISO 639-1 representation of the language detected for this document.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + warnings: List["_models.DocumentWarning"], + entities: List["_models.EntityWithMetadata"], + statistics: Optional["_models.DocumentStatistics"] = None, + detected_language: Optional["_models.DetectedLanguage"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class EntitiesLROTask(AnalyzeTextLROTask, discriminator="EntityRecognition"): + """An object representing the task definition for an Entities Recognition task. + + :ivar task_name: task name. + :vartype task_name: str + :ivar kind: The kind of task. Required. Entity recognition task + :vartype kind: str or ~azure.ai.language.text.models.ENTITY_RECOGNITION + :ivar parameters: Task parameters. + :vartype parameters: ~azure.ai.language.text.models.EntitiesTaskParameters + """ + + kind: Literal[AnalyzeTextLROTaskKind.ENTITY_RECOGNITION] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The kind of task. Required. Entity recognition task""" + parameters: Optional["_models.EntitiesTaskParameters"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Task parameters.""" + + @overload + def __init__( + self, + *, + task_name: Optional[str] = None, + parameters: Optional["_models.EntitiesTaskParameters"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=AnalyzeTextLROTaskKind.ENTITY_RECOGNITION, **kwargs) + + +class EntitiesResult(_Model): + """Contains the entity recognition task result. + + :ivar errors: Errors by document id. Required. + :vartype errors: list[~azure.ai.language.text.models.DocumentError] + :ivar statistics: if showStats=true was specified in the request this field will contain + information about the request payload. + :vartype statistics: ~azure.ai.language.text.models.RequestStatistics + :ivar model_version: This field indicates which model is used for scoring. Required. + :vartype model_version: str + :ivar documents: Response by document. Required. + :vartype documents: list[~azure.ai.language.text.models.EntitiesDocumentResultWithMetadata] + """ + + errors: List["_models.DocumentError"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Errors by document id. Required.""" + statistics: Optional["_models.RequestStatistics"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """if showStats=true was specified in the request this field will contain information about the + request payload.""" + model_version: str = rest_field(name="modelVersion", visibility=["read", "create", "update", "delete", "query"]) + """This field indicates which model is used for scoring. 
Required.""" + documents: List["_models.EntitiesDocumentResultWithMetadata"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Response by document. Required.""" + + @overload + def __init__( + self, + *, + errors: List["_models.DocumentError"], + model_version: str, + documents: List["_models.EntitiesDocumentResultWithMetadata"], + statistics: Optional["_models.RequestStatistics"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class EntitiesTaskParameters(_Model): + """Supported parameters for an Entity Recognition task. + + :ivar logging_opt_out: logging opt out. + :vartype logging_opt_out: bool + :ivar model_version: model version. + :vartype model_version: str + :ivar string_index_type: (Optional) parameter to provide the string index type used to + interpret string offsets. Defaults to TextElements (Graphemes). Known values are: + "TextElements_v8", "UnicodeCodePoint", and "Utf16CodeUnit". + :vartype string_index_type: str or ~azure.ai.language.text.models.StringIndexType + :ivar inclusion_list: (Optional) request parameter that limits the output to the requested + entity types included in this list. We will apply inclusionList before exclusionList. + :vartype inclusion_list: list[str or ~azure.ai.language.text.models.EntityCategory] + :ivar exclusion_list: (Optional) request parameter that filters out any entities that are + included the excludeList. When a user specifies an excludeList, they cannot get a prediction + returned with an entity in that list. We will apply inclusionList before exclusionList. + :vartype exclusion_list: list[str or ~azure.ai.language.text.models.EntityCategory] + :ivar overlap_policy: (Optional) describes the type of overlap policy to apply to the ner + output. + :vartype overlap_policy: ~azure.ai.language.text.models.BaseEntityOverlapPolicy + :ivar inference_options: (Optional) request parameter that allows the user to provide settings + for running the inference. + :vartype inference_options: ~azure.ai.language.text.models.EntityInferenceOptions + """ + + logging_opt_out: Optional[bool] = rest_field( + name="loggingOptOut", visibility=["read", "create", "update", "delete", "query"] + ) + """logging opt out.""" + model_version: Optional[str] = rest_field( + name="modelVersion", visibility=["read", "create", "update", "delete", "query"] + ) + """model version.""" + string_index_type: Optional[Union[str, "_models.StringIndexType"]] = rest_field( + name="stringIndexType", visibility=["read", "create", "update", "delete", "query"] + ) + """(Optional) parameter to provide the string index type used to interpret string offsets. + Defaults to TextElements (Graphemes). Known values are: \"TextElements_v8\", + \"UnicodeCodePoint\", and \"Utf16CodeUnit\".""" + inclusion_list: Optional[List[Union[str, "_models.EntityCategory"]]] = rest_field( + name="inclusionList", visibility=["read", "create", "update", "delete", "query"] + ) + """(Optional) request parameter that limits the output to the requested entity types included in + this list. 
We will apply inclusionList before exclusionList.""" + exclusion_list: Optional[List[Union[str, "_models.EntityCategory"]]] = rest_field( + name="exclusionList", visibility=["read", "create", "update", "delete", "query"] + ) + """(Optional) request parameter that filters out any entities that are included the excludeList. + When a user specifies an excludeList, they cannot get a prediction returned with an entity in + that list. We will apply inclusionList before exclusionList.""" + overlap_policy: Optional["_models.BaseEntityOverlapPolicy"] = rest_field( + name="overlapPolicy", visibility=["read", "create", "update", "delete", "query"] + ) + """(Optional) describes the type of overlap policy to apply to the ner output.""" + inference_options: Optional["_models.EntityInferenceOptions"] = rest_field( + name="inferenceOptions", visibility=["read", "create", "update", "delete", "query"] + ) + """(Optional) request parameter that allows the user to provide settings for running the + inference.""" + + @overload + def __init__( + self, + *, + logging_opt_out: Optional[bool] = None, + model_version: Optional[str] = None, + string_index_type: Optional[Union[str, "_models.StringIndexType"]] = None, + inclusion_list: Optional[List[Union[str, "_models.EntityCategory"]]] = None, + exclusion_list: Optional[List[Union[str, "_models.EntityCategory"]]] = None, + overlap_policy: Optional["_models.BaseEntityOverlapPolicy"] = None, + inference_options: Optional["_models.EntityInferenceOptions"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class EntitiesTaskResult(AnalyzeTextTaskResult, discriminator="EntityRecognitionResults"): + """Contains the entity task. + + :ivar kind: kind of the task. Required. Entity recognition results + :vartype kind: str or ~azure.ai.language.text.models.ENTITY_RECOGNITION_RESULTS + :ivar results: Results for entity recognition. Required. + :vartype results: ~azure.ai.language.text.models.EntitiesWithMetadataAutoResult + """ + + kind: Literal[AnalyzeTextTaskResultsKind.ENTITY_RECOGNITION_RESULTS] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """kind of the task. Required. Entity recognition results""" + results: "_models.EntitiesWithMetadataAutoResult" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Results for entity recognition. Required.""" + + @overload + def __init__( + self, + *, + results: "_models.EntitiesWithMetadataAutoResult", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=AnalyzeTextTaskResultsKind.ENTITY_RECOGNITION_RESULTS, **kwargs) + + +class EntitiesWithMetadataAutoResult(_Model): + """Contains the entity recognition task result. + + :ivar errors: Errors by document id. Required. + :vartype errors: list[~azure.ai.language.text.models.DocumentError] + :ivar statistics: if showStats=true was specified in the request this field will contain + information about the request payload. 
+ :vartype statistics: ~azure.ai.language.text.models.RequestStatistics + :ivar model_version: This field indicates which model is used for scoring. Required. + :vartype model_version: str + :ivar documents: Response by document. Required. + :vartype documents: + list[~azure.ai.language.text.models.EntitiesDocumentResultWithMetadataDetectedLanguage] + """ + + errors: List["_models.DocumentError"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Errors by document id. Required.""" + statistics: Optional["_models.RequestStatistics"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """if showStats=true was specified in the request this field will contain information about the + request payload.""" + model_version: str = rest_field(name="modelVersion", visibility=["read", "create", "update", "delete", "query"]) + """This field indicates which model is used for scoring. Required.""" + documents: List["_models.EntitiesDocumentResultWithMetadataDetectedLanguage"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Response by document. Required.""" + + @overload + def __init__( + self, + *, + errors: List["_models.DocumentError"], + model_version: str, + documents: List["_models.EntitiesDocumentResultWithMetadataDetectedLanguage"], + statistics: Optional["_models.RequestStatistics"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class Entity(_Model): + """Defines the detected entity object containing the entity category and entity text detected, + etc. + + :ivar text: Entity text as appears in the request. Required. + :vartype text: str + :ivar category: Entity type. Required. + :vartype category: str + :ivar subcategory: (Optional) Entity sub type. + :vartype subcategory: str + :ivar offset: Start position for the entity text. Use of different 'stringIndexType' values can + affect the offset returned. Required. + :vartype offset: int + :ivar length: Length for the entity text. Use of different 'stringIndexType' values can affect + the length returned. Required. + :vartype length: int + :ivar confidence_score: Confidence score between 0 and 1 of the extracted entity. Required. + :vartype confidence_score: float + """ + + text: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Entity text as appears in the request. Required.""" + category: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Entity type. Required.""" + subcategory: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """(Optional) Entity sub type.""" + offset: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Start position for the entity text. Use of different 'stringIndexType' values can affect the + offset returned. Required.""" + length: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Length for the entity text. Use of different 'stringIndexType' values can affect the length + returned. Required.""" + confidence_score: float = rest_field( + name="confidenceScore", visibility=["read", "create", "update", "delete", "query"] + ) + """Confidence score between 0 and 1 of the extracted entity. 
Required.""" + + @overload + def __init__( + self, + *, + text: str, + category: str, + offset: int, + length: int, + confidence_score: float, + subcategory: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class EntityInferenceOptions(_Model): + """The class that houses the inference options allowed for named entity recognition. + + :ivar exclude_normalized_values: Option to include/exclude the detected entity values to be + normalized and included in the metadata. The numeric and temporal entity types support value + normalization. + :vartype exclude_normalized_values: bool + """ + + exclude_normalized_values: Optional[bool] = rest_field( + name="excludeNormalizedValues", visibility=["read", "create", "update", "delete", "query"] + ) + """Option to include/exclude the detected entity values to be normalized and included in the + metadata. The numeric and temporal entity types support value normalization.""" + + @overload + def __init__( + self, + *, + exclude_normalized_values: Optional[bool] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class EntityLinkingLROResult(AnalyzeTextLROResult, discriminator="EntityLinkingLROResults"): + """Contains the analyze text Entity linking task LRO result. + + :ivar last_update_date_time: The last updated time in UTC for the task. Required. + :vartype last_update_date_time: ~datetime.datetime + :ivar status: The status of the task at the mentioned last update time. Required. Known values + are: "notStarted", "running", "succeeded", "partiallyCompleted", "failed", "cancelled", and + "cancelling". + :vartype status: str or ~azure.ai.language.text.models.State + :ivar task_name: task name. + :vartype task_name: str + :ivar kind: Kind of the task. Required. Entity linking LRO results + :vartype kind: str or ~azure.ai.language.text.models.ENTITY_LINKING_LRO_RESULTS + :ivar results: Entity linking result. Required. + :vartype results: ~azure.ai.language.text.models.EntityLinkingResult + """ + + kind: Literal[AnalyzeTextLROResultsKind.ENTITY_LINKING_LRO_RESULTS] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the task. Required. Entity linking LRO results""" + results: "_models.EntityLinkingResult" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Entity linking result. Required.""" + + @overload + def __init__( + self, + *, + last_update_date_time: datetime.datetime, + status: Union[str, "_models.State"], + results: "_models.EntityLinkingResult", + task_name: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=AnalyzeTextLROResultsKind.ENTITY_LINKING_LRO_RESULTS, **kwargs) + + +class EntityLinkingLROTask(AnalyzeTextLROTask, discriminator="EntityLinking"): + """Contains the analyze text Entity linking LRO task. 
+ + :ivar task_name: task name. + :vartype task_name: str + :ivar kind: Kind of task result. Required. Entity linking task + :vartype kind: str or ~azure.ai.language.text.models.ENTITY_LINKING + :ivar parameters: Task parameters. + :vartype parameters: ~azure.ai.language.text.models.EntityLinkingTaskParameters + """ + + kind: Literal[AnalyzeTextLROTaskKind.ENTITY_LINKING] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of task result. Required. Entity linking task""" + parameters: Optional["_models.EntityLinkingTaskParameters"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Task parameters.""" + + @overload + def __init__( + self, + *, + task_name: Optional[str] = None, + parameters: Optional["_models.EntityLinkingTaskParameters"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=AnalyzeTextLROTaskKind.ENTITY_LINKING, **kwargs) + + +class EntityLinkingResult(_Model): + """Entity linking result. + + :ivar errors: Errors by document id. Required. + :vartype errors: list[~azure.ai.language.text.models.DocumentError] + :ivar statistics: if showStats=true was specified in the request this field will contain + information about the request payload. + :vartype statistics: ~azure.ai.language.text.models.RequestStatistics + :ivar model_version: This field indicates which model is used for scoring. Required. + :vartype model_version: str + :ivar documents: Response by document. Required. + :vartype documents: + list[~azure.ai.language.text.models.EntityLinkingResultWithDetectedLanguage] + """ + + errors: List["_models.DocumentError"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Errors by document id. Required.""" + statistics: Optional["_models.RequestStatistics"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """if showStats=true was specified in the request this field will contain information about the + request payload.""" + model_version: str = rest_field(name="modelVersion", visibility=["read", "create", "update", "delete", "query"]) + """This field indicates which model is used for scoring. Required.""" + documents: List["_models.EntityLinkingResultWithDetectedLanguage"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Response by document. Required.""" + + @overload + def __init__( + self, + *, + errors: List["_models.DocumentError"], + model_version: str, + documents: List["_models.EntityLinkingResultWithDetectedLanguage"], + statistics: Optional["_models.RequestStatistics"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class EntityLinkingResultWithDetectedLanguage(_Model): + """Entity linking document result with auto language detection. + + :ivar id: Unique, non-empty document identifier. Required. + :vartype id: str + :ivar warnings: Warnings encountered while processing document. Required. 
+ :vartype warnings: list[~azure.ai.language.text.models.DocumentWarning] + :ivar statistics: if showStats=true was specified in the request this field will contain + information about the document payload. + :vartype statistics: ~azure.ai.language.text.models.DocumentStatistics + :ivar entities: Recognized well known entities in the document. Required. + :vartype entities: list[~azure.ai.language.text.models.LinkedEntity] + :ivar detected_language: If 'language' is set to 'auto' for the document in the request this + field will contain a 2 letter ISO 639-1 representation of the language detected for this + document. + :vartype detected_language: ~azure.ai.language.text.models.DetectedLanguage + """ + + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Unique, non-empty document identifier. Required.""" + warnings: List["_models.DocumentWarning"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Warnings encountered while processing document. Required.""" + statistics: Optional["_models.DocumentStatistics"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """if showStats=true was specified in the request this field will contain information about the + document payload.""" + entities: List["_models.LinkedEntity"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Recognized well known entities in the document. Required.""" + detected_language: Optional["_models.DetectedLanguage"] = rest_field( + name="detectedLanguage", visibility=["read", "create", "update", "delete", "query"] + ) + """If 'language' is set to 'auto' for the document in the request this field will contain a 2 + letter ISO 639-1 representation of the language detected for this document.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + warnings: List["_models.DocumentWarning"], + entities: List["_models.LinkedEntity"], + statistics: Optional["_models.DocumentStatistics"] = None, + detected_language: Optional["_models.DetectedLanguage"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class EntityLinkingTaskParameters(_Model): + """Supported parameters for an Entity Linking task. + + :ivar logging_opt_out: logging opt out. + :vartype logging_opt_out: bool + :ivar model_version: model version. + :vartype model_version: str + :ivar string_index_type: Optional parameter to provide the string index type used to interpret + string offsets. Defaults to TextElements (Graphemes). Known values are: "TextElements_v8", + "UnicodeCodePoint", and "Utf16CodeUnit". + :vartype string_index_type: str or ~azure.ai.language.text.models.StringIndexType + """ + + logging_opt_out: Optional[bool] = rest_field( + name="loggingOptOut", visibility=["read", "create", "update", "delete", "query"] + ) + """logging opt out.""" + model_version: Optional[str] = rest_field( + name="modelVersion", visibility=["read", "create", "update", "delete", "query"] + ) + """model version.""" + string_index_type: Optional[Union[str, "_models.StringIndexType"]] = rest_field( + name="stringIndexType", visibility=["read", "create", "update", "delete", "query"] + ) + """Optional parameter to provide the string index type used to interpret string offsets. 
Defaults + to TextElements (Graphemes). Known values are: \"TextElements_v8\", \"UnicodeCodePoint\", and + \"Utf16CodeUnit\".""" + + @overload + def __init__( + self, + *, + logging_opt_out: Optional[bool] = None, + model_version: Optional[str] = None, + string_index_type: Optional[Union[str, "_models.StringIndexType"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class EntityLinkingTaskResult(AnalyzeTextTaskResult, discriminator="EntityLinkingResults"): + """Contains the analyze text Entity linking task result. + + :ivar kind: Kind of task result. Required. Entity linking results + :vartype kind: str or ~azure.ai.language.text.models.ENTITY_LINKING_RESULTS + :ivar results: Entity linking result. Required. + :vartype results: ~azure.ai.language.text.models.EntityLinkingResult + """ + + kind: Literal[AnalyzeTextTaskResultsKind.ENTITY_LINKING_RESULTS] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of task result. Required. Entity linking results""" + results: "_models.EntityLinkingResult" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Entity linking result. Required.""" + + @overload + def __init__( + self, + *, + results: "_models.EntityLinkingResult", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=AnalyzeTextTaskResultsKind.ENTITY_LINKING_RESULTS, **kwargs) + + +class EntityMaskPolicyType(BaseRedactionPolicy, discriminator="entityMask"): + """Represents the policy of redacting PII with the entity type. + + :ivar policy_kind: The entity OverlapPolicy object kind. Required. Redact detected entities + with entity type. + :vartype policy_kind: str or ~azure.ai.language.text.models.ENTITY_MASK + """ + + policy_kind: Literal[RedactionPolicyKind.ENTITY_MASK] = rest_discriminator(name="policyKind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The entity OverlapPolicy object kind. Required. Redact detected entities with entity type.""" + + @overload + def __init__( + self, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, policy_kind=RedactionPolicyKind.ENTITY_MASK, **kwargs) + + +class EntityRecognitionLROResult(AnalyzeTextLROResult, discriminator="EntityRecognitionLROResults"): + """Contains the entity recognition job task result. + + :ivar last_update_date_time: The last updated time in UTC for the task. Required. + :vartype last_update_date_time: ~datetime.datetime + :ivar status: The status of the task at the mentioned last update time. Required. Known values + are: "notStarted", "running", "succeeded", "partiallyCompleted", "failed", "cancelled", and + "cancelling". + :vartype status: str or ~azure.ai.language.text.models.State + :ivar task_name: task name. + :vartype task_name: str + :ivar kind: Kind of the task. Required. 
Entity recognition LRO results + :vartype kind: str or ~azure.ai.language.text.models.ENTITY_RECOGNITION_LRO_RESULTS + :ivar results: Results for the task. Required. + :vartype results: ~azure.ai.language.text.models.EntitiesResult + """ + + kind: Literal[AnalyzeTextLROResultsKind.ENTITY_RECOGNITION_LRO_RESULTS] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the task. Required. Entity recognition LRO results""" + results: "_models.EntitiesResult" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Results for the task. Required.""" + + @overload + def __init__( + self, + *, + last_update_date_time: datetime.datetime, + status: Union[str, "_models.State"], + results: "_models.EntitiesResult", + task_name: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=AnalyzeTextLROResultsKind.ENTITY_RECOGNITION_LRO_RESULTS, **kwargs) + + +class EntitySynonym(_Model): + """The entity synonyms used to enhance pii entity detection. + + :ivar synonym: The synonym to be used for context. Required. + :vartype synonym: str + :ivar language: The 2 letter ISO 639-1 language the synonym. + :vartype language: str + """ + + synonym: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The synonym to be used for context. Required.""" + language: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The 2 letter ISO 639-1 language the synonym.""" + + @overload + def __init__( + self, + *, + synonym: str, + language: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class EntitySynonyms(_Model): + """Object that allows the user to provide synonyms for context words that to enhance pii entity + detection. + + :ivar entity_type: The entity name. Required. Known values are: "Address", "Numeric", "Age", + "Currency", "Number", "NumberRange", "Percentage", "Ordinal", "Temperature", "Dimension", + "Length", "Weight", "Height", "Speed", "Area", "Volume", "Information", "Temporal", "Date", + "Time", "DateTime", "DateRange", "TimeRange", "DateTimeRange", "Duration", "SetTemporal", + "Event", "SportsEvent", "CulturalEvent", "NaturalEvent", "Location", "GPE", "City", "State", + "CountryRegion", "Continent", "Structural", "Airport", "Geological", "Organization", + "OrganizationMedical", "OrganizationStockExchange", "OrganizationSports", "Person", + "PersonType", "Email", "URL", "IP", "PhoneNumber", "Product", "ComputingProduct", and "Skill". + :vartype entity_type: str or ~azure.ai.language.text.models.EntityCategory + :ivar synonyms: The entity synonyms. Required. + :vartype synonyms: list[~azure.ai.language.text.models.EntitySynonym] + """ + + entity_type: Union[str, "_models.EntityCategory"] = rest_field( + name="entityType", visibility=["read", "create", "update", "delete", "query"] + ) + """The entity name. Required. 
Known values are: \"Address\", \"Numeric\", \"Age\", \"Currency\", + \"Number\", \"NumberRange\", \"Percentage\", \"Ordinal\", \"Temperature\", \"Dimension\", + \"Length\", \"Weight\", \"Height\", \"Speed\", \"Area\", \"Volume\", \"Information\", + \"Temporal\", \"Date\", \"Time\", \"DateTime\", \"DateRange\", \"TimeRange\", + \"DateTimeRange\", \"Duration\", \"SetTemporal\", \"Event\", \"SportsEvent\", + \"CulturalEvent\", \"NaturalEvent\", \"Location\", \"GPE\", \"City\", \"State\", + \"CountryRegion\", \"Continent\", \"Structural\", \"Airport\", \"Geological\", + \"Organization\", \"OrganizationMedical\", \"OrganizationStockExchange\", + \"OrganizationSports\", \"Person\", \"PersonType\", \"Email\", \"URL\", \"IP\", + \"PhoneNumber\", \"Product\", \"ComputingProduct\", and \"Skill\".""" + synonyms: List["_models.EntitySynonym"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The entity synonyms. Required.""" + + @overload + def __init__( + self, + *, + entity_type: Union[str, "_models.EntityCategory"], + synonyms: List["_models.EntitySynonym"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class EntityTag(_Model): + """Entity tag object which contains the name of the tags abd any associated confidence score. + Entity Tags are used to express some similarities/affinity between entities. + + :ivar name: Name of the tag. Entity Tag names will be unique globally. Required. + :vartype name: str + :ivar confidence_score: Detection score between 0 and 1 of the extracted entity. + :vartype confidence_score: float + """ + + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Name of the tag. Entity Tag names will be unique globally. Required.""" + confidence_score: Optional[float] = rest_field( + name="confidenceScore", visibility=["read", "create", "update", "delete", "query"] + ) + """Detection score between 0 and 1 of the extracted entity.""" + + @overload + def __init__( + self, + *, + name: str, + confidence_score: Optional[float] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class EntityWithMetadata(_Model): + """Entity object with tags and metadata. + + :ivar text: Entity text as appears in the request. Required. + :vartype text: str + :ivar category: Entity type. Required. + :vartype category: str + :ivar subcategory: (Optional) Entity sub type. + :vartype subcategory: str + :ivar offset: Start position for the entity text. Use of different 'stringIndexType' values can + affect the offset returned. Required. + :vartype offset: int + :ivar length: Length for the entity text. Use of different 'stringIndexType' values can affect + the length returned. Required. + :vartype length: int + :ivar confidence_score: Confidence score between 0 and 1 of the extracted entity. Required. + :vartype confidence_score: float + :ivar type: An entity type is the lowest (or finest) granularity at which the entity has been + detected. The type maps to the specific metadata attributes associated with the entity + detected. 
+ :vartype type: str + :ivar tags: List of entity tags. Tags are to express some similarities/affinity between + entities. + :vartype tags: list[~azure.ai.language.text.models.EntityTag] + :ivar metadata: The entity metadata object. + :vartype metadata: ~azure.ai.language.text.models.BaseMetadata + """ + + text: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Entity text as appears in the request. Required.""" + category: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Entity type. Required.""" + subcategory: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """(Optional) Entity sub type.""" + offset: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Start position for the entity text. Use of different 'stringIndexType' values can affect the + offset returned. Required.""" + length: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Length for the entity text. Use of different 'stringIndexType' values can affect the length + returned. Required.""" + confidence_score: float = rest_field( + name="confidenceScore", visibility=["read", "create", "update", "delete", "query"] + ) + """Confidence score between 0 and 1 of the extracted entity. Required.""" + type: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """An entity type is the lowest (or finest) granularity at which the entity has been detected. The + type maps to the specific metadata attributes associated with the entity detected.""" + tags: Optional[List["_models.EntityTag"]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """List of entity tags. Tags are to express some similarities/affinity between entities.""" + metadata: Optional["_models.BaseMetadata"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The entity metadata object.""" + + @overload + def __init__( + self, + *, + text: str, + category: str, + offset: int, + length: int, + confidence_score: float, + subcategory: Optional[str] = None, + type: Optional[str] = None, + tags: Optional[List["_models.EntityTag"]] = None, + metadata: Optional["_models.BaseMetadata"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class Error(_Model): + """The error response object returned when the service encounters some errors during processing + the request. + + :ivar code: One of a server-defined set of error codes. Required. Known values are: + "InvalidRequest", "InvalidArgument", "Unauthorized", "Forbidden", "NotFound", + "ProjectNotFound", "OperationNotFound", "AzureCognitiveSearchNotFound", + "AzureCognitiveSearchIndexNotFound", "TooManyRequests", "AzureCognitiveSearchThrottling", + "AzureCognitiveSearchIndexLimitReached", "InternalServerError", "ServiceUnavailable", + "Timeout", "QuotaExceeded", "Conflict", and "Warning". + :vartype code: str or ~azure.ai.language.text.models.ErrorCode + :ivar message: A human-readable representation of the error. Required. + :vartype message: str + :ivar target: The target of the error. + :vartype target: str + :ivar details: An array of details about specific errors that led to this reported error. 
+ :vartype details: list[~azure.ai.language.text.models.Error] + :ivar innererror: An object containing more specific information than the current object about + the error. + :vartype innererror: ~azure.ai.language.text.models.InnerErrorModel + """ + + code: Union[str, "_models.ErrorCode"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """One of a server-defined set of error codes. Required. Known values are: \"InvalidRequest\", + \"InvalidArgument\", \"Unauthorized\", \"Forbidden\", \"NotFound\", \"ProjectNotFound\", + \"OperationNotFound\", \"AzureCognitiveSearchNotFound\", \"AzureCognitiveSearchIndexNotFound\", + \"TooManyRequests\", \"AzureCognitiveSearchThrottling\", + \"AzureCognitiveSearchIndexLimitReached\", \"InternalServerError\", \"ServiceUnavailable\", + \"Timeout\", \"QuotaExceeded\", \"Conflict\", and \"Warning\".""" + message: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A human-readable representation of the error. Required.""" + target: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The target of the error.""" + details: Optional[List["_models.Error"]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """An array of details about specific errors that led to this reported error.""" + innererror: Optional["_models.InnerErrorModel"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """An object containing more specific information than the current object about the error.""" + + @overload + def __init__( + self, + *, + code: Union[str, "_models.ErrorCode"], + message: str, + target: Optional[str] = None, + details: Optional[List["_models.Error"]] = None, + innererror: Optional["_models.InnerErrorModel"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ErrorResponse(_Model): + """Error response. + + :ivar error: The error object. Required. + :vartype error: ~azure.ai.language.text.models.Error + """ + + error: "_models.Error" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The error object. Required.""" + + @overload + def __init__( + self, + *, + error: "_models.Error", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ExtractedSummaryDocumentResultWithDetectedLanguage(_Model): # pylint: disable=name-too-long + """A ranked list of sentences representing the extracted summary. + + :ivar id: Unique, non-empty document identifier. Required. + :vartype id: str + :ivar warnings: Warnings encountered while processing document. Required. + :vartype warnings: list[~azure.ai.language.text.models.DocumentWarning] + :ivar statistics: if showStats=true was specified in the request this field will contain + information about the document payload. + :vartype statistics: ~azure.ai.language.text.models.DocumentStatistics + :ivar sentences: Specifies the the extracted sentences from the input document. Required. 
+ :vartype sentences: list[~azure.ai.language.text.models.ExtractedSummarySentence] + :ivar detected_language: If 'language' is set to 'auto' for the document in the request this + field will contain a 2 letter ISO 639-1 representation of the language detected for this + document. + :vartype detected_language: ~azure.ai.language.text.models.DetectedLanguage + """ + + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Unique, non-empty document identifier. Required.""" + warnings: List["_models.DocumentWarning"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Warnings encountered while processing document. Required.""" + statistics: Optional["_models.DocumentStatistics"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """if showStats=true was specified in the request this field will contain information about the + document payload.""" + sentences: List["_models.ExtractedSummarySentence"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Specifies the the extracted sentences from the input document. Required.""" + detected_language: Optional["_models.DetectedLanguage"] = rest_field( + name="detectedLanguage", visibility=["read", "create", "update", "delete", "query"] + ) + """If 'language' is set to 'auto' for the document in the request this field will contain a 2 + letter ISO 639-1 representation of the language detected for this document.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + warnings: List["_models.DocumentWarning"], + sentences: List["_models.ExtractedSummarySentence"], + statistics: Optional["_models.DocumentStatistics"] = None, + detected_language: Optional["_models.DetectedLanguage"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ExtractedSummarySentence(_Model): + """Represents an extracted sentences from the input document. + + :ivar text: The extracted sentence text. Required. + :vartype text: str + :ivar rank_score: A double value representing the relevance of the sentence within the summary. + Higher values indicate higher importance. Required. + :vartype rank_score: float + :ivar offset: The sentence offset from the start of the document, based on the value of the + parameter StringIndexType. Required. + :vartype offset: int + :ivar length: The length of the sentence. Required. + :vartype length: int + """ + + text: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The extracted sentence text. Required.""" + rank_score: float = rest_field(name="rankScore", visibility=["read", "create", "update", "delete", "query"]) + """A double value representing the relevance of the sentence within the summary. Higher values + indicate higher importance. Required.""" + offset: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The sentence offset from the start of the document, based on the value of the parameter + StringIndexType. Required.""" + length: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The length of the sentence. 
Required.""" + + @overload + def __init__( + self, + *, + text: str, + rank_score: float, + offset: int, + length: int, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ExtractiveSummarizationLROResult(AnalyzeTextLROResult, discriminator="ExtractiveSummarizationLROResults"): + """An object representing the results for an Extractive Summarization task. + + :ivar last_update_date_time: The last updated time in UTC for the task. Required. + :vartype last_update_date_time: ~datetime.datetime + :ivar status: The status of the task at the mentioned last update time. Required. Known values + are: "notStarted", "running", "succeeded", "partiallyCompleted", "failed", "cancelled", and + "cancelling". + :vartype status: str or ~azure.ai.language.text.models.State + :ivar task_name: task name. + :vartype task_name: str + :ivar kind: Kind of the task. Required. Extractive summarization LRO results + :vartype kind: str or ~azure.ai.language.text.models.EXTRACTIVE_SUMMARIZATION_LRO_RESULTS + :ivar results: Results of the task. Required. + :vartype results: ~azure.ai.language.text.models.ExtractiveSummarizationResult + """ + + kind: Literal[AnalyzeTextLROResultsKind.EXTRACTIVE_SUMMARIZATION_LRO_RESULTS] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the task. Required. Extractive summarization LRO results""" + results: "_models.ExtractiveSummarizationResult" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Results of the task. Required.""" + + @overload + def __init__( + self, + *, + last_update_date_time: datetime.datetime, + status: Union[str, "_models.State"], + results: "_models.ExtractiveSummarizationResult", + task_name: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=AnalyzeTextLROResultsKind.EXTRACTIVE_SUMMARIZATION_LRO_RESULTS, **kwargs) + + +class ExtractiveSummarizationLROTask(AnalyzeTextLROTask, discriminator="ExtractiveSummarization"): + """An object representing the task definition for an Extractive Summarization task. + + :ivar task_name: task name. + :vartype task_name: str + :ivar kind: The Extractive Summarization kind of the long running task. Required. Extractive + summarization task + :vartype kind: str or ~azure.ai.language.text.models.EXTRACTIVE_SUMMARIZATION + :ivar parameters: Parameters for the Extractive Summarization task. + :vartype parameters: ~azure.ai.language.text.models.ExtractiveSummarizationTaskParameters + """ + + kind: Literal[AnalyzeTextLROTaskKind.EXTRACTIVE_SUMMARIZATION] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The Extractive Summarization kind of the long running task. Required. 
Extractive summarization + task""" + parameters: Optional["_models.ExtractiveSummarizationTaskParameters"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Parameters for the Extractive Summarization task.""" + + @overload + def __init__( + self, + *, + task_name: Optional[str] = None, + parameters: Optional["_models.ExtractiveSummarizationTaskParameters"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=AnalyzeTextLROTaskKind.EXTRACTIVE_SUMMARIZATION, **kwargs) + + +class ExtractiveSummarizationResult(_Model): + """An object representing the pre-built Extractive Summarization results of each document. + + :ivar errors: Errors by document id. Required. + :vartype errors: list[~azure.ai.language.text.models.DocumentError] + :ivar statistics: if showStats=true was specified in the request this field will contain + information about the request payload. + :vartype statistics: ~azure.ai.language.text.models.RequestStatistics + :ivar model_version: This field indicates which model is used for scoring. Required. + :vartype model_version: str + :ivar documents: Response by document. Required. + :vartype documents: + list[~azure.ai.language.text.models.ExtractedSummaryDocumentResultWithDetectedLanguage] + """ + + errors: List["_models.DocumentError"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Errors by document id. Required.""" + statistics: Optional["_models.RequestStatistics"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """if showStats=true was specified in the request this field will contain information about the + request payload.""" + model_version: str = rest_field(name="modelVersion", visibility=["read", "create", "update", "delete", "query"]) + """This field indicates which model is used for scoring. Required.""" + documents: List["_models.ExtractedSummaryDocumentResultWithDetectedLanguage"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Response by document. Required.""" + + @overload + def __init__( + self, + *, + errors: List["_models.DocumentError"], + model_version: str, + documents: List["_models.ExtractedSummaryDocumentResultWithDetectedLanguage"], + statistics: Optional["_models.RequestStatistics"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ExtractiveSummarizationTaskParameters(_Model): + """Supported parameters for an Extractive Summarization task. + + :ivar logging_opt_out: logging opt out. + :vartype logging_opt_out: bool + :ivar model_version: model version. + :vartype model_version: str + :ivar sentence_count: Specifies the number of sentences in the extracted summary. + :vartype sentence_count: int + :ivar sort_by: Specifies how to sort the extracted summaries. Known values are: "Offset" and + "Rank". + :vartype sort_by: str or ~azure.ai.language.text.models.ExtractiveSummarizationSortingCriteria + :ivar string_index_type: Specifies the method used to interpret string offsets. 
Known values + are: "TextElements_v8", "UnicodeCodePoint", and "Utf16CodeUnit". + :vartype string_index_type: str or ~azure.ai.language.text.models.StringIndexType + :ivar query: (Optional) If provided, the query will be used to extract most relevant sentences + from the document. + :vartype query: str + """ + + logging_opt_out: Optional[bool] = rest_field( + name="loggingOptOut", visibility=["read", "create", "update", "delete", "query"] + ) + """logging opt out.""" + model_version: Optional[str] = rest_field( + name="modelVersion", visibility=["read", "create", "update", "delete", "query"] + ) + """model version.""" + sentence_count: Optional[int] = rest_field( + name="sentenceCount", visibility=["read", "create", "update", "delete", "query"] + ) + """Specifies the number of sentences in the extracted summary.""" + sort_by: Optional[Union[str, "_models.ExtractiveSummarizationSortingCriteria"]] = rest_field( + name="sortBy", visibility=["read", "create", "update", "delete", "query"] + ) + """Specifies how to sort the extracted summaries. Known values are: \"Offset\" and \"Rank\".""" + string_index_type: Optional[Union[str, "_models.StringIndexType"]] = rest_field( + name="stringIndexType", visibility=["read", "create", "update", "delete", "query"] + ) + """Specifies the method used to interpret string offsets. Known values are: \"TextElements_v8\", + \"UnicodeCodePoint\", and \"Utf16CodeUnit\".""" + query: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """(Optional) If provided, the query will be used to extract most relevant sentences from the + document.""" + + @overload + def __init__( + self, + *, + logging_opt_out: Optional[bool] = None, + model_version: Optional[str] = None, + sentence_count: Optional[int] = None, + sort_by: Optional[Union[str, "_models.ExtractiveSummarizationSortingCriteria"]] = None, + string_index_type: Optional[Union[str, "_models.StringIndexType"]] = None, + query: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class FhirBundle(_Model): + """JSON bundle containing a FHIR compatible object for consumption in other Healthcare tools. For + additional information see `https://www.hl7.org/fhir/overview.html + `_. + + """ + + +class HealthcareAssertion(_Model): + """Assertion of the entity. + + :ivar conditionality: Describes any conditionality on the entity. Known values are: + "hypothetical" and "conditional". + :vartype conditionality: str or ~azure.ai.language.text.models.Conditionality + :ivar certainty: Describes the entities certainty and polarity. Known values are: "positive", + "positivePossible", "neutralPossible", "negativePossible", and "negative". + :vartype certainty: str or ~azure.ai.language.text.models.Certainty + :ivar association: Describes if the entity is the subject of the text or if it describes + someone else. Known values are: "subject" and "other". + :vartype association: str or ~azure.ai.language.text.models.Association + :ivar temporality: Describes temporal information regarding the entity. Known values are: + "current", "past", and "future". 
+ :vartype temporality: str or ~azure.ai.language.text.models.Temporality + """ + + conditionality: Optional[Union[str, "_models.Conditionality"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Describes any conditionality on the entity. Known values are: \"hypothetical\" and + \"conditional\".""" + certainty: Optional[Union[str, "_models.Certainty"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Describes the entities certainty and polarity. Known values are: \"positive\", + \"positivePossible\", \"neutralPossible\", \"negativePossible\", and \"negative\".""" + association: Optional[Union[str, "_models.Association"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Describes if the entity is the subject of the text or if it describes someone else. Known + values are: \"subject\" and \"other\".""" + temporality: Optional[Union[str, "_models.Temporality"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Describes temporal information regarding the entity. Known values are: \"current\", \"past\", + and \"future\".""" + + @overload + def __init__( + self, + *, + conditionality: Optional[Union[str, "_models.Conditionality"]] = None, + certainty: Optional[Union[str, "_models.Certainty"]] = None, + association: Optional[Union[str, "_models.Association"]] = None, + temporality: Optional[Union[str, "_models.Temporality"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class HealthcareEntitiesDocumentResultWithDocumentDetectedLanguage(_Model): # pylint: disable=name-too-long + """Result object for the processed Healthcare document with detected language. + + :ivar id: Unique, non-empty document identifier. Required. + :vartype id: str + :ivar warnings: Warnings encountered while processing document. Required. + :vartype warnings: list[~azure.ai.language.text.models.DocumentWarning] + :ivar statistics: if showStats=true was specified in the request this field will contain + information about the document payload. + :vartype statistics: ~azure.ai.language.text.models.DocumentStatistics + :ivar entities: Healthcare entities. Required. + :vartype entities: list[~azure.ai.language.text.models.HealthcareEntity] + :ivar relations: Healthcare entity relations. Required. + :vartype relations: list[~azure.ai.language.text.models.HealthcareRelation] + :ivar fhir_bundle: JSON bundle containing a FHIR compatible object for consumption in other + Healthcare tools. For additional information see `https://www.hl7.org/fhir/overview.html + `_. + :vartype fhir_bundle: ~azure.ai.language.text.models.FhirBundle + :ivar detected_language: If 'language' is set to 'auto' for the document in the request this + field will contain a 2 letter ISO 639-1 representation of the language detected for this + document. + :vartype detected_language: ~azure.ai.language.text.models.DetectedLanguage + """ + + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Unique, non-empty document identifier. Required.""" + warnings: List["_models.DocumentWarning"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Warnings encountered while processing document. 
Required.""" + statistics: Optional["_models.DocumentStatistics"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """if showStats=true was specified in the request this field will contain information about the + document payload.""" + entities: List["_models.HealthcareEntity"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Healthcare entities. Required.""" + relations: List["_models.HealthcareRelation"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Healthcare entity relations. Required.""" + fhir_bundle: Optional["_models.FhirBundle"] = rest_field( + name="fhirBundle", visibility=["read", "create", "update", "delete", "query"] + ) + """JSON bundle containing a FHIR compatible object for consumption in other Healthcare tools. For + additional information see `https://www.hl7.org/fhir/overview.html + `_.""" + detected_language: Optional["_models.DetectedLanguage"] = rest_field( + name="detectedLanguage", visibility=["read", "create", "update", "delete", "query"] + ) + """If 'language' is set to 'auto' for the document in the request this field will contain a 2 + letter ISO 639-1 representation of the language detected for this document.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + warnings: List["_models.DocumentWarning"], + entities: List["_models.HealthcareEntity"], + relations: List["_models.HealthcareRelation"], + statistics: Optional["_models.DocumentStatistics"] = None, + fhir_bundle: Optional["_models.FhirBundle"] = None, + detected_language: Optional["_models.DetectedLanguage"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class HealthcareEntity(_Model): + """Healthcare entity extracted from the document. + + :ivar text: Entity text as appears in the request. Required. + :vartype text: str + :ivar category: Healthcare Entity Category. Required. Known values are: "BodyStructure", "Age", + "Gender", "ExaminationName", "Date", "Direction", "Frequency", "MeasurementValue", + "MeasurementUnit", "RelationalOperator", "Time", "GeneOrProtein", "Variant", + "AdministrativeEvent", "CareEnvironment", "HealthcareProfession", "Diagnosis", "SymptomOrSign", + "ConditionQualifier", "MedicationClass", "MedicationName", "Dosage", "MedicationForm", + "MedicationRoute", "FamilyRelation", "TreatmentName", "Ethnicity", "Course", "Expression", + "MutationType", "ConditionScale", "Allergen", "Employment", "LivingStatus", "SubstanceUse", and + "SubstanceUseAmount". + :vartype category: str or ~azure.ai.language.text.models.HealthcareEntityCategory + :ivar subcategory: (Optional) Entity sub type. + :vartype subcategory: str + :ivar offset: Start position for the entity text. Use of different 'stringIndexType' values can + affect the offset returned. Required. + :vartype offset: int + :ivar length: Length for the entity text. Use of different 'stringIndexType' values can affect + the length returned. Required. + :vartype length: int + :ivar confidence_score: Confidence score between 0 and 1 of the extracted entity. Required. + :vartype confidence_score: float + :ivar assertion: Assertion of the entity. 
+ :vartype assertion: ~azure.ai.language.text.models.HealthcareAssertion + :ivar name: Preferred name for the entity. Example: 'histologically' would have a 'name' of + 'histologic'. + :vartype name: str + :ivar links: Entity references in known data sources. + :vartype links: list[~azure.ai.language.text.models.HealthcareEntityLink] + """ + + text: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Entity text as appears in the request. Required.""" + category: Union[str, "_models.HealthcareEntityCategory"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Healthcare Entity Category. Required. Known values are: \"BodyStructure\", \"Age\", \"Gender\", + \"ExaminationName\", \"Date\", \"Direction\", \"Frequency\", \"MeasurementValue\", + \"MeasurementUnit\", \"RelationalOperator\", \"Time\", \"GeneOrProtein\", \"Variant\", + \"AdministrativeEvent\", \"CareEnvironment\", \"HealthcareProfession\", \"Diagnosis\", + \"SymptomOrSign\", \"ConditionQualifier\", \"MedicationClass\", \"MedicationName\", \"Dosage\", + \"MedicationForm\", \"MedicationRoute\", \"FamilyRelation\", \"TreatmentName\", \"Ethnicity\", + \"Course\", \"Expression\", \"MutationType\", \"ConditionScale\", \"Allergen\", \"Employment\", + \"LivingStatus\", \"SubstanceUse\", and \"SubstanceUseAmount\".""" + subcategory: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """(Optional) Entity sub type.""" + offset: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Start position for the entity text. Use of different 'stringIndexType' values can affect the + offset returned. Required.""" + length: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Length for the entity text. Use of different 'stringIndexType' values can affect the length + returned. Required.""" + confidence_score: float = rest_field( + name="confidenceScore", visibility=["read", "create", "update", "delete", "query"] + ) + """Confidence score between 0 and 1 of the extracted entity. Required.""" + assertion: Optional["_models.HealthcareAssertion"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Assertion of the entity.""" + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Preferred name for the entity. Example: 'histologically' would have a 'name' of 'histologic'.""" + links: Optional[List["_models.HealthcareEntityLink"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Entity references in known data sources.""" + + @overload + def __init__( + self, + *, + text: str, + category: Union[str, "_models.HealthcareEntityCategory"], + offset: int, + length: int, + confidence_score: float, + subcategory: Optional[str] = None, + assertion: Optional["_models.HealthcareAssertion"] = None, + name: Optional[str] = None, + links: Optional[List["_models.HealthcareEntityLink"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class HealthcareEntityLink(_Model): + """Reference to an entity in known data sources. + + :ivar data_source: Entity Catalog. Examples include: UMLS, CHV, MSH, etc. Required. 
+ :vartype data_source: str + :ivar id: Entity id in the given source catalog. Required. + :vartype id: str + """ + + data_source: str = rest_field(name="dataSource", visibility=["read", "create", "update", "delete", "query"]) + """Entity Catalog. Examples include: UMLS, CHV, MSH, etc. Required.""" + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Entity id in the given source catalog. Required.""" + + @overload + def __init__( + self, + *, + data_source: str, + id: str, # pylint: disable=redefined-builtin + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class HealthcareLROResult(AnalyzeTextLROResult, discriminator="HealthcareLROResults"): + """Healthcare Analyze Text long tunning operation result object. + + :ivar last_update_date_time: The last updated time in UTC for the task. Required. + :vartype last_update_date_time: ~datetime.datetime + :ivar status: The status of the task at the mentioned last update time. Required. Known values + are: "notStarted", "running", "succeeded", "partiallyCompleted", "failed", "cancelled", and + "cancelling". + :vartype status: str or ~azure.ai.language.text.models.State + :ivar task_name: task name. + :vartype task_name: str + :ivar kind: Kind of the task. Required. Healthcare LRO results + :vartype kind: str or ~azure.ai.language.text.models.HEALTHCARE_LRO_RESULTS + :ivar results: Results of the task. Required. + :vartype results: ~azure.ai.language.text.models.HealthcareResult + """ + + kind: Literal[AnalyzeTextLROResultsKind.HEALTHCARE_LRO_RESULTS] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the task. Required. Healthcare LRO results""" + results: "_models.HealthcareResult" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Results of the task. Required.""" + + @overload + def __init__( + self, + *, + last_update_date_time: datetime.datetime, + status: Union[str, "_models.State"], + results: "_models.HealthcareResult", + task_name: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=AnalyzeTextLROResultsKind.HEALTHCARE_LRO_RESULTS, **kwargs) + + +class HealthcareLROTask(AnalyzeTextLROTask, discriminator="Healthcare"): + """The long running task to be performed by the service on the Healthcare input documents. + + :ivar task_name: task name. + :vartype task_name: str + :ivar kind: Healthcare kind of the long running task. Required. Healthcare task + :vartype kind: str or ~azure.ai.language.text.models.HEALTHCARE + :ivar parameters: Parameters for the Healthcare task. + :vartype parameters: ~azure.ai.language.text.models.HealthcareTaskParameters + """ + + kind: Literal[AnalyzeTextLROTaskKind.HEALTHCARE] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Healthcare kind of the long running task. Required. 
Healthcare task""" + parameters: Optional["_models.HealthcareTaskParameters"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Parameters for the Healthcare task.""" + + @overload + def __init__( + self, + *, + task_name: Optional[str] = None, + parameters: Optional["_models.HealthcareTaskParameters"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=AnalyzeTextLROTaskKind.HEALTHCARE, **kwargs) + + +class HealthcareRelation(_Model): + """Every relation is an entity graph of a certain relationType, where all entities are connected + and have specific roles within the relation context. + + :ivar relation_type: Type of relation. Examples include: ``DosageOfMedication`` or + 'FrequencyOfMedication', etc. Required. Known values are: "Abbreviation", + "DirectionOfBodyStructure", "DirectionOfCondition", "DirectionOfExamination", + "DirectionOfTreatment", "DosageOfMedication", "FormOfMedication", "FrequencyOfMedication", + "FrequencyOfTreatment", "QualifierOfCondition", "RelationOfExamination", "RouteOfMedication", + "TimeOfCondition", "TimeOfEvent", "TimeOfExamination", "TimeOfMedication", "TimeOfTreatment", + "UnitOfCondition", "UnitOfExamination", "ValueOfCondition", "ValueOfExamination", + "BodySiteOfCondition", "BodySiteOfTreatment", "CourseOfCondition", "CourseOfExamination", + "CourseOfMedication", "CourseOfTreatment", "ExaminationFindsCondition", "ExpressionOfGene", + "ExpressionOfVariant", "FrequencyOfCondition", "MutationTypeOfGene", "MutationTypeOfVariant", + "ScaleOfCondition", and "VariantOfGene". + :vartype relation_type: str or ~azure.ai.language.text.models.RelationType + :ivar entities: The entities in the relation. Required. + :vartype entities: list[~azure.ai.language.text.models.HealthcareRelationEntity] + :ivar confidence_score: Confidence score between 0 and 1 of the extracted relation. + :vartype confidence_score: float + """ + + relation_type: Union[str, "_models.RelationType"] = rest_field( + name="relationType", visibility=["read", "create", "update", "delete", "query"] + ) + """Type of relation. Examples include: ``DosageOfMedication`` or 'FrequencyOfMedication', etc. + Required. Known values are: \"Abbreviation\", \"DirectionOfBodyStructure\", + \"DirectionOfCondition\", \"DirectionOfExamination\", \"DirectionOfTreatment\", + \"DosageOfMedication\", \"FormOfMedication\", \"FrequencyOfMedication\", + \"FrequencyOfTreatment\", \"QualifierOfCondition\", \"RelationOfExamination\", + \"RouteOfMedication\", \"TimeOfCondition\", \"TimeOfEvent\", \"TimeOfExamination\", + \"TimeOfMedication\", \"TimeOfTreatment\", \"UnitOfCondition\", \"UnitOfExamination\", + \"ValueOfCondition\", \"ValueOfExamination\", \"BodySiteOfCondition\", \"BodySiteOfTreatment\", + \"CourseOfCondition\", \"CourseOfExamination\", \"CourseOfMedication\", \"CourseOfTreatment\", + \"ExaminationFindsCondition\", \"ExpressionOfGene\", \"ExpressionOfVariant\", + \"FrequencyOfCondition\", \"MutationTypeOfGene\", \"MutationTypeOfVariant\", + \"ScaleOfCondition\", and \"VariantOfGene\".""" + entities: List["_models.HealthcareRelationEntity"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The entities in the relation. 
Required.""" + confidence_score: Optional[float] = rest_field( + name="confidenceScore", visibility=["read", "create", "update", "delete", "query"] + ) + """Confidence score between 0 and 1 of the extracted relation.""" + + @overload + def __init__( + self, + *, + relation_type: Union[str, "_models.RelationType"], + entities: List["_models.HealthcareRelationEntity"], + confidence_score: Optional[float] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class HealthcareRelationEntity(_Model): + """Entity in the relation. + + :ivar ref: Reference link object, using a JSON pointer RFC 6901 (URI Fragment Identifier + Representation), pointing to the entity . Required. + :vartype ref: str + :ivar role: Role of entity in the relationship. For example: 'CD20-positive diffuse large + B-cell lymphoma' has the following entities with their roles in parenthesis: CD20 + (GeneOrProtein), Positive (Expression), diffuse large B-cell lymphoma (Diagnosis). Required. + :vartype role: str + """ + + ref: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Reference link object, using a JSON pointer RFC 6901 (URI Fragment Identifier Representation), + pointing to the entity . Required.""" + role: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Role of entity in the relationship. For example: 'CD20-positive diffuse large B-cell lymphoma' + has the following entities with their roles in parenthesis: CD20 (GeneOrProtein), Positive + (Expression), diffuse large B-cell lymphoma (Diagnosis). Required.""" + + @overload + def __init__( + self, + *, + ref: str, + role: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class HealthcareResult(_Model): + """Result object for the processed Healthcare task. + + :ivar errors: Errors by document id. Required. + :vartype errors: list[~azure.ai.language.text.models.DocumentError] + :ivar statistics: if showStats=true was specified in the request this field will contain + information about the request payload. + :vartype statistics: ~azure.ai.language.text.models.RequestStatistics + :ivar model_version: This field indicates which model is used for scoring. Required. + :vartype model_version: str + :ivar documents: List of result objects for the processed Healthcare documents. Required. + :vartype documents: + list[~azure.ai.language.text.models.HealthcareEntitiesDocumentResultWithDocumentDetectedLanguage] + """ + + errors: List["_models.DocumentError"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Errors by document id. Required.""" + statistics: Optional["_models.RequestStatistics"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """if showStats=true was specified in the request this field will contain information about the + request payload.""" + model_version: str = rest_field(name="modelVersion", visibility=["read", "create", "update", "delete", "query"]) + """This field indicates which model is used for scoring. 
Required.""" + documents: List["_models.HealthcareEntitiesDocumentResultWithDocumentDetectedLanguage"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """List of result objects for the processed Healthcare documents. Required.""" + + @overload + def __init__( + self, + *, + errors: List["_models.DocumentError"], + model_version: str, + documents: List["_models.HealthcareEntitiesDocumentResultWithDocumentDetectedLanguage"], + statistics: Optional["_models.RequestStatistics"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class HealthcareTaskParameters(_Model): + """Supported parameters for a Healthcare task. + + :ivar logging_opt_out: logging opt out. + :vartype logging_opt_out: bool + :ivar model_version: model version. + :vartype model_version: str + :ivar string_index_type: Specifies the method used to interpret string offsets. Known values + are: "TextElements_v8", "UnicodeCodePoint", and "Utf16CodeUnit". + :vartype string_index_type: str or ~azure.ai.language.text.models.StringIndexType + :ivar fhir_version: The FHIR Spec version that the result will use to format the fhirBundle. + For additional information see `https://www.hl7.org/fhir/overview.html + `_. "4.0.1" + :vartype fhir_version: str or ~azure.ai.language.text.models.FhirVersion + :ivar document_type: Document type that can be provided as input for Fhir Documents. Expect to + have fhirVersion provided when used. Behavior of using None enum is the same as not using the + documentType parameter. Known values are: "None", "ClinicalTrial", "DischargeSummary", + "ProgressNote", "HistoryAndPhysical", "Consult", "Imaging", "Pathology", and "ProcedureNote". + :vartype document_type: str or ~azure.ai.language.text.models.HealthcareDocumentType + """ + + logging_opt_out: Optional[bool] = rest_field( + name="loggingOptOut", visibility=["read", "create", "update", "delete", "query"] + ) + """logging opt out.""" + model_version: Optional[str] = rest_field( + name="modelVersion", visibility=["read", "create", "update", "delete", "query"] + ) + """model version.""" + string_index_type: Optional[Union[str, "_models.StringIndexType"]] = rest_field( + name="stringIndexType", visibility=["read", "create", "update", "delete", "query"] + ) + """Specifies the method used to interpret string offsets. Known values are: \"TextElements_v8\", + \"UnicodeCodePoint\", and \"Utf16CodeUnit\".""" + fhir_version: Optional[Union[str, "_models.FhirVersion"]] = rest_field( + name="fhirVersion", visibility=["read", "create", "update", "delete", "query"] + ) + """The FHIR Spec version that the result will use to format the fhirBundle. For additional + information see `https://www.hl7.org/fhir/overview.html + `_. \"4.0.1\"""" + document_type: Optional[Union[str, "_models.HealthcareDocumentType"]] = rest_field( + name="documentType", visibility=["read", "create", "update", "delete", "query"] + ) + """Document type that can be provided as input for Fhir Documents. Expect to have fhirVersion + provided when used. Behavior of using None enum is the same as not using the documentType + parameter. 
Known values are: \"None\", \"ClinicalTrial\", \"DischargeSummary\", + \"ProgressNote\", \"HistoryAndPhysical\", \"Consult\", \"Imaging\", \"Pathology\", and + \"ProcedureNote\".""" + + @overload + def __init__( + self, + *, + logging_opt_out: Optional[bool] = None, + model_version: Optional[str] = None, + string_index_type: Optional[Union[str, "_models.StringIndexType"]] = None, + fhir_version: Optional[Union[str, "_models.FhirVersion"]] = None, + document_type: Optional[Union[str, "_models.HealthcareDocumentType"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class InformationMetadata(BaseMetadata, discriminator="InformationMetadata"): + """Represents the Information (data) entity Metadata model. + + :ivar value: The numeric value that the extracted text denotes. Required. + :vartype value: float + :ivar metadata_kind: Kind of the metadata. Required. Metadata for information-related values. + :vartype metadata_kind: str or ~azure.ai.language.text.models.INFORMATION_METADATA + :ivar unit: Unit of measure for information. Required. Known values are: "Unspecified", "Bit", + "Kilobit", "Megabit", "Gigabit", "Terabit", "Petabit", "Byte", "Kilobyte", "Megabyte", + "Gigabyte", "Terabyte", and "Petabyte". + :vartype unit: str or ~azure.ai.language.text.models.InformationUnit + """ + + value: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The numeric value that the extracted text denotes. Required.""" + metadata_kind: Literal[MetadataKind.INFORMATION_METADATA] = rest_discriminator(name="metadataKind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the metadata. Required. Metadata for information-related values.""" + unit: Union[str, "_models.InformationUnit"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Unit of measure for information. Required. Known values are: \"Unspecified\", \"Bit\", + \"Kilobit\", \"Megabit\", \"Gigabit\", \"Terabit\", \"Petabit\", \"Byte\", \"Kilobyte\", + \"Megabyte\", \"Gigabyte\", \"Terabyte\", and \"Petabyte\".""" + + @overload + def __init__( + self, + *, + value: float, + unit: Union[str, "_models.InformationUnit"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, metadata_kind=MetadataKind.INFORMATION_METADATA, **kwargs) + + +class InnerErrorModel(_Model): + """An object containing more specific information about the error. As per Microsoft One API + guidelines - + `https://github.com/Microsoft/api-guidelines/blob/vNext/Guidelines.md#7102-error-condition-responses + `_. + + :ivar code: One of a server-defined set of error codes. Required. Known values are: + "InvalidRequest", "InvalidParameterValue", "KnowledgeBaseNotFound", + "AzureCognitiveSearchNotFound", "AzureCognitiveSearchThrottling", "ExtractionFailure", + "InvalidRequestBodyFormat", "EmptyRequest", "MissingInputDocuments", "InvalidDocument", + "ModelVersionIncorrect", "InvalidDocumentBatch", "UnsupportedLanguageCode", and + "InvalidCountryHint". 
+ :vartype code: str or ~azure.ai.language.text.models.InnerErrorCode + :ivar message: Error message. Required. + :vartype message: str + :ivar details: Error details. + :vartype details: dict[str, str] + :ivar target: Error target. + :vartype target: str + :ivar innererror: An object containing more specific information than the current object about + the error. + :vartype innererror: ~azure.ai.language.text.models.InnerErrorModel + """ + + code: Union[str, "_models.InnerErrorCode"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """One of a server-defined set of error codes. Required. Known values are: \"InvalidRequest\", + \"InvalidParameterValue\", \"KnowledgeBaseNotFound\", \"AzureCognitiveSearchNotFound\", + \"AzureCognitiveSearchThrottling\", \"ExtractionFailure\", \"InvalidRequestBodyFormat\", + \"EmptyRequest\", \"MissingInputDocuments\", \"InvalidDocument\", \"ModelVersionIncorrect\", + \"InvalidDocumentBatch\", \"UnsupportedLanguageCode\", and \"InvalidCountryHint\".""" + message: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Error message. Required.""" + details: Optional[Dict[str, str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Error details.""" + target: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Error target.""" + innererror: Optional["_models.InnerErrorModel"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """An object containing more specific information than the current object about the error.""" + + @overload + def __init__( + self, + *, + code: Union[str, "_models.InnerErrorCode"], + message: str, + details: Optional[Dict[str, str]] = None, + target: Optional[str] = None, + innererror: Optional["_models.InnerErrorModel"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class KeyPhraseExtractionLROResult(AnalyzeTextLROResult, discriminator="KeyPhraseExtractionLROResults"): + """Contains the analyze text KeyPhraseExtraction LRO task. + + :ivar last_update_date_time: The last updated time in UTC for the task. Required. + :vartype last_update_date_time: ~datetime.datetime + :ivar status: The status of the task at the mentioned last update time. Required. Known values + are: "notStarted", "running", "succeeded", "partiallyCompleted", "failed", "cancelled", and + "cancelling". + :vartype status: str or ~azure.ai.language.text.models.State + :ivar task_name: task name. + :vartype task_name: str + :ivar kind: Kind of the task. Required. Key phrase extraction LRO results + :vartype kind: str or ~azure.ai.language.text.models.KEY_PHRASE_EXTRACTION_LRO_RESULTS + :ivar results: The list of Key phrase extraction results. Required. + :vartype results: ~azure.ai.language.text.models.KeyPhraseResult + """ + + kind: Literal[AnalyzeTextLROResultsKind.KEY_PHRASE_EXTRACTION_LRO_RESULTS] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the task. Required. Key phrase extraction LRO results""" + results: "_models.KeyPhraseResult" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The list of Key phrase extraction results. 
Required.""" + + @overload + def __init__( + self, + *, + last_update_date_time: datetime.datetime, + status: Union[str, "_models.State"], + results: "_models.KeyPhraseResult", + task_name: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=AnalyzeTextLROResultsKind.KEY_PHRASE_EXTRACTION_LRO_RESULTS, **kwargs) + + +class KeyPhraseLROTask(AnalyzeTextLROTask, discriminator="KeyPhraseExtraction"): + """An object representing the task definition for a Key Phrase Extraction task. + + :ivar task_name: task name. + :vartype task_name: str + :ivar kind: Kind of the task. Required. Key phrase extraction task + :vartype kind: str or ~azure.ai.language.text.models.KEY_PHRASE_EXTRACTION + :ivar parameters: Key phrase extraction task parameters. + :vartype parameters: ~azure.ai.language.text.models.KeyPhraseTaskParameters + """ + + kind: Literal[AnalyzeTextLROTaskKind.KEY_PHRASE_EXTRACTION] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the task. Required. Key phrase extraction task""" + parameters: Optional["_models.KeyPhraseTaskParameters"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Key phrase extraction task parameters.""" + + @overload + def __init__( + self, + *, + task_name: Optional[str] = None, + parameters: Optional["_models.KeyPhraseTaskParameters"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=AnalyzeTextLROTaskKind.KEY_PHRASE_EXTRACTION, **kwargs) + + +class KeyPhraseResult(_Model): + """Contains the KeyPhraseResult. + + :ivar errors: Errors by document id. Required. + :vartype errors: list[~azure.ai.language.text.models.DocumentError] + :ivar statistics: if showStats=true was specified in the request this field will contain + information about the request payload. + :vartype statistics: ~azure.ai.language.text.models.RequestStatistics + :ivar model_version: This field indicates which model is used for scoring. Required. + :vartype model_version: str + :ivar documents: Response by document. Required. + :vartype documents: + list[~azure.ai.language.text.models.KeyPhrasesDocumentResultWithDetectedLanguage] + """ + + errors: List["_models.DocumentError"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Errors by document id. Required.""" + statistics: Optional["_models.RequestStatistics"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """if showStats=true was specified in the request this field will contain information about the + request payload.""" + model_version: str = rest_field(name="modelVersion", visibility=["read", "create", "update", "delete", "query"]) + """This field indicates which model is used for scoring. Required.""" + documents: List["_models.KeyPhrasesDocumentResultWithDetectedLanguage"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Response by document. 
Required.""" + + @overload + def __init__( + self, + *, + errors: List["_models.DocumentError"], + model_version: str, + documents: List["_models.KeyPhrasesDocumentResultWithDetectedLanguage"], + statistics: Optional["_models.RequestStatistics"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class KeyPhrasesDocumentResultWithDetectedLanguage(_Model): # pylint: disable=name-too-long + """A ranked list of sentences representing the extracted summary. + + :ivar id: Unique, non-empty document identifier. Required. + :vartype id: str + :ivar warnings: Warnings encountered while processing document. Required. + :vartype warnings: list[~azure.ai.language.text.models.DocumentWarning] + :ivar statistics: if showStats=true was specified in the request this field will contain + information about the document payload. + :vartype statistics: ~azure.ai.language.text.models.DocumentStatistics + :ivar key_phrases: A list of representative words or phrases. The number of key phrases + returned is proportional to the number of words in the input document. Required. + :vartype key_phrases: list[str] + :ivar detected_language: If 'language' is set to 'auto' for the document in the request this + field will contain a 2 letter ISO 639-1 representation of the language detected for this + document. + :vartype detected_language: ~azure.ai.language.text.models.DetectedLanguage + """ + + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Unique, non-empty document identifier. Required.""" + warnings: List["_models.DocumentWarning"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Warnings encountered while processing document. Required.""" + statistics: Optional["_models.DocumentStatistics"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """if showStats=true was specified in the request this field will contain information about the + document payload.""" + key_phrases: List[str] = rest_field(name="keyPhrases", visibility=["read", "create", "update", "delete", "query"]) + """A list of representative words or phrases. The number of key phrases returned is proportional + to the number of words in the input document. Required.""" + detected_language: Optional["_models.DetectedLanguage"] = rest_field( + name="detectedLanguage", visibility=["read", "create", "update", "delete", "query"] + ) + """If 'language' is set to 'auto' for the document in the request this field will contain a 2 + letter ISO 639-1 representation of the language detected for this document.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + warnings: List["_models.DocumentWarning"], + key_phrases: List[str], + statistics: Optional["_models.DocumentStatistics"] = None, + detected_language: Optional["_models.DetectedLanguage"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class KeyPhraseTaskParameters(_Model): + """Supported parameters for a Key Phrase Extraction task. + + :ivar logging_opt_out: logging opt out. 
+ :vartype logging_opt_out: bool + :ivar model_version: model version. + :vartype model_version: str + """ + + logging_opt_out: Optional[bool] = rest_field( + name="loggingOptOut", visibility=["read", "create", "update", "delete", "query"] + ) + """logging opt out.""" + model_version: Optional[str] = rest_field( + name="modelVersion", visibility=["read", "create", "update", "delete", "query"] + ) + """model version.""" + + @overload + def __init__( + self, + *, + logging_opt_out: Optional[bool] = None, + model_version: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class KeyPhraseTaskResult(AnalyzeTextTaskResult, discriminator="KeyPhraseExtractionResults"): + """Contains the analyze text KeyPhraseExtraction task result. + + :ivar kind: Kind of the task results. Required. Key phrase extraction results + :vartype kind: str or ~azure.ai.language.text.models.KEY_PHRASE_EXTRACTION_RESULTS + :ivar results: The list of Key phrase extraction results. Required. + :vartype results: ~azure.ai.language.text.models.KeyPhraseResult + """ + + kind: Literal[AnalyzeTextTaskResultsKind.KEY_PHRASE_EXTRACTION_RESULTS] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the task results. Required. Key phrase extraction results""" + results: "_models.KeyPhraseResult" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The list of Key phrase extraction results. Required.""" + + @overload + def __init__( + self, + *, + results: "_models.KeyPhraseResult", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=AnalyzeTextTaskResultsKind.KEY_PHRASE_EXTRACTION_RESULTS, **kwargs) + + +class LanguageDetectionAnalysisInput(_Model): + """Contains the language detection document analysis input. + + :ivar documents: List of documents to be analyzed. + :vartype documents: list[~azure.ai.language.text.models.LanguageInput] + """ + + documents: Optional[List["_models.LanguageInput"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """List of documents to be analyzed.""" + + @overload + def __init__( + self, + *, + documents: Optional[List["_models.LanguageInput"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class LanguageDetectionDocumentResult(_Model): + """Contains the language detection for a document. + + :ivar id: Unique, non-empty document identifier. Required. + :vartype id: str + :ivar warnings: Warnings encountered while processing document. Required. + :vartype warnings: list[~azure.ai.language.text.models.DocumentWarning] + :ivar statistics: if showStats=true was specified in the request this field will contain + information about the document payload. 
+ :vartype statistics: ~azure.ai.language.text.models.DocumentStatistics + :ivar detected_language: Detected Language. Required. + :vartype detected_language: ~azure.ai.language.text.models.DetectedLanguage + """ + + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Unique, non-empty document identifier. Required.""" + warnings: List["_models.DocumentWarning"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Warnings encountered while processing document. Required.""" + statistics: Optional["_models.DocumentStatistics"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """if showStats=true was specified in the request this field will contain information about the + document payload.""" + detected_language: "_models.DetectedLanguage" = rest_field( + name="detectedLanguage", visibility=["read", "create", "update", "delete", "query"] + ) + """Detected Language. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + warnings: List["_models.DocumentWarning"], + detected_language: "_models.DetectedLanguage", + statistics: Optional["_models.DocumentStatistics"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class LanguageDetectionResult(_Model): + """Contains the language detection result for the request. + + :ivar errors: Errors by document id. Required. + :vartype errors: list[~azure.ai.language.text.models.DocumentError] + :ivar statistics: if showStats=true was specified in the request this field will contain + information about the request payload. + :vartype statistics: ~azure.ai.language.text.models.RequestStatistics + :ivar model_version: This field indicates which model is used for scoring. Required. + :vartype model_version: str + :ivar documents: Enumeration of language detection results for each input document. Required. + :vartype documents: list[~azure.ai.language.text.models.LanguageDetectionDocumentResult] + """ + + errors: List["_models.DocumentError"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Errors by document id. Required.""" + statistics: Optional["_models.RequestStatistics"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """if showStats=true was specified in the request this field will contain information about the + request payload.""" + model_version: str = rest_field(name="modelVersion", visibility=["read", "create", "update", "delete", "query"]) + """This field indicates which model is used for scoring. Required.""" + documents: List["_models.LanguageDetectionDocumentResult"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Enumeration of language detection results for each input document. Required.""" + + @overload + def __init__( + self, + *, + errors: List["_models.DocumentError"], + model_version: str, + documents: List["_models.LanguageDetectionDocumentResult"], + statistics: Optional["_models.RequestStatistics"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class LanguageDetectionTaskParameters(_Model): + """Supported parameters for a Language Detection task. + + :ivar logging_opt_out: logging opt out. + :vartype logging_opt_out: bool + :ivar model_version: model version. + :vartype model_version: str + """ + + logging_opt_out: Optional[bool] = rest_field( + name="loggingOptOut", visibility=["read", "create", "update", "delete", "query"] + ) + """logging opt out.""" + model_version: Optional[str] = rest_field( + name="modelVersion", visibility=["read", "create", "update", "delete", "query"] + ) + """model version.""" + + @overload + def __init__( + self, + *, + logging_opt_out: Optional[bool] = None, + model_version: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class LanguageDetectionTaskResult(AnalyzeTextTaskResult, discriminator="LanguageDetectionResults"): + """Contains the language detection task result for the request. + + :ivar kind: Kind of the task result. Required. Language detection results + :vartype kind: str or ~azure.ai.language.text.models.LANGUAGE_DETECTION_RESULTS + :ivar results: Contains the language detection results. Required. + :vartype results: ~azure.ai.language.text.models.LanguageDetectionResult + """ + + kind: Literal[AnalyzeTextTaskResultsKind.LANGUAGE_DETECTION_RESULTS] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the task result. Required. Language detection results""" + results: "_models.LanguageDetectionResult" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Contains the language detection results. Required.""" + + @overload + def __init__( + self, + *, + results: "_models.LanguageDetectionResult", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=AnalyzeTextTaskResultsKind.LANGUAGE_DETECTION_RESULTS, **kwargs) + + +class LanguageInput(_Model): + """Contains the language detection input. + + :ivar id: A unique, non-empty document identifier. Required. + :vartype id: str + :ivar text: The input text to process. Required. + :vartype text: str + :ivar country_hint: The country hint to help with language detection of the text. + :vartype country_hint: str + """ + + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A unique, non-empty document identifier. Required.""" + text: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The input text to process. Required.""" + country_hint: Optional[str] = rest_field( + name="countryHint", visibility=["read", "create", "update", "delete", "query"] + ) + """The country hint to help with language detection of the text.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + text: str, + country_hint: Optional[str] = None, + ) -> None: ... 
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class LengthMetadata(BaseMetadata, discriminator="LengthMetadata"): + """Represents the Length entity Metadata model. + + :ivar value: The numeric value that the extracted text denotes. Required. + :vartype value: float + :ivar metadata_kind: Kind of the metadata. Required. Metadata for length-related values. + :vartype metadata_kind: str or ~azure.ai.language.text.models.LENGTH_METADATA + :ivar unit: Unit of measure for length. Required. Known values are: "Unspecified", "Kilometer", + "Hectometer", "Decameter", "Meter", "Decimeter", "Centimeter", "Millimeter", "Micrometer", + "Nanometer", "Picometer", "Mile", "Yard", "Inch", "Foot", "LightYear", and "Point". + :vartype unit: str or ~azure.ai.language.text.models.LengthUnit + """ + + value: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The numeric value that the extracted text denotes. Required.""" + metadata_kind: Literal[MetadataKind.LENGTH_METADATA] = rest_discriminator(name="metadataKind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the metadata. Required. Metadata for length-related values.""" + unit: Union[str, "_models.LengthUnit"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Unit of measure for length. Required. Known values are: \"Unspecified\", \"Kilometer\", + \"Hectometer\", \"Decameter\", \"Meter\", \"Decimeter\", \"Centimeter\", \"Millimeter\", + \"Micrometer\", \"Nanometer\", \"Picometer\", \"Mile\", \"Yard\", \"Inch\", \"Foot\", + \"LightYear\", and \"Point\".""" + + @overload + def __init__( + self, + *, + value: float, + unit: Union[str, "_models.LengthUnit"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, metadata_kind=MetadataKind.LENGTH_METADATA, **kwargs) + + +class LinkedEntity(_Model): + """The LinkedEntity object containing the detected entity with the associated sources/links. + + :ivar name: Entity Linking formal name. Required. + :vartype name: str + :ivar matches: List of instances this entity appears in the text. Required. + :vartype matches: list[~azure.ai.language.text.models.Match] + :ivar language: Language used in the data source. Required. + :vartype language: str + :ivar id: Unique identifier of the recognized entity from the data source. + :vartype id: str + :ivar url: URL for the entity's page from the data source. Required. + :vartype url: str + :ivar data_source: Data source used to extract entity linking, such as Wiki/Bing etc. Required. + :vartype data_source: str + :ivar bing_id: Bing Entity Search API unique identifier of the recognized entity. + :vartype bing_id: str + """ + + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Entity Linking formal name. Required.""" + matches: List["_models.Match"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """List of instances this entity appears in the text. 
Required.""" + language: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Language used in the data source. Required.""" + id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Unique identifier of the recognized entity from the data source.""" + url: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """URL for the entity's page from the data source. Required.""" + data_source: str = rest_field(name="dataSource", visibility=["read", "create", "update", "delete", "query"]) + """Data source used to extract entity linking, such as Wiki/Bing etc. Required.""" + bing_id: Optional[str] = rest_field(name="bingId", visibility=["read", "create", "update", "delete", "query"]) + """Bing Entity Search API unique identifier of the recognized entity.""" + + @overload + def __init__( + self, + *, + name: str, + matches: List["_models.Match"], + language: str, + url: str, + data_source: str, + id: Optional[str] = None, # pylint: disable=redefined-builtin + bing_id: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class Match(_Model): + """The Match object containing the detected entity text with the offset and the length. + + :ivar confidence_score: If a well known item is recognized, a decimal number denoting the + confidence level between 0 and 1 will be returned. Required. + :vartype confidence_score: float + :ivar text: Entity text as appears in the request. Required. + :vartype text: str + :ivar offset: Start position for the entity match text. Required. + :vartype offset: int + :ivar length: Length for the entity match text. Required. + :vartype length: int + """ + + confidence_score: float = rest_field( + name="confidenceScore", visibility=["read", "create", "update", "delete", "query"] + ) + """If a well known item is recognized, a decimal number denoting the confidence level between 0 + and 1 will be returned. Required.""" + text: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Entity text as appears in the request. Required.""" + offset: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Start position for the entity match text. Required.""" + length: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Length for the entity match text. Required.""" + + @overload + def __init__( + self, + *, + confidence_score: float, + text: str, + offset: int, + length: int, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MatchLongestEntityPolicyType(BaseEntityOverlapPolicy, discriminator="matchLongest"): + """Represents the Match longest overlap policy. No overlapping entities as far as it is possible. + 1. If there are overlapping entities, the longest one will be returned. 2. If the set of + characters predicted for 2 or more entities are exactly the same, select the entity that has + the higher confidence score.3. 
If the entity scores are identical, return all entities that are + still present after applying the previous rules. 3. If there is partial overlap (as in Hello + Text Analytics) follow the above steps starting from 1. + + :ivar policy_kind: The entity OverlapPolicy object kind. Required. Represents + MatchLongestEntityPolicyType + :vartype policy_kind: str or ~azure.ai.language.text.models.MATCH_LONGEST + """ + + policy_kind: Literal[PolicyKind.MATCH_LONGEST] = rest_discriminator(name="policyKind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The entity OverlapPolicy object kind. Required. Represents MatchLongestEntityPolicyType""" + + @overload + def __init__( + self, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, policy_kind=PolicyKind.MATCH_LONGEST, **kwargs) + + +class MultiLanguageAnalysisInput(_Model): + """Collection of input documents to be analyzed by the service. + + :ivar documents: The input documents to be analyzed. + :vartype documents: list[~azure.ai.language.text.models.MultiLanguageInput] + """ + + documents: Optional[List["_models.MultiLanguageInput"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The input documents to be analyzed.""" + + @overload + def __init__( + self, + *, + documents: Optional[List["_models.MultiLanguageInput"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MultiLanguageInput(_Model): + """Contains an input document to be analyzed by the service. + + :ivar id: A unique, non-empty document identifier. Required. + :vartype id: str + :ivar text: The input text to process. Required. + :vartype text: str + :ivar language: (Optional) This is the 2 letter ISO 639-1 representation of a language. For + example, use \\"en\\" for English; \\"es\\" for Spanish etc. If not set, use \\"en\\" for + English as default. (Following only applies to 2023-04-15-preview and above) For Auto Language + Detection, use \\"auto\\". If not set, use \\"en\\" for English as default. + :vartype language: str + """ + + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A unique, non-empty document identifier. Required.""" + text: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The input text to process. Required.""" + language: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """(Optional) This is the 2 letter ISO 639-1 representation of a language. For example, use + \\"en\\" for English; \\"es\\" for Spanish etc. If not set, use \\"en\\" for English as + default. (Following only applies to 2023-04-15-preview and above) For Auto Language Detection, + use \\"auto\\". If not set, use \\"en\\" for English as default.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + text: str, + language: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class NoMaskPolicyType(BaseRedactionPolicy, discriminator="noMask"): + """Represents the policy of not redacting found PII. + + :ivar policy_kind: The entity RedactionPolicy object kind. Required. Do not redact detected + entities. + :vartype policy_kind: str or ~azure.ai.language.text.models.NO_MASK + """ + + policy_kind: Literal[RedactionPolicyKind.NO_MASK] = rest_discriminator(name="policyKind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The entity RedactionPolicy object kind. Required. Do not redact detected entities.""" + + @overload + def __init__( + self, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, policy_kind=RedactionPolicyKind.NO_MASK, **kwargs) + + +class NumberMetadata(BaseMetadata, discriminator="NumberMetadata"): + """A metadata for numeric entity instances. + + :ivar metadata_kind: Kind of the metadata. Required. Metadata for numeric values. + :vartype metadata_kind: str or ~azure.ai.language.text.models.NUMBER_METADATA + :ivar number_kind: Kind of the number type. Required. Known values are: "Integer", "Decimal", + "Power", "Fraction", "Percent", and "Unspecified". + :vartype number_kind: str or ~azure.ai.language.text.models.NumberKind + :ivar value: A numeric representation of what the extracted text denotes. Required. + :vartype value: float + """ + + metadata_kind: Literal[MetadataKind.NUMBER_METADATA] = rest_discriminator(name="metadataKind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the metadata. Required. Metadata for numeric values.""" + number_kind: Union[str, "_models.NumberKind"] = rest_field( + name="numberKind", visibility=["read", "create", "update", "delete", "query"] + ) + """Kind of the number type. Required. Known values are: \"Integer\", \"Decimal\", \"Power\", + \"Fraction\", \"Percent\", and \"Unspecified\".""" + value: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A numeric representation of what the extracted text denotes. Required.""" + + @overload + def __init__( + self, + *, + number_kind: Union[str, "_models.NumberKind"], + value: float, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, metadata_kind=MetadataKind.NUMBER_METADATA, **kwargs) + + +class NumericRangeMetadata(BaseMetadata, discriminator="NumericRangeMetadata"): + """represents the Metadata of numeric intervals. + + :ivar metadata_kind: Kind of the metadata. Required. Metadata for numeric range values. + :vartype metadata_kind: str or ~azure.ai.language.text.models.NUMERIC_RANGE_METADATA + :ivar range_kind: Kind of numeric ranges supported - like Number, Speed, etc. Required. Known + values are: "Number", "Speed", "Weight", "Length", "Volume", "Area", "Age", "Information", + "Temperature", and "Currency". + :vartype range_kind: str or ~azure.ai.language.text.models.RangeKind + :ivar minimum: The beginning value of the interval. Required. 
+ :vartype minimum: float + :ivar maximum: The ending value of the interval. Required. + :vartype maximum: float + :ivar range_inclusivity: The inclusiveness of this range. Known values are: "NoneInclusive", + "LeftInclusive", "RightInclusive", and "LeftRightInclusive". + :vartype range_inclusivity: str or ~azure.ai.language.text.models.RangeInclusivity + """ + + metadata_kind: Literal[MetadataKind.NUMERIC_RANGE_METADATA] = rest_discriminator(name="metadataKind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the metadata. Required. Metadata for numeric range values.""" + range_kind: Union[str, "_models.RangeKind"] = rest_field( + name="rangeKind", visibility=["read", "create", "update", "delete", "query"] + ) + """Kind of numeric ranges supported - like Number, Speed, etc. Required. Known values are: + \"Number\", \"Speed\", \"Weight\", \"Length\", \"Volume\", \"Area\", \"Age\", \"Information\", + \"Temperature\", and \"Currency\".""" + minimum: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The beginning value of the interval. Required.""" + maximum: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The ending value of the interval. Required.""" + range_inclusivity: Optional[Union[str, "_models.RangeInclusivity"]] = rest_field( + name="rangeInclusivity", visibility=["read", "create", "update", "delete", "query"] + ) + """The inclusiveness of this range. Known values are: \"NoneInclusive\", \"LeftInclusive\", + \"RightInclusive\", and \"LeftRightInclusive\".""" + + @overload + def __init__( + self, + *, + range_kind: Union[str, "_models.RangeKind"], + minimum: float, + maximum: float, + range_inclusivity: Optional[Union[str, "_models.RangeInclusivity"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, metadata_kind=MetadataKind.NUMERIC_RANGE_METADATA, **kwargs) + + +class OrdinalMetadata(BaseMetadata, discriminator="OrdinalMetadata"): + """A metadata for numeric entity instances. + + :ivar metadata_kind: Kind of the metadata. Required. Metadata for ordinal numbers. + :vartype metadata_kind: str or ~azure.ai.language.text.models.ORDINAL_METADATA + :ivar offset: The offset with respect to the reference (e.g., offset = -1 indicates the second + to last). Required. + :vartype offset: str + :ivar relative_to: The reference point that the ordinal number denotes. Required. Known values + are: "Current", "End", and "Start". + :vartype relative_to: str or ~azure.ai.language.text.models.RelativeTo + :ivar value: A simple arithmetic expression that the ordinal denotes. Required. + :vartype value: str + """ + + metadata_kind: Literal[MetadataKind.ORDINAL_METADATA] = rest_discriminator(name="metadataKind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the metadata. Required. Metadata for ordinal numbers.""" + offset: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The offset with respect to the reference (e.g., offset = -1 indicates the second to last). + Required.""" + relative_to: Union[str, "_models.RelativeTo"] = rest_field( + name="relativeTo", visibility=["read", "create", "update", "delete", "query"] + ) + """The reference point that the ordinal number denotes. Required. 
Known values are: \"Current\", + \"End\", and \"Start\".""" + value: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A simple arithmetic expression that the ordinal denotes. Required.""" + + @overload + def __init__( + self, + *, + offset: str, + relative_to: Union[str, "_models.RelativeTo"], + value: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, metadata_kind=MetadataKind.ORDINAL_METADATA, **kwargs) + + +class PiiEntityRecognitionLROResult(AnalyzeTextLROResult, discriminator="PiiEntityRecognitionLROResults"): + """Contains the PII LRO results. + + :ivar last_update_date_time: The last updated time in UTC for the task. Required. + :vartype last_update_date_time: ~datetime.datetime + :ivar status: The status of the task at the mentioned last update time. Required. Known values + are: "notStarted", "running", "succeeded", "partiallyCompleted", "failed", "cancelled", and + "cancelling". + :vartype status: str or ~azure.ai.language.text.models.State + :ivar task_name: task name. + :vartype task_name: str + :ivar kind: The kind of the task. Required. PII entity recognition LRO results + :vartype kind: str or ~azure.ai.language.text.models.PII_ENTITY_RECOGNITION_LRO_RESULTS + :ivar results: The list of pii results. Required. + :vartype results: ~azure.ai.language.text.models.PiiResult + """ + + kind: Literal[AnalyzeTextLROResultsKind.PII_ENTITY_RECOGNITION_LRO_RESULTS] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The kind of the task. Required. PII entity recognition LRO results""" + results: "_models.PiiResult" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The list of pii results. Required.""" + + @overload + def __init__( + self, + *, + last_update_date_time: datetime.datetime, + status: Union[str, "_models.State"], + results: "_models.PiiResult", + task_name: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=AnalyzeTextLROResultsKind.PII_ENTITY_RECOGNITION_LRO_RESULTS, **kwargs) + + +class PiiEntityWithTags(_Model): + """Entity object with tags. + + :ivar text: Entity text as appears in the request. Required. + :vartype text: str + :ivar category: Entity type. Required. + :vartype category: str + :ivar subcategory: (Optional) Entity sub type. + :vartype subcategory: str + :ivar offset: Start position for the entity text. Use of different 'stringIndexType' values can + affect the offset returned. Required. + :vartype offset: int + :ivar length: Length for the entity text. Use of different 'stringIndexType' values can affect + the length returned. Required. + :vartype length: int + :ivar confidence_score: Confidence score between 0 and 1 of the extracted entity. Required. + :vartype confidence_score: float + :ivar type: An entity type is the lowest (or finest) granularity at which the entity has been + detected. The type maps to the specific metadata attributes associated with the entity + detected. + :vartype type: str + :ivar tags: List of entity tags. 
Tags are to express some similarities/affinity between + entities. + :vartype tags: list[~azure.ai.language.text.models.EntityTag] + :ivar mask: Optional field which will be returned only when using the redaction policy kind + “MaskWithEntityType”. This field will contain the exact mask text used to mask the PII entity + in the original text. + :vartype mask: str + :ivar mask_offset: Start position of masked text in the redacted text when using the redaction + policy kind “MaskWithEntityType”. + :vartype mask_offset: int + :ivar mask_length: The length of the masked text. Will be present when using the redaction + policy kind “MaskWithEntityType”. + :vartype mask_length: int + """ + + text: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Entity text as appears in the request. Required.""" + category: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Entity type. Required.""" + subcategory: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """(Optional) Entity sub type.""" + offset: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Start position for the entity text. Use of different 'stringIndexType' values can affect the + offset returned. Required.""" + length: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Length for the entity text. Use of different 'stringIndexType' values can affect the length + returned. Required.""" + confidence_score: float = rest_field( + name="confidenceScore", visibility=["read", "create", "update", "delete", "query"] + ) + """Confidence score between 0 and 1 of the extracted entity. Required.""" + type: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """An entity type is the lowest (or finest) granularity at which the entity has been detected. The + type maps to the specific metadata attributes associated with the entity detected.""" + tags: Optional[List["_models.EntityTag"]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """List of entity tags. Tags are to express some similarities/affinity between entities.""" + mask: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Optional field which will be returned only when using the redaction policy kind + “MaskWithEntityType”. This field will contain the exact mask text used to mask the PII entity + in the original text.""" + mask_offset: Optional[int] = rest_field( + name="maskOffset", visibility=["read", "create", "update", "delete", "query"] + ) + """Start position of masked text in the redacted text when using the redaction policy kind + “MaskWithEntityType”.""" + mask_length: Optional[int] = rest_field( + name="maskLength", visibility=["read", "create", "update", "delete", "query"] + ) + """The length of the masked text. Will be present when using the redaction policy kind + “MaskWithEntityType”.""" + + @overload + def __init__( + self, + *, + text: str, + category: str, + offset: int, + length: int, + confidence_score: float, + subcategory: Optional[str] = None, + type: Optional[str] = None, + tags: Optional[List["_models.EntityTag"]] = None, + mask: Optional[str] = None, + mask_offset: Optional[int] = None, + mask_length: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class PiiLROTask(AnalyzeTextLROTask, discriminator="PiiEntityRecognition"): + """Contains the analyze text PIIEntityRecognition LRO task. + + :ivar task_name: task name. + :vartype task_name: str + :ivar kind: Kind of the task. Required. PII entity recognition task + :vartype kind: str or ~azure.ai.language.text.models.PII_ENTITY_RECOGNITION + :ivar parameters: Pii task parameters. + :vartype parameters: ~azure.ai.language.text.models.PiiTaskParameters + """ + + kind: Literal[AnalyzeTextLROTaskKind.PII_ENTITY_RECOGNITION] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the task. Required. PII entity recognition task""" + parameters: Optional["_models.PiiTaskParameters"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Pii task parameters.""" + + @overload + def __init__( + self, + *, + task_name: Optional[str] = None, + parameters: Optional["_models.PiiTaskParameters"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=AnalyzeTextLROTaskKind.PII_ENTITY_RECOGNITION, **kwargs) + + +class PiiResult(_Model): + """Contains the PiiResult. + + :ivar errors: Errors by document id. Required. + :vartype errors: list[~azure.ai.language.text.models.DocumentError] + :ivar statistics: if showStats=true was specified in the request this field will contain + information about the request payload. + :vartype statistics: ~azure.ai.language.text.models.RequestStatistics + :ivar model_version: This field indicates which model is used for scoring. Required. + :vartype model_version: str + :ivar documents: Response by document. Required. + :vartype documents: list[~azure.ai.language.text.models.PiiResultWithDetectedLanguage] + """ + + errors: List["_models.DocumentError"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Errors by document id. Required.""" + statistics: Optional["_models.RequestStatistics"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """if showStats=true was specified in the request this field will contain information about the + request payload.""" + model_version: str = rest_field(name="modelVersion", visibility=["read", "create", "update", "delete", "query"]) + """This field indicates which model is used for scoring. Required.""" + documents: List["_models.PiiResultWithDetectedLanguage"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Response by document. Required.""" + + @overload + def __init__( + self, + *, + errors: List["_models.DocumentError"], + model_version: str, + documents: List["_models.PiiResultWithDetectedLanguage"], + statistics: Optional["_models.RequestStatistics"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class PiiResultWithDetectedLanguage(_Model): + """Contains the PII results with detected language. 
+ + :ivar id: Unique, non-empty document identifier. Required. + :vartype id: str + :ivar warnings: Warnings encountered while processing document. Required. + :vartype warnings: list[~azure.ai.language.text.models.DocumentWarning] + :ivar statistics: if showStats=true was specified in the request this field will contain + information about the document payload. + :vartype statistics: ~azure.ai.language.text.models.DocumentStatistics + :ivar redacted_text: Returns redacted text. Required. + :vartype redacted_text: str + :ivar entities: Recognized entities in the document. Required. + :vartype entities: list[~azure.ai.language.text.models.PiiEntityWithTags] + :ivar detected_language: If 'language' is set to 'auto' for the document in the request this + field will contain a 2 letter ISO 639-1 representation of the language detected for this + document. + :vartype detected_language: ~azure.ai.language.text.models.DetectedLanguage + """ + + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Unique, non-empty document identifier. Required.""" + warnings: List["_models.DocumentWarning"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Warnings encountered while processing document. Required.""" + statistics: Optional["_models.DocumentStatistics"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """if showStats=true was specified in the request this field will contain information about the + document payload.""" + redacted_text: str = rest_field(name="redactedText", visibility=["read", "create", "update", "delete", "query"]) + """Returns redacted text. Required.""" + entities: List["_models.PiiEntityWithTags"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Recognized entities in the document. Required.""" + detected_language: Optional["_models.DetectedLanguage"] = rest_field( + name="detectedLanguage", visibility=["read", "create", "update", "delete", "query"] + ) + """If 'language' is set to 'auto' for the document in the request this field will contain a 2 + letter ISO 639-1 representation of the language detected for this document.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + warnings: List["_models.DocumentWarning"], + redacted_text: str, + entities: List["_models.PiiEntityWithTags"], + statistics: Optional["_models.DocumentStatistics"] = None, + detected_language: Optional["_models.DetectedLanguage"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class PiiTaskParameters(_Model): + """Supported parameters for a PII Entities Recognition task. + + :ivar logging_opt_out: logging opt out. + :vartype logging_opt_out: bool + :ivar model_version: model version. + :vartype model_version: str + :ivar domain: Domain for PII task. Known values are: "phi" and "none". + :vartype domain: str or ~azure.ai.language.text.models.PiiDomain + :ivar pii_categories: Enumeration of PII categories to be returned in the response. + :vartype pii_categories: list[str or ~azure.ai.language.text.models.PiiCategory] + :ivar string_index_type: StringIndexType to be used for analysis. Known values are: + "TextElements_v8", "UnicodeCodePoint", and "Utf16CodeUnit". 
+ :vartype string_index_type: str or ~azure.ai.language.text.models.StringIndexType + :ivar exclude_pii_categories: Enumeration of PII categories to be excluded in the response. + :vartype exclude_pii_categories: list[str or + ~azure.ai.language.text.models.PiiCategoriesExclude] + :ivar redaction_policy: RedactionPolicy to be used on the input. + :vartype redaction_policy: ~azure.ai.language.text.models.BaseRedactionPolicy + :ivar value_exclusion_policy: Policy for specific words and terms that should be excluded from + detection by the PII detection service. + :vartype value_exclusion_policy: ~azure.ai.language.text.models.ValueExclusionPolicy + :ivar entity_synonyms: (Optional) request parameter that allows the user to provide synonyms + for context words that to enhance pii entity detection. + :vartype entity_synonyms: list[~azure.ai.language.text.models.EntitySynonyms] + """ + + logging_opt_out: Optional[bool] = rest_field( + name="loggingOptOut", visibility=["read", "create", "update", "delete", "query"] + ) + """logging opt out.""" + model_version: Optional[str] = rest_field( + name="modelVersion", visibility=["read", "create", "update", "delete", "query"] + ) + """model version.""" + domain: Optional[Union[str, "_models.PiiDomain"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Domain for PII task. Known values are: \"phi\" and \"none\".""" + pii_categories: Optional[List[Union[str, "_models.PiiCategory"]]] = rest_field( + name="piiCategories", visibility=["read", "create", "update", "delete", "query"] + ) + """Enumeration of PII categories to be returned in the response.""" + string_index_type: Optional[Union[str, "_models.StringIndexType"]] = rest_field( + name="stringIndexType", visibility=["read", "create", "update", "delete", "query"] + ) + """StringIndexType to be used for analysis. 
Known values are: \"TextElements_v8\", + \"UnicodeCodePoint\", and \"Utf16CodeUnit\".""" + exclude_pii_categories: Optional[List[Union[str, "_models.PiiCategoriesExclude"]]] = rest_field( + name="excludePiiCategories", visibility=["read", "create", "update", "delete", "query"] + ) + """Enumeration of PII categories to be excluded in the response.""" + redaction_policy: Optional["_models.BaseRedactionPolicy"] = rest_field( + name="redactionPolicy", visibility=["read", "create", "update", "delete", "query"] + ) + """RedactionPolicy to be used on the input.""" + value_exclusion_policy: Optional["_models.ValueExclusionPolicy"] = rest_field( + name="valueExclusionPolicy", visibility=["read", "create", "update", "delete", "query"] + ) + """Policy for specific words and terms that should be excluded from detection by the PII detection + service.""" + entity_synonyms: Optional[List["_models.EntitySynonyms"]] = rest_field( + name="entitySynonyms", visibility=["read", "create", "update", "delete", "query"] + ) + """(Optional) request parameter that allows the user to provide synonyms for context words that to + enhance pii entity detection.""" + + @overload + def __init__( + self, + *, + logging_opt_out: Optional[bool] = None, + model_version: Optional[str] = None, + domain: Optional[Union[str, "_models.PiiDomain"]] = None, + pii_categories: Optional[List[Union[str, "_models.PiiCategory"]]] = None, + string_index_type: Optional[Union[str, "_models.StringIndexType"]] = None, + exclude_pii_categories: Optional[List[Union[str, "_models.PiiCategoriesExclude"]]] = None, + redaction_policy: Optional["_models.BaseRedactionPolicy"] = None, + value_exclusion_policy: Optional["_models.ValueExclusionPolicy"] = None, + entity_synonyms: Optional[List["_models.EntitySynonyms"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class PiiTaskResult(AnalyzeTextTaskResult, discriminator="PiiEntityRecognitionResults"): + """Contains the analyze text PIIEntityRecognition LRO task. + + :ivar kind: The kind of the task. Required. PII entity recognition results + :vartype kind: str or ~azure.ai.language.text.models.PII_ENTITY_RECOGNITION_RESULTS + :ivar results: The list of pii results. Required. + :vartype results: ~azure.ai.language.text.models.PiiResult + """ + + kind: Literal[AnalyzeTextTaskResultsKind.PII_ENTITY_RECOGNITION_RESULTS] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The kind of the task. Required. PII entity recognition results""" + results: "_models.PiiResult" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The list of pii results. Required.""" + + @overload + def __init__( + self, + *, + results: "_models.PiiResult", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=AnalyzeTextTaskResultsKind.PII_ENTITY_RECOGNITION_RESULTS, **kwargs) + + +class RequestStatistics(_Model): + """if showStats=true was specified in the request this field will contain information about the + request payload. 
+ + :ivar documents_count: Number of documents submitted in the request. Required. + :vartype documents_count: int + :ivar valid_documents_count: Number of valid documents. This excludes empty, over-size limit or + non-supported languages documents. Required. + :vartype valid_documents_count: int + :ivar erroneous_documents_count: Number of invalid documents. This includes empty, over-size + limit or non-supported languages documents. Required. + :vartype erroneous_documents_count: int + :ivar transactions_count: Number of transactions for the request. Required. + :vartype transactions_count: int + """ + + documents_count: int = rest_field(name="documentsCount", visibility=["read", "create", "update", "delete", "query"]) + """Number of documents submitted in the request. Required.""" + valid_documents_count: int = rest_field( + name="validDocumentsCount", visibility=["read", "create", "update", "delete", "query"] + ) + """Number of valid documents. This excludes empty, over-size limit or non-supported languages + documents. Required.""" + erroneous_documents_count: int = rest_field( + name="erroneousDocumentsCount", visibility=["read", "create", "update", "delete", "query"] + ) + """Number of invalid documents. This includes empty, over-size limit or non-supported languages + documents. Required.""" + transactions_count: int = rest_field( + name="transactionsCount", visibility=["read", "create", "update", "delete", "query"] + ) + """Number of transactions for the request. Required.""" + + @overload + def __init__( + self, + *, + documents_count: int, + valid_documents_count: int, + erroneous_documents_count: int, + transactions_count: int, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SentenceAssessment(_Model): + """Represents a sentence assessment and the assessments or target objects related to it. + + :ivar sentiment: The sentiment of the sentence. Required. Known values are: "positive", + "mixed", and "negative". + :vartype sentiment: str or ~azure.ai.language.text.models.TokenSentimentValue + :ivar confidence_scores: Represents the confidence scores across all sentiment classes: + positive and negative. Required. + :vartype confidence_scores: ~azure.ai.language.text.models.TargetConfidenceScoreLabel + :ivar offset: The target offset from the start of the sentence. Required. + :vartype offset: int + :ivar length: The length of the target. Required. + :vartype length: int + :ivar text: The target text detected. Required. + :vartype text: str + :ivar is_negated: The indicator representing if the assessment is negated. Required. + :vartype is_negated: bool + """ + + sentiment: Union[str, "_models.TokenSentimentValue"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The sentiment of the sentence. Required. Known values are: \"positive\", \"mixed\", and + \"negative\".""" + confidence_scores: "_models.TargetConfidenceScoreLabel" = rest_field( + name="confidenceScores", visibility=["read", "create", "update", "delete", "query"] + ) + """Represents the confidence scores across all sentiment classes: positive and negative. Required.""" + offset: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The target offset from the start of the sentence. 
Required.""" + length: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The length of the target. Required.""" + text: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The target text detected. Required.""" + is_negated: bool = rest_field(name="isNegated", visibility=["read", "create", "update", "delete", "query"]) + """The indicator representing if the assessment is negated. Required.""" + + @overload + def __init__( + self, + *, + sentiment: Union[str, "_models.TokenSentimentValue"], + confidence_scores: "_models.TargetConfidenceScoreLabel", + offset: int, + length: int, + text: str, + is_negated: bool, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SentenceSentiment(_Model): + """A document's sentence sentiment. + + :ivar text: The sentence text. Required. + :vartype text: str + :ivar sentiment: The predicted Sentiment for the sentence. Required. Known values are: + "positive", "neutral", and "negative". + :vartype sentiment: str or ~azure.ai.language.text.models.SentenceSentimentValue + :ivar confidence_scores: The sentiment confidence score between 0 and 1 for the sentence for + all classes. Required. + :vartype confidence_scores: ~azure.ai.language.text.models.SentimentConfidenceScores + :ivar offset: The target offset from the start of the sentence. Required. + :vartype offset: int + :ivar length: The length of the target. Required. + :vartype length: int + :ivar targets: The array of sentence targets for the sentence. + :vartype targets: list[~azure.ai.language.text.models.SentenceTarget] + :ivar assessments: The array of assessments for the sentence. + :vartype assessments: list[~azure.ai.language.text.models.SentenceAssessment] + """ + + text: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The sentence text. Required.""" + sentiment: Union[str, "_models.SentenceSentimentValue"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The predicted Sentiment for the sentence. Required. Known values are: \"positive\", + \"neutral\", and \"negative\".""" + confidence_scores: "_models.SentimentConfidenceScores" = rest_field( + name="confidenceScores", visibility=["read", "create", "update", "delete", "query"] + ) + """The sentiment confidence score between 0 and 1 for the sentence for all classes. Required.""" + offset: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The target offset from the start of the sentence. Required.""" + length: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The length of the target. 
Required.""" + targets: Optional[List["_models.SentenceTarget"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The array of sentence targets for the sentence.""" + assessments: Optional[List["_models.SentenceAssessment"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The array of assessments for the sentence.""" + + @overload + def __init__( + self, + *, + text: str, + sentiment: Union[str, "_models.SentenceSentimentValue"], + confidence_scores: "_models.SentimentConfidenceScores", + offset: int, + length: int, + targets: Optional[List["_models.SentenceTarget"]] = None, + assessments: Optional[List["_models.SentenceAssessment"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SentenceTarget(_Model): + """Represents a sentence target and the assessments or target objects related to it. + + :ivar sentiment: The sentiment of the sentence. Required. Known values are: "positive", + "mixed", and "negative". + :vartype sentiment: str or ~azure.ai.language.text.models.TokenSentimentValue + :ivar confidence_scores: Represents the confidence scores across all sentiment classes: + positive and negative. Required. + :vartype confidence_scores: ~azure.ai.language.text.models.TargetConfidenceScoreLabel + :ivar offset: The target offset from the start of the sentence. Required. + :vartype offset: int + :ivar length: The length of the target. Required. + :vartype length: int + :ivar text: The target text detected. Required. + :vartype text: str + :ivar relations: The array of either assessment or target objects which is related to the + target. Required. + :vartype relations: list[~azure.ai.language.text.models.TargetRelation] + """ + + sentiment: Union[str, "_models.TokenSentimentValue"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The sentiment of the sentence. Required. Known values are: \"positive\", \"mixed\", and + \"negative\".""" + confidence_scores: "_models.TargetConfidenceScoreLabel" = rest_field( + name="confidenceScores", visibility=["read", "create", "update", "delete", "query"] + ) + """Represents the confidence scores across all sentiment classes: positive and negative. Required.""" + offset: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The target offset from the start of the sentence. Required.""" + length: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The length of the target. Required.""" + text: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The target text detected. Required.""" + relations: List["_models.TargetRelation"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The array of either assessment or target objects which is related to the target. Required.""" + + @overload + def __init__( + self, + *, + sentiment: Union[str, "_models.TokenSentimentValue"], + confidence_scores: "_models.TargetConfidenceScoreLabel", + offset: int, + length: int, + text: str, + relations: List["_models.TargetRelation"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SentimentAnalysisLROTask(AnalyzeTextLROTask, discriminator="SentimentAnalysis"): + """An object representing the task definition for a Sentiment Analysis task. + + :ivar task_name: task name. + :vartype task_name: str + :ivar kind: The Sentiment Analysis kind of the long running task. Required. Sentiment analysis + task + :vartype kind: str or ~azure.ai.language.text.models.SENTIMENT_ANALYSIS + :ivar parameters: Parameters for the Sentiment Analysis task. + :vartype parameters: ~azure.ai.language.text.models.SentimentAnalysisTaskParameters + """ + + kind: Literal[AnalyzeTextLROTaskKind.SENTIMENT_ANALYSIS] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The Sentiment Analysis kind of the long running task. Required. Sentiment analysis task""" + parameters: Optional["_models.SentimentAnalysisTaskParameters"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Parameters for the Sentiment Analysis task.""" + + @overload + def __init__( + self, + *, + task_name: Optional[str] = None, + parameters: Optional["_models.SentimentAnalysisTaskParameters"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=AnalyzeTextLROTaskKind.SENTIMENT_ANALYSIS, **kwargs) + + +class SentimentAnalysisTaskParameters(_Model): + """Supported parameters for a Sentiment Analysis task. + + :ivar logging_opt_out: logging opt out. + :vartype logging_opt_out: bool + :ivar model_version: model version. + :vartype model_version: str + :ivar opinion_mining: Whether to use opinion mining in the request or not. + :vartype opinion_mining: bool + :ivar string_index_type: Specifies the method used to interpret string offsets. Known values + are: "TextElements_v8", "UnicodeCodePoint", and "Utf16CodeUnit". + :vartype string_index_type: str or ~azure.ai.language.text.models.StringIndexType + """ + + logging_opt_out: Optional[bool] = rest_field( + name="loggingOptOut", visibility=["read", "create", "update", "delete", "query"] + ) + """logging opt out.""" + model_version: Optional[str] = rest_field( + name="modelVersion", visibility=["read", "create", "update", "delete", "query"] + ) + """model version.""" + opinion_mining: Optional[bool] = rest_field( + name="opinionMining", visibility=["read", "create", "update", "delete", "query"] + ) + """Whether to use opinion mining in the request or not.""" + string_index_type: Optional[Union[str, "_models.StringIndexType"]] = rest_field( + name="stringIndexType", visibility=["read", "create", "update", "delete", "query"] + ) + """Specifies the method used to interpret string offsets. Known values are: \"TextElements_v8\", + \"UnicodeCodePoint\", and \"Utf16CodeUnit\".""" + + @overload + def __init__( + self, + *, + logging_opt_out: Optional[bool] = None, + model_version: Optional[str] = None, + opinion_mining: Optional[bool] = None, + string_index_type: Optional[Union[str, "_models.StringIndexType"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SentimentConfidenceScores(_Model): + """Represents the confidence scores between 0 and 1 across all sentiment classes: positive, + neutral, negative. + + :ivar positive: Confidence score for positive sentiment. Required. + :vartype positive: float + :ivar neutral: Confidence score for neutral sentiment. Required. + :vartype neutral: float + :ivar negative: Confidence score for negative sentiment. Required. + :vartype negative: float + """ + + positive: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Confidence score for positive sentiment. Required.""" + neutral: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Confidence score for neutral sentiment. Required.""" + negative: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Confidence score for negative sentiment. Required.""" + + @overload + def __init__( + self, + *, + positive: float, + neutral: float, + negative: float, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SentimentDocumentResultWithDetectedLanguage(_Model): # pylint: disable=name-too-long + """Sentiment analysis per document. + + :ivar id: Unique, non-empty document identifier. Required. + :vartype id: str + :ivar warnings: Warnings encountered while processing document. Required. + :vartype warnings: list[~azure.ai.language.text.models.DocumentWarning] + :ivar statistics: if showStats=true was specified in the request this field will contain + information about the document payload. + :vartype statistics: ~azure.ai.language.text.models.DocumentStatistics + :ivar sentiment: Predicted sentiment for document (Negative, Neutral, Positive, or Mixed). + Required. Known values are: "positive", "neutral", "negative", and "mixed". + :vartype sentiment: str or ~azure.ai.language.text.models.DocumentSentimentValue + :ivar confidence_scores: The sentiment confidence score between 0 and 1 for the sentence for + all classes. Required. + :vartype confidence_scores: ~azure.ai.language.text.models.SentimentConfidenceScores + :ivar sentences: The document's sentences sentiment. Required. + :vartype sentences: list[~azure.ai.language.text.models.SentenceSentiment] + :ivar detected_language: If 'language' is set to 'auto' for the document in the request this + field will contain a 2 letter ISO 639-1 representation of the language detected for this + document. + :vartype detected_language: ~azure.ai.language.text.models.DetectedLanguage + """ + + id: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Unique, non-empty document identifier. Required.""" + warnings: List["_models.DocumentWarning"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Warnings encountered while processing document. 
Required.""" + statistics: Optional["_models.DocumentStatistics"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """if showStats=true was specified in the request this field will contain information about the + document payload.""" + sentiment: Union[str, "_models.DocumentSentimentValue"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Predicted sentiment for document (Negative, Neutral, Positive, or Mixed). Required. Known + values are: \"positive\", \"neutral\", \"negative\", and \"mixed\".""" + confidence_scores: "_models.SentimentConfidenceScores" = rest_field( + name="confidenceScores", visibility=["read", "create", "update", "delete", "query"] + ) + """The sentiment confidence score between 0 and 1 for the sentence for all classes. Required.""" + sentences: List["_models.SentenceSentiment"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The document's sentences sentiment. Required.""" + detected_language: Optional["_models.DetectedLanguage"] = rest_field( + name="detectedLanguage", visibility=["read", "create", "update", "delete", "query"] + ) + """If 'language' is set to 'auto' for the document in the request this field will contain a 2 + letter ISO 639-1 representation of the language detected for this document.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + warnings: List["_models.DocumentWarning"], + sentiment: Union[str, "_models.DocumentSentimentValue"], + confidence_scores: "_models.SentimentConfidenceScores", + sentences: List["_models.SentenceSentiment"], + statistics: Optional["_models.DocumentStatistics"] = None, + detected_language: Optional["_models.DetectedLanguage"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SentimentLROResult(AnalyzeTextLROResult, discriminator="SentimentAnalysisLROResults"): + """Contains the Sentiment Analysis LRO results. + + :ivar last_update_date_time: The last updated time in UTC for the task. Required. + :vartype last_update_date_time: ~datetime.datetime + :ivar status: The status of the task at the mentioned last update time. Required. Known values + are: "notStarted", "running", "succeeded", "partiallyCompleted", "failed", "cancelled", and + "cancelling". + :vartype status: str or ~azure.ai.language.text.models.State + :ivar task_name: task name. + :vartype task_name: str + :ivar kind: Kind of the task. Required. Sentiment analysis LRO results + :vartype kind: str or ~azure.ai.language.text.models.SENTIMENT_ANALYSIS_LRO_RESULTS + :ivar results: The sentiment analysis results. Required. + :vartype results: ~azure.ai.language.text.models.SentimentResponse + """ + + kind: Literal[AnalyzeTextLROResultsKind.SENTIMENT_ANALYSIS_LRO_RESULTS] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the task. Required. Sentiment analysis LRO results""" + results: "_models.SentimentResponse" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The sentiment analysis results. 
Required.""" + + @overload + def __init__( + self, + *, + last_update_date_time: datetime.datetime, + status: Union[str, "_models.State"], + results: "_models.SentimentResponse", + task_name: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=AnalyzeTextLROResultsKind.SENTIMENT_ANALYSIS_LRO_RESULTS, **kwargs) + + +class SentimentResponse(_Model): + """Sentiment analysis results for the input documents. + + :ivar errors: Errors by document id. Required. + :vartype errors: list[~azure.ai.language.text.models.DocumentError] + :ivar statistics: if showStats=true was specified in the request this field will contain + information about the request payload. + :vartype statistics: ~azure.ai.language.text.models.RequestStatistics + :ivar model_version: This field indicates which model is used for scoring. Required. + :vartype model_version: str + :ivar documents: The sentiment analysis results for each document in the input. Required. + :vartype documents: + list[~azure.ai.language.text.models.SentimentDocumentResultWithDetectedLanguage] + """ + + errors: List["_models.DocumentError"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Errors by document id. Required.""" + statistics: Optional["_models.RequestStatistics"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """if showStats=true was specified in the request this field will contain information about the + request payload.""" + model_version: str = rest_field(name="modelVersion", visibility=["read", "create", "update", "delete", "query"]) + """This field indicates which model is used for scoring. Required.""" + documents: List["_models.SentimentDocumentResultWithDetectedLanguage"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The sentiment analysis results for each document in the input. Required.""" + + @overload + def __init__( + self, + *, + errors: List["_models.DocumentError"], + model_version: str, + documents: List["_models.SentimentDocumentResultWithDetectedLanguage"], + statistics: Optional["_models.RequestStatistics"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SentimentTaskResult(AnalyzeTextTaskResult, discriminator="SentimentAnalysisResults"): + """Contains the analyze text SentimentAnalysis LRO task result. + + :ivar kind: Kind of the task. Required. Sentiment analysis results + :vartype kind: str or ~azure.ai.language.text.models.SENTIMENT_ANALYSIS_RESULTS + :ivar results: The sentiment analysis results. Required. + :vartype results: ~azure.ai.language.text.models.SentimentResponse + """ + + kind: Literal[AnalyzeTextTaskResultsKind.SENTIMENT_ANALYSIS_RESULTS] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the task. Required. Sentiment analysis results""" + results: "_models.SentimentResponse" = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The sentiment analysis results. 
Required.""" + + @overload + def __init__( + self, + *, + results: "_models.SentimentResponse", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=AnalyzeTextTaskResultsKind.SENTIMENT_ANALYSIS_RESULTS, **kwargs) + + +class SpeedMetadata(BaseMetadata, discriminator="SpeedMetadata"): + """Represents the Speed entity Metadata model. + + :ivar value: The numeric value that the extracted text denotes. Required. + :vartype value: float + :ivar metadata_kind: Kind of the metadata. Required. Metadata for speed-related values. + :vartype metadata_kind: str or ~azure.ai.language.text.models.SPEED_METADATA + :ivar unit: Unit of measure for speed. Required. Known values are: "Unspecified", + "MetersPerSecond", "KilometersPerHour", "KilometersPerMinute", "KilometersPerSecond", + "MilesPerHour", "Knots", "FeetPerSecond", "FeetPerMinute", "YardsPerMinute", "YardsPerSecond", + "MetersPerMillisecond", "CentimetersPerMillisecond", and "KilometersPerMillisecond". + :vartype unit: str or ~azure.ai.language.text.models.SpeedUnit + """ + + value: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The numeric value that the extracted text denotes. Required.""" + metadata_kind: Literal[MetadataKind.SPEED_METADATA] = rest_discriminator(name="metadataKind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the metadata. Required. Metadata for speed-related values.""" + unit: Union[str, "_models.SpeedUnit"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Unit of measure for speed. Required. Known values are: \"Unspecified\", \"MetersPerSecond\", + \"KilometersPerHour\", \"KilometersPerMinute\", \"KilometersPerSecond\", \"MilesPerHour\", + \"Knots\", \"FeetPerSecond\", \"FeetPerMinute\", \"YardsPerMinute\", \"YardsPerSecond\", + \"MetersPerMillisecond\", \"CentimetersPerMillisecond\", and \"KilometersPerMillisecond\".""" + + @overload + def __init__( + self, + *, + value: float, + unit: Union[str, "_models.SpeedUnit"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, metadata_kind=MetadataKind.SPEED_METADATA, **kwargs) + + +class SummaryContext(_Model): + """The context of the summary. + + :ivar offset: Start position for the context. Use of different 'stringIndexType' values can + affect the offset returned. Required. + :vartype offset: int + :ivar length: The length of the context. Use of different 'stringIndexType' values can affect + the length returned. Required. + :vartype length: int + """ + + offset: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Start position for the context. Use of different 'stringIndexType' values can affect the offset + returned. Required.""" + length: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The length of the context. Use of different 'stringIndexType' values can affect the length + returned. Required.""" + + @overload + def __init__( + self, + *, + offset: int, + length: int, + ) -> None: ... 
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class TargetConfidenceScoreLabel(_Model): + """Represents the confidence scores across all sentiment classes: positive and negative. + + :ivar positive: Confidence score for positive sentiment. Required. + :vartype positive: float + :ivar negative: Confidence score for negative sentiment. Required. + :vartype negative: float + """ + + positive: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Confidence score for positive sentiment. Required.""" + negative: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Confidence score for negative sentiment. Required.""" + + @overload + def __init__( + self, + *, + positive: float, + negative: float, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class TargetRelation(_Model): + """Represents the relation between assessments and/or targets. + + :ivar ref: The JSON pointer indicating the linked object. Required. + :vartype ref: str + :ivar relation_type: The type related to the target. Required. Known values are: "assessment" + and "target". + :vartype relation_type: str or ~azure.ai.language.text.models.TargetRelationType + """ + + ref: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The JSON pointer indicating the linked object. Required.""" + relation_type: Union[str, "_models.TargetRelationType"] = rest_field( + name="relationType", visibility=["read", "create", "update", "delete", "query"] + ) + """The type related to the target. Required. Known values are: \"assessment\" and \"target\".""" + + @overload + def __init__( + self, + *, + ref: str, + relation_type: Union[str, "_models.TargetRelationType"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class Tasks(_Model): + """Container for the tasks status for the LRO job. + + :ivar completed: Count of completed tasks. Required. + :vartype completed: int + :ivar failed: Count of failed tasks. Required. + :vartype failed: int + :ivar in_progress: Count of inprogress tasks. Required. + :vartype in_progress: int + :ivar total: Count of total tasks. Required. + :vartype total: int + :ivar items_property: Enumerable of Analyze text job results. + :vartype items_property: list[~azure.ai.language.text.models.AnalyzeTextLROResult] + """ + + completed: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Count of completed tasks. Required.""" + failed: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Count of failed tasks. Required.""" + in_progress: int = rest_field(name="inProgress", visibility=["read", "create", "update", "delete", "query"]) + """Count of inprogress tasks. Required.""" + total: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Count of total tasks. 
Required.""" + items_property: Optional[List["_models.AnalyzeTextLROResult"]] = rest_field( + name="items", visibility=["read", "create", "update", "delete", "query"] + ) + """Enumerable of Analyze text job results.""" + + @overload + def __init__( + self, + *, + completed: int, + failed: int, + in_progress: int, + total: int, + items_property: Optional[List["_models.AnalyzeTextLROResult"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class TemperatureMetadata(BaseMetadata, discriminator="TemperatureMetadata"): + """Represents the Information entity Metadata model. + + :ivar value: The numeric value that the extracted text denotes. Required. + :vartype value: float + :ivar metadata_kind: Kind of the metadata. Required. Metadata for temperature-related values. + :vartype metadata_kind: str or ~azure.ai.language.text.models.TEMPERATURE_METADATA + :ivar unit: Unit of measure for temperature. Required. Known values are: "Unspecified", + "Fahrenheit", "Kelvin", "Rankine", and "Celsius". + :vartype unit: str or ~azure.ai.language.text.models.TemperatureUnit + """ + + value: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The numeric value that the extracted text denotes. Required.""" + metadata_kind: Literal[MetadataKind.TEMPERATURE_METADATA] = rest_discriminator(name="metadataKind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the metadata. Required. Metadata for temperature-related values.""" + unit: Union[str, "_models.TemperatureUnit"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Unit of measure for temperature. Required. Known values are: \"Unspecified\", \"Fahrenheit\", + \"Kelvin\", \"Rankine\", and \"Celsius\".""" + + @overload + def __init__( + self, + *, + value: float, + unit: Union[str, "_models.TemperatureUnit"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, metadata_kind=MetadataKind.TEMPERATURE_METADATA, **kwargs) + + +class TemporalSetMetadata(BaseMetadata, discriminator="TemporalSetMetadata"): + """A Metadata for temporal set entity instances. + + :ivar date_values: List of date values. + :vartype date_values: list[~azure.ai.language.text.models.DateValue] + :ivar metadata_kind: Kind of the metadata. Required. Metadata for set of time-related values. + :vartype metadata_kind: str or ~azure.ai.language.text.models.TEMPORAL_SET_METADATA + """ + + date_values: Optional[List["_models.DateValue"]] = rest_field( + name="dateValues", visibility=["read", "create", "update", "delete", "query"] + ) + """List of date values.""" + metadata_kind: Literal[MetadataKind.TEMPORAL_SET_METADATA] = rest_discriminator(name="metadataKind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the metadata. Required. Metadata for set of time-related values.""" + + @overload + def __init__( + self, + *, + date_values: Optional[List["_models.DateValue"]] = None, + ) -> None: ... 
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, metadata_kind=MetadataKind.TEMPORAL_SET_METADATA, **kwargs) + + +class TemporalSpanMetadata(BaseMetadata, discriminator="TemporalSpanMetadata"): + """represents the Metadata of a date and/or time span. + + :ivar metadata_kind: Kind of the metadata. Required. Metadata for temporal span values. + :vartype metadata_kind: str or ~azure.ai.language.text.models.TEMPORAL_SPAN_METADATA + :ivar span_values: List of temporal spans detected. + :vartype span_values: list[~azure.ai.language.text.models.TemporalSpanValues] + """ + + metadata_kind: Literal[MetadataKind.TEMPORAL_SPAN_METADATA] = rest_discriminator(name="metadataKind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the metadata. Required. Metadata for temporal span values.""" + span_values: Optional[List["_models.TemporalSpanValues"]] = rest_field( + name="spanValues", visibility=["read", "create", "update", "delete", "query"] + ) + """List of temporal spans detected.""" + + @overload + def __init__( + self, + *, + span_values: Optional[List["_models.TemporalSpanValues"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, metadata_kind=MetadataKind.TEMPORAL_SPAN_METADATA, **kwargs) + + +class TemporalSpanValues(_Model): + """Temporal span object. + + :ivar begin: Start value for the span. + :vartype begin: str + :ivar end: End value for the span. + :vartype end: str + :ivar duration: An optional duration value formatted based on the ISO 8601 + (`https://en.wikipedia.org/wiki/ISO_8601#Durations + `_). + :vartype duration: str + :ivar modifier: Modifier for datetime to indicate point of reference like before, after etc. + Known values are: "AfterApprox", "Before", "BeforeStart", "Approx", "ReferenceUndefined", + "SinceEnd", "AfterMid", "Start", "After", "BeforeEnd", "Until", "End", "Less", "Since", + "AfterStart", "BeforeApprox", "Mid", and "More". + :vartype modifier: str or ~azure.ai.language.text.models.TemporalModifier + :ivar timex: An optional triplet containing the beginning, the end, and the duration all stated + as ISO 8601 formatted strings. + :vartype timex: str + """ + + begin: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Start value for the span.""" + end: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """End value for the span.""" + duration: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """An optional duration value formatted based on the ISO 8601 + (`https://en.wikipedia.org/wiki/ISO_8601#Durations + `_).""" + modifier: Optional[Union[str, "_models.TemporalModifier"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Modifier for datetime to indicate point of reference like before, after etc. 
Known values are: + \"AfterApprox\", \"Before\", \"BeforeStart\", \"Approx\", \"ReferenceUndefined\", \"SinceEnd\", + \"AfterMid\", \"Start\", \"After\", \"BeforeEnd\", \"Until\", \"End\", \"Less\", \"Since\", + \"AfterStart\", \"BeforeApprox\", \"Mid\", and \"More\".""" + timex: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """An optional triplet containing the beginning, the end, and the duration all stated as ISO 8601 + formatted strings.""" + + @overload + def __init__( + self, + *, + begin: Optional[str] = None, + end: Optional[str] = None, + duration: Optional[str] = None, + modifier: Optional[Union[str, "_models.TemporalModifier"]] = None, + timex: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class TimeMetadata(BaseMetadata, discriminator="TimeMetadata"): + """A Metadata for time entity instances. + + :ivar date_values: List of date values. + :vartype date_values: list[~azure.ai.language.text.models.DateValue] + :ivar metadata_kind: Kind of the metadata. Required. Metadata for time-related values. + :vartype metadata_kind: str or ~azure.ai.language.text.models.TIME_METADATA + """ + + date_values: Optional[List["_models.DateValue"]] = rest_field( + name="dateValues", visibility=["read", "create", "update", "delete", "query"] + ) + """List of date values.""" + metadata_kind: Literal[MetadataKind.TIME_METADATA] = rest_discriminator(name="metadataKind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the metadata. Required. Metadata for time-related values.""" + + @overload + def __init__( + self, + *, + date_values: Optional[List["_models.DateValue"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, metadata_kind=MetadataKind.TIME_METADATA, **kwargs) + + +class ValueExclusionPolicy(_Model): + """Policy for specific words and terms that should be excluded from detection by the PII detection + service. + + :ivar case_sensitive: Option to make the values excluded values case sensitive. Required. + :vartype case_sensitive: bool + :ivar excluded_values: List of words and terms that should be excluded from detection by the + PII detection service. Required. + :vartype excluded_values: list[str] + """ + + case_sensitive: bool = rest_field(name="caseSensitive", visibility=["read", "create", "update", "delete", "query"]) + """Option to make the values excluded values case sensitive. Required.""" + excluded_values: List[str] = rest_field( + name="excludedValues", visibility=["read", "create", "update", "delete", "query"] + ) + """List of words and terms that should be excluded from detection by the PII detection service. + Required.""" + + @overload + def __init__( + self, + *, + case_sensitive: bool, + excluded_values: List[str], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class VolumeMetadata(BaseMetadata, discriminator="VolumeMetadata"): + """Represents the Volume entity Metadata model. + + :ivar value: The numeric value that the extracted text denotes. Required. + :vartype value: float + :ivar metadata_kind: Kind of the metadata. Required. Metadata for volume-related values. + :vartype metadata_kind: str or ~azure.ai.language.text.models.VOLUME_METADATA + :ivar unit: Unit of measure for volume. Required. Known values are: "Unspecified", + "CubicMeter", "CubicCentimeter", "CubicMillimeter", "Hectoliter", "Decaliter", "Liter", + "Centiliter", "Milliliter", "CubicYard", "CubicInch", "CubicFoot", "CubicMile", "FluidOunce", + "Teaspoon", "Tablespoon", "Pint", "Quart", "Cup", "Gill", "Pinch", "FluidDram", "Barrel", + "Minim", "Cord", "Peck", "Bushel", and "Hogshead". + :vartype unit: str or ~azure.ai.language.text.models.VolumeUnit + """ + + value: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The numeric value that the extracted text denotes. Required.""" + metadata_kind: Literal[MetadataKind.VOLUME_METADATA] = rest_discriminator(name="metadataKind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the metadata. Required. Metadata for volume-related values.""" + unit: Union[str, "_models.VolumeUnit"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Unit of measure for volume. Required. Known values are: \"Unspecified\", \"CubicMeter\", + \"CubicCentimeter\", \"CubicMillimeter\", \"Hectoliter\", \"Decaliter\", \"Liter\", + \"Centiliter\", \"Milliliter\", \"CubicYard\", \"CubicInch\", \"CubicFoot\", \"CubicMile\", + \"FluidOunce\", \"Teaspoon\", \"Tablespoon\", \"Pint\", \"Quart\", \"Cup\", \"Gill\", + \"Pinch\", \"FluidDram\", \"Barrel\", \"Minim\", \"Cord\", \"Peck\", \"Bushel\", and + \"Hogshead\".""" + + @overload + def __init__( + self, + *, + value: float, + unit: Union[str, "_models.VolumeUnit"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, metadata_kind=MetadataKind.VOLUME_METADATA, **kwargs) + + +class WeightMetadata(BaseMetadata, discriminator="WeightMetadata"): + """Represents the Weight ) entity Metadata model. + + :ivar value: The numeric value that the extracted text denotes. Required. + :vartype value: float + :ivar metadata_kind: Kind of the metadata. Required. Metadata for weight-related values. + :vartype metadata_kind: str or ~azure.ai.language.text.models.WEIGHT_METADATA + :ivar unit: Unit of measure for weight. Required. Known values are: "Unspecified", "Kilogram", + "Gram", "Milligram", "Gallon", "MetricTon", "Ton", "Pound", "Ounce", "Grain", "PennyWeight", + "LongTonBritish", "ShortTonUS", "ShortHundredWeightUS", "Stone", and "Dram". + :vartype unit: str or ~azure.ai.language.text.models.WeightUnit + """ + + value: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The numeric value that the extracted text denotes. Required.""" + metadata_kind: Literal[MetadataKind.WEIGHT_METADATA] = rest_discriminator(name="metadataKind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Kind of the metadata. Required. 
Metadata for weight-related values.""" + unit: Union[str, "_models.WeightUnit"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Unit of measure for weight. Required. Known values are: \"Unspecified\", \"Kilogram\", + \"Gram\", \"Milligram\", \"Gallon\", \"MetricTon\", \"Ton\", \"Pound\", \"Ounce\", \"Grain\", + \"PennyWeight\", \"LongTonBritish\", \"ShortTonUS\", \"ShortHundredWeightUS\", \"Stone\", and + \"Dram\".""" + + @overload + def __init__( + self, + *, + value: float, + unit: Union[str, "_models.WeightUnit"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, metadata_kind=MetadataKind.WEIGHT_METADATA, **kwargs) diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/models/_patch.py b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/models/_patch.py new file mode 100644 index 000000000000..8bcb627aa475 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/models/_patch.py @@ -0,0 +1,21 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/py.typed b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/py.typed new file mode 100644 index 000000000000..e5aff4f83af8 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/azure/ai/language/text/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/dev_requirements.txt b/sdk/cognitivelanguage/azure-ai-textanalytics/dev_requirements.txt new file mode 100644 index 000000000000..105486471444 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/dev_requirements.txt @@ -0,0 +1,3 @@ +-e ../../../tools/azure-sdk-tools +../../core/azure-core +aiohttp \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_abstractive_summarization_summary_length_prompt_task_result.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_abstractive_summarization_summary_length_prompt_task_result.py new file mode 100644 index 000000000000..9c607cdc304b --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_abstractive_summarization_summary_length_prompt_task_result.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.ai.language.text import TextClient + +""" +# PREREQUISITES + pip install azure-ai-language-text +# USAGE + python successful_abstractive_summarization_summary_length_prompt_task_result.py +""" + + +def main(): + client = TextClient( + endpoint="{Endpoint}", + credential="CREDENTIAL", + ) + + response = client.analyze_text_job_status( + job_id="c0f2a446-05d9-48fc-ba8f-3ef4af8d0b18", + ) + print(response) + + +# x-ms-original-file: 2025-05-15-preview/SuccessfulAbstractiveSummarizationSummaryLengthPromptTaskResult.json +if __name__ == "__main__": + main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_abstractive_summarization_task_result.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_abstractive_summarization_task_result.py new file mode 100644 index 000000000000..0f89d0c5e240 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_abstractive_summarization_task_result.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.ai.language.text import TextClient + +""" +# PREREQUISITES + pip install azure-ai-language-text +# USAGE + python successful_abstractive_summarization_task_result.py +""" + + +def main(): + client = TextClient( + endpoint="{Endpoint}", + credential="CREDENTIAL", + ) + + response = client.analyze_text_job_status( + job_id="c0f2a446-05d9-48fc-ba8f-3ef4af8d0b18", + ) + print(response) + + +# x-ms-original-file: 2025-05-15-preview/SuccessfulAbstractiveSummarizationTaskResult.json +if __name__ == "__main__": + main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_analyze_text_jobs_cancel_request.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_analyze_text_jobs_cancel_request.py new file mode 100644 index 000000000000..91fd094a774f --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_analyze_text_jobs_cancel_request.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.ai.language.text import TextClient + +""" +# PREREQUISITES + pip install azure-ai-language-text +# USAGE + python successful_analyze_text_jobs_cancel_request.py +""" + + +def main(): + client = TextClient( + endpoint="{Endpoint}", + credential="CREDENTIAL", + ) + + client.begin_analyze_text_cancel_job( + job_id="c0f2a446-05d9-48fc-ba8f-3ef4af8d0b18", + ).result() + + +# x-ms-original-file: 2025-05-15-preview/SuccessfulAnalyzeTextJobsCancelRequest.json +if __name__ == "__main__": + main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_analyze_text_jobs_multiple_task_status_request.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_analyze_text_jobs_multiple_task_status_request.py new file mode 100644 index 000000000000..dd9730bee5bb --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_analyze_text_jobs_multiple_task_status_request.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.ai.language.text import TextClient + +""" +# PREREQUISITES + pip install azure-ai-language-text +# USAGE + python successful_analyze_text_jobs_multiple_task_status_request.py +""" + + +def main(): + client = TextClient( + endpoint="{Endpoint}", + credential="CREDENTIAL", + ) + + response = client.analyze_text_job_status( + job_id="c0f2a446-05d9-48fc-ba8f-3ef4af8d0b18", + ) + print(response) + + +# x-ms-original-file: 2025-05-15-preview/SuccessfulAnalyzeTextJobsMultipleTaskStatusRequest.json +if __name__ == "__main__": + main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_linking_request.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_linking_request.py new file mode 100644 index 000000000000..96ced6d09b24 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_linking_request.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.ai.language.text import TextClient + +""" +# PREREQUISITES + pip install azure-ai-language-text +# USAGE + python successful_entity_linking_request.py +""" + + +def main(): + client = TextClient( + endpoint="{Endpoint}", + credential="CREDENTIAL", + ) + + response = client.analyze_text( + body={ + "analysisInput": { + "documents": [ + {"id": "1", "language": "en", "text": "Microsoft was founded by Bill Gates and Paul Allen."}, + {"id": "2", "language": "en", "text": "Pike place market is my favorite Seattle attraction."}, + ] + }, + "kind": "EntityLinking", + "parameters": {"modelVersion": "latest"}, + }, + ) + print(response) + + +# x-ms-original-file: 2025-05-15-preview/SuccessfulEntityLinkingRequest.json +if __name__ == "__main__": + main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_exclusion_request.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_exclusion_request.py new file mode 100644 index 000000000000..19062717c3bf --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_exclusion_request.py @@ -0,0 +1,51 @@ +# pylint: disable=line-too-long,useless-suppression +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.ai.language.text import TextClient + +""" +# PREREQUISITES + pip install azure-ai-language-text +# USAGE + python successful_entity_recognition_exclusion_request.py +""" + + +def main(): + client = TextClient( + endpoint="{Endpoint}", + credential="CREDENTIAL", + ) + + response = client.analyze_text( + body={ + "analysisInput": { + "documents": [ + {"id": "2", "language": "en", "text": "When I was 5 years old I had $90.00 dollars to my name."}, + { + "id": "3", + "language": "en", + "text": "When we flew from LAX it seemed like we were moving at 10 meters per second. I was lucky to see Amsterdam, Effile Tower, and the Nile.", + }, + ] + }, + "kind": "EntityRecognition", + "parameters": { + "exclusionList": ["Numeric"], + "modelVersion": "latest", + "overlapPolicy": {"policyKind": "allowOverlap"}, + }, + }, + ) + print(response) + + +# x-ms-original-file: 2025-05-15-preview/SuccessfulEntityRecognitionExclusionRequest.json +if __name__ == "__main__": + main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_inclusion_request.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_inclusion_request.py new file mode 100644 index 000000000000..c98cf982d60f --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_inclusion_request.py @@ -0,0 +1,47 @@ +# pylint: disable=line-too-long,useless-suppression +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.ai.language.text import TextClient + +""" +# PREREQUISITES + pip install azure-ai-language-text +# USAGE + python successful_entity_recognition_inclusion_request.py +""" + + +def main(): + client = TextClient( + endpoint="{Endpoint}", + credential="CREDENTIAL", + ) + + response = client.analyze_text( + body={ + "analysisInput": { + "documents": [ + {"id": "2", "language": "en", "text": "When I was 5 years old I had $90.00 dollars to my name."}, + { + "id": "3", + "language": "en", + "text": "When we flew from LAX it seemed like we were moving at 10 meters per second. I was lucky to see Amsterdam, Effile Tower, and the Nile.", + }, + ] + }, + "kind": "EntityRecognition", + "parameters": {"inclusionList": ["Location"], "modelVersion": "latest"}, + }, + ) + print(response) + + +# x-ms-original-file: 2025-05-15-preview/SuccessfulEntityRecognitionInclusionRequest.json +if __name__ == "__main__": + main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_inference_options_request.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_inference_options_request.py new file mode 100644 index 000000000000..7c203c72d92b --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_inference_options_request.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.ai.language.text import TextClient + +""" +# PREREQUISITES + pip install azure-ai-language-text +# USAGE + python successful_entity_recognition_inference_options_request.py +""" + + +def main(): + client = TextClient( + endpoint="{Endpoint}", + credential="CREDENTIAL", + ) + + response = client.analyze_text( + body={ + "analysisInput": { + "documents": [ + {"id": "1", "language": "en", "text": "When I was 5 years old I had $90.00 dollars to my name."} + ] + }, + "kind": "EntityRecognition", + "parameters": {"inferenceOptions": {"excludeNormalizedValues": True}, "modelVersion": "latest"}, + }, + ) + print(response) + + +# x-ms-original-file: 2025-05-15-preview/SuccessfulEntityRecognitionInferenceOptionsRequest.json +if __name__ == "__main__": + main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_overlap_policy.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_overlap_policy.py new file mode 100644 index 000000000000..966ca6e63e29 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_overlap_policy.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.ai.language.text import TextClient + +""" +# PREREQUISITES + pip install azure-ai-language-text +# USAGE + python successful_entity_recognition_overlap_policy.py +""" + + +def main(): + client = TextClient( + endpoint="{Endpoint}", + credential="CREDENTIAL", + ) + + response = client.analyze_text( + body={ + "analysisInput": { + "documents": [ + { + "id": "4", + "language": "en", + "text": "25th April Meeting was an intresting one. At least we gont to experience the WorldCup", + } + ] + }, + "kind": "EntityRecognition", + "parameters": {"modelVersion": "latest", "overlapPolicy": {"policyKind": "matchLongest"}}, + }, + ) + print(response) + + +# x-ms-original-file: 2025-05-15-preview/SuccessfulEntityRecognitionOverlapPolicy.json +if __name__ == "__main__": + main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_request.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_request.py new file mode 100644 index 000000000000..19f4912f59a6 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_request.py @@ -0,0 +1,53 @@ +# pylint: disable=line-too-long,useless-suppression +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.ai.language.text import TextClient + +""" +# PREREQUISITES + pip install azure-ai-language-text +# USAGE + python successful_entity_recognition_request.py +""" + + +def main(): + client = TextClient( + endpoint="{Endpoint}", + credential="CREDENTIAL", + ) + + response = client.analyze_text( + body={ + "analysisInput": { + "documents": [ + {"id": "2", "language": "en", "text": "When I was 5 years old I had $90.00 dollars to my name."}, + { + "id": "3", + "language": "en", + "text": "When we flew from LAX it seemed like we were moving at 10 meters per second. I was lucky to see Amsterdam, Effile Tower, and the Nile.", + }, + { + "id": "4", + "language": "en", + "text": "25th April Meeting was an intresting one. 
At least we gont to experience the WorldCup", + }, + {"id": "5", "language": "en", "text": "My IP is 127.12.1.1 and my phone number is 5555555555"}, + ] + }, + "kind": "EntityRecognition", + "parameters": {"modelVersion": "latest", "overlapPolicy": {"policyKind": "allowOverlap"}}, + }, + ) + print(response) + + +# x-ms-original-file: 2025-05-15-preview/SuccessfulEntityRecognitionRequest.json +if __name__ == "__main__": + main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_healthcare_document_type_task_status_request.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_healthcare_document_type_task_status_request.py new file mode 100644 index 000000000000..015b648129c9 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_healthcare_document_type_task_status_request.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.ai.language.text import TextClient + +""" +# PREREQUISITES + pip install azure-ai-language-text +# USAGE + python successful_healthcare_document_type_task_status_request.py +""" + + +def main(): + client = TextClient( + endpoint="{Endpoint}", + credential="CREDENTIAL", + ) + + response = client.analyze_text_job_status( + job_id="15e4a46b-62e2-4386-8d36-9c2a92bb45dd", + ) + print(response) + + +# x-ms-original-file: 2025-05-15-preview/SuccessfulHealthcareDocumentTypeTaskStatusRequest.json +if __name__ == "__main__": + main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_healthcare_task_status_request.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_healthcare_task_status_request.py new file mode 100644 index 000000000000..fcbf623c3ddc --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_healthcare_task_status_request.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.ai.language.text import TextClient + +""" +# PREREQUISITES + pip install azure-ai-language-text +# USAGE + python successful_healthcare_task_status_request.py +""" + + +def main(): + client = TextClient( + endpoint="{Endpoint}", + credential="CREDENTIAL", + ) + + response = client.analyze_text_job_status( + job_id="1780194a-e9c1-4298-b0d4-fdc59ba818a0", + ) + print(response) + + +# x-ms-original-file: 2025-05-15-preview/SuccessfulHealthcareTaskStatusRequest.json +if __name__ == "__main__": + main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_key_phrase_extraction_request.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_key_phrase_extraction_request.py new file mode 100644 index 000000000000..8dcfc53f7d5b --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_key_phrase_extraction_request.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.ai.language.text import TextClient + +""" +# PREREQUISITES + pip install azure-ai-language-text +# USAGE + python successful_key_phrase_extraction_request.py +""" + + +def main(): + client = TextClient( + endpoint="{Endpoint}", + credential="CREDENTIAL", + ) + + response = client.analyze_text( + body={ + "analysisInput": { + "documents": [ + {"id": "1", "language": "en", "text": "Microsoft was founded by Bill Gates and Paul Allen."}, + {"id": "2", "language": "en", "text": "Text Analytics is one of the Azure Cognitive Services."}, + {"id": "3", "language": "en", "text": "My cat might need to see a veterinarian."}, + ] + }, + "kind": "KeyPhraseExtraction", + "parameters": {"modelVersion": "latest"}, + }, + ) + print(response) + + +# x-ms-original-file: 2025-05-15-preview/SuccessfulKeyPhraseExtractionRequest.json +if __name__ == "__main__": + main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_language_detection_request.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_language_detection_request.py new file mode 100644 index 000000000000..dbedbe2775c4 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_language_detection_request.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.ai.language.text import TextClient + +""" +# PREREQUISITES + pip install azure-ai-language-text +# USAGE + python successful_language_detection_request.py +""" + + +def main(): + client = TextClient( + endpoint="{Endpoint}", + credential="CREDENTIAL", + ) + + response = client.analyze_text( + body={ + "analysisInput": { + "documents": [ + {"id": "1", "text": "Hello world"}, + {"id": "2", "text": "Bonjour tout le monde"}, + {"id": "3", "text": "Hola mundo"}, + {"id": "4", "text": "Tumhara naam kya hai?"}, + ] + }, + "kind": "LanguageDetection", + "parameters": {"modelVersion": "latest"}, + }, + ) + print(response) + + +# x-ms-original-file: 2025-05-15-preview/SuccessfulLanguageDetectionRequest.json +if __name__ == "__main__": + main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_pii_entity_recognition_exclusion_request.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_pii_entity_recognition_exclusion_request.py new file mode 100644 index 000000000000..37135ba5db5f --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_pii_entity_recognition_exclusion_request.py @@ -0,0 +1,48 @@ +# pylint: disable=line-too-long,useless-suppression +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.ai.language.text import TextClient + +""" +# PREREQUISITES + pip install azure-ai-language-text +# USAGE + python successful_pii_entity_recognition_exclusion_request.py +""" + + +def main(): + client = TextClient( + endpoint="{Endpoint}", + credential="CREDENTIAL", + ) + + response = client.analyze_text( + body={ + "analysisInput": { + "documents": [ + {"id": "1", "language": "en", "text": "My SSN is 859-98-0987"}, + { + "id": "2", + "language": "en", + "text": "Your ABA number - 111000025 - is the first 9 digits in the lower left hand corner of your personal check.", + }, + {"id": "3", "language": "en", "text": "Is 998.214.865-68 your Brazilian CPF number?"}, + ] + }, + "kind": "PiiEntityRecognition", + "parameters": {"excludePiiCategories": ["USSocialSecurityNumber"], "modelVersion": "latest"}, + }, + ) + print(response) + + +# x-ms-original-file: 2025-05-15-preview/SuccessfulPiiEntityRecognitionExclusionRequest.json +if __name__ == "__main__": + main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_pii_entity_recognition_masked_entities.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_pii_entity_recognition_masked_entities.py new file mode 100644 index 000000000000..5afc0a8b1d6e --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_pii_entity_recognition_masked_entities.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.ai.language.text import TextClient + +""" +# PREREQUISITES + pip install azure-ai-language-text +# USAGE + python successful_pii_entity_recognition_masked_entities.py +""" + + +def main(): + client = TextClient( + endpoint="{Endpoint}", + credential="CREDENTIAL", + ) + + response = client.analyze_text( + body={ + "analysisInput": { + "documents": [ + {"id": "1", "language": "en", "text": "My name is John Doe My phone number is 424 878 9192"} + ] + }, + "kind": "PiiEntityRecognition", + "parameters": {"modelVersion": "latest", "redactionPolicy": {"policyKind": "entityMask"}}, + }, + ) + print(response) + + +# x-ms-original-file: 2025-05-15-preview/SuccessfulPiiEntityRecognitionMaskedEntities.json +if __name__ == "__main__": + main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_pii_entity_recognition_redaction_policy_request.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_pii_entity_recognition_redaction_policy_request.py new file mode 100644 index 000000000000..90130df832f8 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_pii_entity_recognition_redaction_policy_request.py @@ -0,0 +1,51 @@ +# pylint: disable=line-too-long,useless-suppression +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.ai.language.text import TextClient + +""" +# PREREQUISITES + pip install azure-ai-language-text +# USAGE + python successful_pii_entity_recognition_redaction_policy_request.py +""" + + +def main(): + client = TextClient( + endpoint="{Endpoint}", + credential="CREDENTIAL", + ) + + response = client.analyze_text( + body={ + "analysisInput": { + "documents": [ + {"id": "1", "language": "en", "text": "My SSN is 859-98-0987"}, + { + "id": "2", + "language": "en", + "text": "Your ABA number - 111000025 - is the first 9 digits in the lower left hand corner of your personal check.", + }, + {"id": "3", "language": "en", "text": "Is 998.214.865-68 your Brazilian CPF number?"}, + ] + }, + "kind": "PiiEntityRecognition", + "parameters": { + "modelVersion": "latest", + "redactionPolicy": {"policyKind": "characterMask", "redactionCharacter": "-"}, + }, + }, + ) + print(response) + + +# x-ms-original-file: 2025-05-15-preview/SuccessfulPiiEntityRecognitionRedactionPolicyRequest.json +if __name__ == "__main__": + main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_pii_entity_recognition_request.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_pii_entity_recognition_request.py new file mode 100644 index 000000000000..edd7e0264ec2 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_pii_entity_recognition_request.py @@ -0,0 +1,48 @@ +# pylint: disable=line-too-long,useless-suppression +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.ai.language.text import TextClient + +""" +# PREREQUISITES + pip install azure-ai-language-text +# USAGE + python successful_pii_entity_recognition_request.py +""" + + +def main(): + client = TextClient( + endpoint="{Endpoint}", + credential="CREDENTIAL", + ) + + response = client.analyze_text( + body={ + "analysisInput": { + "documents": [ + {"id": "1", "language": "en", "text": "My SSN is 859-98-0987"}, + { + "id": "2", + "language": "en", + "text": "Your ABA number - 111000025 - is the first 9 digits in the lower left hand corner of your personal check.", + }, + {"id": "3", "language": "en", "text": "Is 998.214.865-68 your Brazilian CPF number?"}, + ] + }, + "kind": "PiiEntityRecognition", + "parameters": {"modelVersion": "latest"}, + }, + ) + print(response) + + +# x-ms-original-file: 2025-05-15-preview/SuccessfulPiiEntityRecognitionRequest.json +if __name__ == "__main__": + main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_sentiment_analysis_request.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_sentiment_analysis_request.py new file mode 100644 index 000000000000..461f7b886b31 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_sentiment_analysis_request.py @@ -0,0 +1,46 @@ +# pylint: disable=line-too-long,useless-suppression +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.ai.language.text import TextClient + +""" +# PREREQUISITES + pip install azure-ai-language-text +# USAGE + python successful_sentiment_analysis_request.py +""" + + +def main(): + client = TextClient( + endpoint="{Endpoint}", + credential="CREDENTIAL", + ) + + response = client.analyze_text( + body={ + "analysisInput": { + "documents": [ + { + "id": "1", + "language": "en", + "text": "Great atmosphere. Close to plenty of restaurants, hotels, and transit! Staff are friendly and helpful.", + } + ] + }, + "kind": "SentimentAnalysis", + "parameters": {"modelVersion": "latest"}, + }, + ) + print(response) + + +# x-ms-original-file: 2025-05-15-preview/SuccessfulSentimentAnalysisRequest.json +if __name__ == "__main__": + main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/conftest.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/conftest.py new file mode 100644 index 000000000000..09a9ff0054bd --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/conftest.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import os +import pytest +from dotenv import load_dotenv +from devtools_testutils import ( + test_proxy, + add_general_regex_sanitizer, + add_body_key_sanitizer, + add_header_regex_sanitizer, +) + +load_dotenv() + + +# For security, please avoid record sensitive identity information in recordings +@pytest.fixture(scope="session", autouse=True) +def add_sanitizers(test_proxy): + text_subscription_id = os.environ.get("TEXT_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + text_tenant_id = os.environ.get("TEXT_TENANT_ID", "00000000-0000-0000-0000-000000000000") + text_client_id = os.environ.get("TEXT_CLIENT_ID", "00000000-0000-0000-0000-000000000000") + text_client_secret = os.environ.get("TEXT_CLIENT_SECRET", "00000000-0000-0000-0000-000000000000") + add_general_regex_sanitizer(regex=text_subscription_id, value="00000000-0000-0000-0000-000000000000") + add_general_regex_sanitizer(regex=text_tenant_id, value="00000000-0000-0000-0000-000000000000") + add_general_regex_sanitizer(regex=text_client_id, value="00000000-0000-0000-0000-000000000000") + add_general_regex_sanitizer(regex=text_client_secret, value="00000000-0000-0000-0000-000000000000") + + add_header_regex_sanitizer(key="Set-Cookie", value="[set-cookie;]") + add_header_regex_sanitizer(key="Cookie", value="cookie;") + add_body_key_sanitizer(json_path="$..access_token", value="access_token") diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/test_text.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/test_text.py new file mode 100644 index 000000000000..d88a9f16299f --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/test_text.py @@ -0,0 +1,69 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import pytest +from devtools_testutils import recorded_by_proxy +from testpreparer import TextClientTestBase, TextPreparer + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestText(TextClientTestBase): + @TextPreparer() + @recorded_by_proxy + def test_analyze_text(self, text_endpoint): + client = self.create_client(endpoint=text_endpoint) + response = client.analyze_text( + body={ + "kind": "EntityLinking", + "analysisInput": {"documents": [{"id": "str", "text": "str", "language": "str"}]}, + "parameters": {"loggingOptOut": bool, "modelVersion": "str", "stringIndexType": "str"}, + }, + ) + + # please add some check logic here by yourself + # ... + + @TextPreparer() + @recorded_by_proxy + def test_analyze_text_job_status(self, text_endpoint): + client = self.create_client(endpoint=text_endpoint) + response = client.analyze_text_job_status( + job_id="str", + ) + + # please add some check logic here by yourself + # ... 
+ + @TextPreparer() + @recorded_by_proxy + def test_begin_analyze_text_submit_job(self, text_endpoint): + client = self.create_client(endpoint=text_endpoint) + response = client.begin_analyze_text_submit_job( + body={ + "analysisInput": {"documents": [{"id": "str", "text": "str", "language": "str"}]}, + "tasks": ["analyze_text_lro_task"], + "cancelAfter": 0.0, + "defaultLanguage": "str", + "displayName": "str", + }, + analysis_input={"documents": [{"id": "str", "text": "str", "language": "str"}]}, + tasks=["analyze_text_lro_task"], + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @TextPreparer() + @recorded_by_proxy + def test_begin_analyze_text_cancel_job(self, text_endpoint): + client = self.create_client(endpoint=text_endpoint) + response = client.begin_analyze_text_cancel_job( + job_id="str", + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/test_text_async.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/test_text_async.py new file mode 100644 index 000000000000..ad8c21e68626 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/test_text_async.py @@ -0,0 +1,74 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import pytest +from devtools_testutils.aio import recorded_by_proxy_async +from testpreparer import TextPreparer +from testpreparer_async import TextClientTestBaseAsync + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestTextAsync(TextClientTestBaseAsync): + @TextPreparer() + @recorded_by_proxy_async + async def test_analyze_text(self, text_endpoint): + client = self.create_async_client(endpoint=text_endpoint) + response = await client.analyze_text( + body={ + "kind": "EntityLinking", + "analysisInput": {"documents": [{"id": "str", "text": "str", "language": "str"}]}, + "parameters": {"loggingOptOut": bool, "modelVersion": "str", "stringIndexType": "str"}, + }, + ) + + # please add some check logic here by yourself + # ... + + @TextPreparer() + @recorded_by_proxy_async + async def test_analyze_text_job_status(self, text_endpoint): + client = self.create_async_client(endpoint=text_endpoint) + response = await client.analyze_text_job_status( + job_id="str", + ) + + # please add some check logic here by yourself + # ... 
+ + @TextPreparer() + @recorded_by_proxy_async + async def test_begin_analyze_text_submit_job(self, text_endpoint): + client = self.create_async_client(endpoint=text_endpoint) + response = await ( + await client.begin_analyze_text_submit_job( + body={ + "analysisInput": {"documents": [{"id": "str", "text": "str", "language": "str"}]}, + "tasks": ["analyze_text_lro_task"], + "cancelAfter": 0.0, + "defaultLanguage": "str", + "displayName": "str", + }, + analysis_input={"documents": [{"id": "str", "text": "str", "language": "str"}]}, + tasks=["analyze_text_lro_task"], + ) + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @TextPreparer() + @recorded_by_proxy_async + async def test_begin_analyze_text_cancel_job(self, text_endpoint): + client = self.create_async_client(endpoint=text_endpoint) + response = await ( + await client.begin_analyze_text_cancel_job( + job_id="str", + ) + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/testpreparer.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/testpreparer.py new file mode 100644 index 000000000000..68d7ff8b7e0d --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/testpreparer.py @@ -0,0 +1,24 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from azure.ai.language.text import TextClient +from devtools_testutils import AzureRecordedTestCase, PowerShellPreparer +import functools + + +class TextClientTestBase(AzureRecordedTestCase): + + def create_client(self, endpoint): + credential = self.get_credential(TextClient) + return self.create_client_from_credential( + TextClient, + credential=credential, + endpoint=endpoint, + ) + + +TextPreparer = functools.partial(PowerShellPreparer, "text", text_endpoint="https://fake_text_endpoint.com") diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/testpreparer_async.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/testpreparer_async.py new file mode 100644 index 000000000000..5fe5ba289a28 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/testpreparer_async.py @@ -0,0 +1,20 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +from azure.ai.language.text.aio import TextClient +from devtools_testutils import AzureRecordedTestCase + + +class TextClientTestBaseAsync(AzureRecordedTestCase): + + def create_async_client(self, endpoint): + credential = self.get_credential(TextClient, is_async=True) + return self.create_client_from_credential( + TextClient, + credential=credential, + endpoint=endpoint, + ) diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/pyproject.toml b/sdk/cognitivelanguage/azure-ai-textanalytics/pyproject.toml new file mode 100644 index 000000000000..e7687fdae93b --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/pyproject.toml @@ -0,0 +1,2 @@ +[packaging] +auto_update = false \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/setup.py b/sdk/cognitivelanguage/azure-ai-textanalytics/setup.py new file mode 100644 index 000000000000..9553205c44e5 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/setup.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + + +import os +import re +from setuptools import setup, find_packages + + +PACKAGE_NAME = "azure-ai-language-text" +PACKAGE_PPRINT_NAME = "Azure Ai Language Text" +PACKAGE_NAMESPACE = "azure.ai.language.text" + +# a.b.c => a/b/c +package_folder_path = PACKAGE_NAMESPACE.replace(".", "/") + +# Version extraction inspired from 'requests' +with open(os.path.join(package_folder_path, "_version.py"), "r") as fd: + version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE).group(1) + +if not version: + raise RuntimeError("Cannot find version information") + + +setup( + name=PACKAGE_NAME, + version=version, + description="Microsoft Corporation {} Client Library for Python".format(PACKAGE_PPRINT_NAME), + long_description=open("README.md", "r").read(), + long_description_content_type="text/markdown", + license="MIT License", + author="Microsoft Corporation", + author_email="azpysdkhelp@microsoft.com", + url="https://github.com/Azure/azure-sdk-for-python/tree/main/sdk", + keywords="azure, azure sdk", + classifiers=[ + "Development Status :: 4 - Beta", + "Programming Language :: Python", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "License :: OSI Approved :: MIT License", + ], + zip_safe=False, + packages=find_packages( + exclude=[ + "tests", + # Exclude packages that will be covered by PEP420 or nspkg + "azure", + "azure.ai", + "azure.ai.language", + ] + ), + include_package_data=True, + package_data={ + "azure.ai.language.text": ["py.typed"], + }, + install_requires=[ + "isodate>=0.6.1", + "azure-core>=1.35.0", + "typing-extensions>=4.6.0", + ], + python_requires=">=3.9", +) diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/tsp-location.yaml 
b/sdk/cognitivelanguage/azure-ai-textanalytics/tsp-location.yaml new file mode 100644 index 000000000000..7151f7cd9736 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/tsp-location.yaml @@ -0,0 +1,4 @@ +directory: specification/cognitiveservices/Language.AnalyzeText +commit: 7c640375cebac863ef3430e8ac9aa8cae4843677 +repo: Azure/azure-rest-api-specs +additionalDirectories: diff --git a/sdk/cognitivelanguage/ci.yml b/sdk/cognitivelanguage/ci.yml index 10f8866342d6..b758b9b6992a 100644 --- a/sdk/cognitivelanguage/ci.yml +++ b/sdk/cognitivelanguage/ci.yml @@ -35,3 +35,5 @@ extends: safeName: azureailanguagequestionanswering - name: azure-ai-language-conversations safeName: azureailanguageconversations + - name: azure-ai-textanalytics + safeName: azureaitextanalytics \ No newline at end of file From 2edeaae87504dcda54ee0ff12a39346a4cab44ae Mon Sep 17 00:00:00 2001 From: "Amber Chen (Centific Technologies Inc)" Date: Thu, 21 Aug 2025 21:20:47 -0700 Subject: [PATCH 2/5] add cspell.json --- .vscode/cspell.json | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.vscode/cspell.json b/.vscode/cspell.json index f5b5f2d49f62..37d235c5b9ff 100644 --- a/.vscode/cspell.json +++ b/.vscode/cspell.json @@ -798,6 +798,11 @@ "filename": "sdk/core/azure-core-tracing-opentelemetry/**", "words": [ "ctxt", "somethingstuid" ] }, + { + "filename": "sdk/cognitivelanguage/azure-ai-textanalytics/**", + "words": [ "Fhir", "BRCPF", "IDRG", "fhir", "EUGPS", "PLREGON", "REGON", "ESDNI", "USUK" ], + "caseSensitive": false + }, { "filename": "sdk/cognitivelanguage/azure-ai-language-conversations/**", "words": [ "conv", "summ" ], From b489b5cacc4bc93f9b3dc6963440af433900221a Mon Sep 17 00:00:00 2001 From: "Amber Chen (Centific Technologies Inc)" Date: Thu, 21 Aug 2025 21:34:16 -0700 Subject: [PATCH 3/5] add more words in cspell --- .vscode/cspell.json | 61 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 60 insertions(+), 1 deletion(-) diff --git a/.vscode/cspell.json b/.vscode/cspell.json index 37d235c5b9ff..6a09981e6caa 100644 --- a/.vscode/cspell.json +++ b/.vscode/cspell.json @@ -800,7 +800,66 @@ }, { "filename": "sdk/cognitivelanguage/azure-ai-textanalytics/**", - "words": [ "Fhir", "BRCPF", "IDRG", "fhir", "EUGPS", "PLREGON", "REGON", "ESDNI", "USUK" ], + "words": [ + "fhir", + "BRCPF", + "IDRG", + "EUGPS", + "PLREGON", + "REGON", + "ESDNI", + "USUK", + "ARMN", + "Bangla", + "ETHI", + "Ethi", + "GEOR", + "Geor", + "GREK", + "Grek", + "GUJR", + "Gujr", + "HANI", + "Hani", + "HEBR", + "Hebr", + "JPAN", + "Jpan", + "KHMR", + "Khmr", + "KNDA", + "Knda", + "LAOO", + "Laoo", + "MLYM", + "Mlym", + "MTEI", + "Mtei", + "Meitei", + "MYMR", + "Mymr", + "OLCK", + "Olck", + "ORYA", + "Orya", + "SHRD", + "Shrd", + "Sharada", + "TAML", + "Taml", + "TELU", + "Telu", + "THAA", + "Thaa", + "Thaana", + "TIBT", + "Tibt", + "UMLS", + "Effile", + "gont", + "mundo", + "naam" + ], "caseSensitive": false }, { From d735c88d045a06108f7968b8cc1514bcadbf623f Mon Sep 17 00:00:00 2001 From: "Amber Chen (Centific Technologies Inc)" Date: Thu, 21 Aug 2025 21:37:58 -0700 Subject: [PATCH 4/5] add a new readme --- .../azure-ai-textanalytics/README.md | 503 +++++++++++++++++- 1 file changed, 480 insertions(+), 23 deletions(-) diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/README.md b/sdk/cognitivelanguage/azure-ai-textanalytics/README.md index dae8b84e3ac4..eae94891746c 100644 --- a/sdk/cognitivelanguage/azure-ai-textanalytics/README.md +++ b/sdk/cognitivelanguage/azure-ai-textanalytics/README.md @@ -1,43 +1,500 
@@ -# Azure Ai Language Text client library for Python - +[![Build Status](https://dev.azure.com/azure-sdk/public/_apis/build/status/azure-sdk-for-python.client?branchName=main)](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=46?branchName=main) + +# Azure Conversational Language Understanding client library for Python +Conversational Language Understanding - aka **CLU** for short - is a cloud-based conversational AI service which provides many language understanding capabilities like: +- Conversation App: It's used in extracting intents and entities in conversations +- Workflow app: Acts like an orchestrator to select the best candidate to analyze conversations to get best response from apps like Qna, Luis, and Conversation App +- Conversational Summarization: Used to analyze conversations in the form of issues/resolution, chapter title, and narrative summarizations + +[Source code][conversationallanguage_client_src] +| [Package (PyPI)][conversationallanguage_pypi_package] +| [Package (Conda)](https://anaconda.org/microsoft/azure-ai-language-conversations/) +| [API reference documentation][api_reference_documentation] +| [Samples][conversationallanguage_samples] +| [Product documentation][conversationallanguage_docs] +| [REST API documentation][conversationallanguage_restdocs] ## Getting started +### Prerequisites + +* Python 3.7 or later is required to use this package. +* An [Azure subscription][azure_subscription] +* A [Language service resource][language_resource] + + ### Install the package +Install the Azure Conversations client library for Python with [pip][pip_link]: + ```bash -python -m pip install azure-ai-language-text +pip install azure-ai-language-conversations +``` + +> Note: This version of the client library defaults to the 2023-04-01 version of the service + +### Authenticate the client +In order to interact with the CLU service, you'll need to create an instance of the [ConversationAnalysisClient][conversationanalysisclient_class] class, or [ConversationAuthoringClient][conversationauthoringclient_class] class. You will need an **endpoint**, and an **API key** to instantiate a client object. For more information regarding authenticating with Cognitive Services, see [Authenticate requests to Azure Cognitive Services][cognitive_auth]. + +#### Get an API key +You can get the **endpoint** and an **API key** from the Cognitive Services resource in the [Azure Portal][azure_portal]. + +Alternatively, use the [Azure CLI][azure_cli] command shown below to get the API key from the Cognitive Service resource. 
+ +```powershell +az cognitiveservices account keys list --resource-group --name +``` + + +#### Create ConversationAnalysisClient +Once you've determined your **endpoint** and **API key** you can instantiate a `ConversationAnalysisClient`: + +```python +from azure.core.credentials import AzureKeyCredential +from azure.ai.language.conversations import ConversationAnalysisClient + +endpoint = "https://.cognitiveservices.azure.com/" +credential = AzureKeyCredential("") +client = ConversationAnalysisClient(endpoint, credential) +``` + +#### Create ConversationAuthoringClient +Once you've determined your **endpoint** and **API key** you can instantiate a `ConversationAuthoringClient`: + +```python +from azure.core.credentials import AzureKeyCredential +from azure.ai.language.conversations.authoring import ConversationAuthoringClient + +endpoint = "https://.cognitiveservices.azure.com/" +credential = AzureKeyCredential("") +client = ConversationAuthoringClient(endpoint, credential) +``` + +#### Create a client with an Azure Active Directory Credential + +To use an [Azure Active Directory (AAD) token credential][cognitive_authentication_aad], +provide an instance of the desired credential type obtained from the +[azure-identity][azure_identity_credentials] library. +Note that regional endpoints do not support AAD authentication. Create a [custom subdomain][custom_subdomain] +name for your resource in order to use this type of authentication. + +Authentication with AAD requires some initial setup: + +- [Install azure-identity][install_azure_identity] +- [Register a new AAD application][register_aad_app] +- [Grant access][grant_role_access] to the Language service by assigning the "Cognitive Services Language Reader" role to your service principal. + +After setup, you can choose which type of [credential][azure_identity_credentials] from azure.identity to use. +As an example, [DefaultAzureCredential][default_azure_credential] +can be used to authenticate the client: + +Set the values of the client ID, tenant ID, and client secret of the AAD application as environment variables: +`AZURE_CLIENT_ID`, `AZURE_TENANT_ID`, `AZURE_CLIENT_SECRET` + +Use the returned token credential to authenticate the client: + +```python +from azure.ai.language.conversations import ConversationAnalysisClient +from azure.identity import DefaultAzureCredential + +credential = DefaultAzureCredential() +client = ConversationAnalysisClient(endpoint="https://.cognitiveservices.azure.com/", credential=credential) +``` + +## Key concepts + +### ConversationAnalysisClient +The [ConversationAnalysisClient][conversationanalysisclient_class] is the primary interface for making predictions using your deployed Conversations models. For asynchronous operations, an async `ConversationAnalysisClient` is in the `azure.ai.language.conversation.aio` namespace. + +### ConversationAuthoringClient +You can use the [ConversationAuthoringClient][conversationauthoringclient_class] to interface with the [Azure Language Portal][azure_language_portal] to carry out authoring operations on your language resource/project. For example, you can use it to create a project, populate with training data, train, test, and deploy. For asynchronous operations, an async `ConversationAuthoringClient` is in the `azure.ai.language.conversation.authoring.aio` namespace. + +## Examples +The `azure-ai-language-conversation` client library provides both synchronous and asynchronous APIs. 
+ +The following examples show common scenarios using the `client` [created above](#create-conversationanalysisclient). + +### Analyze Text with a Conversation App +If you would like to extract custom intents and entities from a user utterance, you can call the `client.analyze_conversation()` method with your conversation's project name as follows: + + +```python +# import libraries +import os +from azure.core.credentials import AzureKeyCredential +from azure.ai.language.conversations import ConversationAnalysisClient + +# get secrets +clu_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"] +clu_key = os.environ["AZURE_CONVERSATIONS_KEY"] +project_name = os.environ["AZURE_CONVERSATIONS_PROJECT_NAME"] +deployment_name = os.environ["AZURE_CONVERSATIONS_DEPLOYMENT_NAME"] + +# analyze quey +client = ConversationAnalysisClient(clu_endpoint, AzureKeyCredential(clu_key)) +with client: + query = "Send an email to Carol about the tomorrow's demo" + result = client.analyze_conversation( + task={ + "kind": "Conversation", + "analysisInput": { + "conversationItem": { + "participantId": "1", + "id": "1", + "modality": "text", + "language": "en", + "text": query + }, + "isLoggingEnabled": False + }, + "parameters": { + "projectName": project_name, + "deploymentName": deployment_name, + "verbose": True + } + } + ) + +# view result +print("query: {}".format(result["result"]["query"])) +print("project kind: {}\n".format(result["result"]["prediction"]["projectKind"])) + +print("top intent: {}".format(result["result"]["prediction"]["topIntent"])) +print("category: {}".format(result["result"]["prediction"]["intents"][0]["category"])) +print("confidence score: {}\n".format(result["result"]["prediction"]["intents"][0]["confidenceScore"])) + +print("entities:") +for entity in result["result"]["prediction"]["entities"]: + print("\ncategory: {}".format(entity["category"])) + print("text: {}".format(entity["text"])) + print("confidence score: {}".format(entity["confidenceScore"])) + if "resolutions" in entity: + print("resolutions") + for resolution in entity["resolutions"]: + print("kind: {}".format(resolution["resolutionKind"])) + print("value: {}".format(resolution["value"])) + if "extraInformation" in entity: + print("extra info") + for data in entity["extraInformation"]: + print("kind: {}".format(data["extraInformationKind"])) + if data["extraInformationKind"] == "ListKey": + print("key: {}".format(data["key"])) + if data["extraInformationKind"] == "EntitySubtype": + print("value: {}".format(data["value"])) ``` -#### Prequisites +### Analyze Text with an Orchestration App -- Python 3.9 or later is required to use this package. -- You need an [Azure subscription][azure_sub] to use this package. -- An existing Azure Ai Language Text instance. +If you would like to pass the user utterance to your orchestrator (worflow) app, you can call the `client.analyze_conversation()` method with your orchestration's project name. The orchestrator project simply orchestrates the submitted user utterance between your language apps (Luis, Conversation, and Question Answering) to get the best response according to the user intent. 
+
+```python
+# import libraries
+import os
+from azure.core.credentials import AzureKeyCredential
+from azure.ai.language.conversations import ConversationAnalysisClient
+
+# get secrets
+clu_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"]
+clu_key = os.environ["AZURE_CONVERSATIONS_KEY"]
+project_name = os.environ["AZURE_CONVERSATIONS_WORKFLOW_PROJECT_NAME"]
+deployment_name = os.environ["AZURE_CONVERSATIONS_WORKFLOW_DEPLOYMENT_NAME"]
+
+# analyze query
+client = ConversationAnalysisClient(clu_endpoint, AzureKeyCredential(clu_key))
+with client:
+    query = "Reserve a table for 2 at the Italian restaurant"
+    result = client.analyze_conversation(
+        task={
+            "kind": "Conversation",
+            "analysisInput": {
+                "conversationItem": {
+                    "participantId": "1",
+                    "id": "1",
+                    "modality": "text",
+                    "language": "en",
+                    "text": query
+                },
+                "isLoggingEnabled": False
+            },
+            "parameters": {
+                "projectName": project_name,
+                "deploymentName": deployment_name,
+                "verbose": True
+            }
+        }
+    )
+
+# view result
+print("query: {}".format(result["result"]["query"]))
+print("project kind: {}\n".format(result["result"]["prediction"]["projectKind"]))
+
+# top intent
+top_intent = result["result"]["prediction"]["topIntent"]
+print("top intent: {}".format(top_intent))
+top_intent_object = result["result"]["prediction"]["intents"][top_intent]
+print("confidence score: {}".format(top_intent_object["confidenceScore"]))
+print("project kind: {}".format(top_intent_object["targetProjectKind"]))
+
+if top_intent_object["targetProjectKind"] == "Luis":
+    print("\nluis response:")
+    luis_response = top_intent_object["result"]["prediction"]
+    print("top intent: {}".format(luis_response["topIntent"]))
+    print("\nentities:")
+    for entity in luis_response["entities"]:
+        print("\n{}".format(entity))
+```
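+
+If the top intent instead targets a Question Answering project, the payload under `top_intent_object["result"]` is a Question Answering response rather than a LUIS prediction. A minimal sketch of handling that branch, continuing from the variables above (the exact response shape shown here is an assumption based on the Question Answering REST response):
+
+```python
+if top_intent_object["targetProjectKind"] == "QuestionAnswering":
+    print("\nquestion answering response:")
+    qa_response = top_intent_object["result"]
+    # assumed shape: a ranked list of candidate answers
+    for answer in qa_response["answers"]:
+        print("answer: {}".format(answer["answer"]))
+        print("confidence score: {}".format(answer["confidenceScore"]))
+```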
+
+### Conversational Summarization
+
+You can use this sample if you need to summarize a conversation in the form of an issue and a final resolution. For example, a dialog from tech support:
+
+```python
+# import libraries
+import os
+from azure.core.credentials import AzureKeyCredential
+from azure.ai.language.conversations import ConversationAnalysisClient
+
+# get secrets
+endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"]
+key = os.environ["AZURE_CONVERSATIONS_KEY"]
+
+# analyze query
+client = ConversationAnalysisClient(endpoint, AzureKeyCredential(key))
+with client:
+    poller = client.begin_conversation_analysis(
+        task={
+            "displayName": "Analyze conversations from xxx",
+            "analysisInput": {
+                "conversations": [
+                    {
+                        "conversationItems": [
+                            {
+                                "text": "Hello, how can I help you?",
+                                "modality": "text",
+                                "id": "1",
+                                "role": "Agent",
+                                "participantId": "Agent"
+                            },
+                            {
+                                "text": "How to upgrade Office? I am getting error messages the whole day.",
+                                "modality": "text",
+                                "id": "2",
+                                "role": "Customer",
+                                "participantId": "Customer"
+                            },
+                            {
+                                "text": "Press the upgrade button please. Then sign in and follow the instructions.",
+                                "modality": "text",
+                                "id": "3",
+                                "role": "Agent",
+                                "participantId": "Agent"
+                            }
+                        ],
+                        "modality": "text",
+                        "id": "conversation1",
+                        "language": "en"
+                    },
+                ]
+            },
+            "tasks": [
+                {
+                    "taskName": "Issue task",
+                    "kind": "ConversationalSummarizationTask",
+                    "parameters": {
+                        "summaryAspects": ["issue"]
+                    }
+                },
+                {
+                    "taskName": "Resolution task",
+                    "kind": "ConversationalSummarizationTask",
+                    "parameters": {
+                        "summaryAspects": ["resolution"]
+                    }
+                },
+            ]
+        }
+    )
+
+    # view result
+    result = poller.result()
+    task_results = result["tasks"]["items"]
+    for task in task_results:
+        print(f"\n{task['taskName']} status: {task['status']}")
+        task_result = task["results"]
+        if task_result["errors"]:
+            print("... errors occurred ...")
+            for error in task_result["errors"]:
+                print(error)
+        else:
+            conversation_result = task_result["conversations"][0]
+            if conversation_result["warnings"]:
+                print("... view warnings ...")
+                for warning in conversation_result["warnings"]:
+                    print(warning)
+            else:
+                summaries = conversation_result["summaries"]
+                print("... view task result ...")
+                for summary in summaries:
+                    print(f"{summary['aspect']}: {summary['text']}")
+```
+
+### Import a Conversation Project
+This sample shows a common scenario for the authoring part of the SDK:
+
+```python
+import os
+from azure.core.credentials import AzureKeyCredential
+from azure.ai.language.conversations.authoring import ConversationAuthoringClient
+
+clu_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"]
+clu_key = os.environ["AZURE_CONVERSATIONS_KEY"]
+
+project_name = "test_project"
+
+exported_project_assets = {
+    "projectKind": "Conversation",
+    "intents": [{"category": "Read"}, {"category": "Delete"}],
+    "entities": [{"category": "Sender"}],
+    "utterances": [
+        {
+            "text": "Open Blake's email",
+            "dataset": "Train",
+            "intent": "Read",
+            "entities": [{"category": "Sender", "offset": 5, "length": 5}],
+        },
+        {
+            "text": "Delete last email",
+            "language": "en-gb",
+            "dataset": "Test",
+            "intent": "Delete",
+            "entities": [],
+        },
+    ],
+}
+
+client = ConversationAuthoringClient(
+    clu_endpoint, AzureKeyCredential(clu_key)
+)
+poller = client.begin_import_project(
+    project_name=project_name,
+    project={
+        "assets": exported_project_assets,
+        "metadata": {
+            "projectKind": "Conversation",
+            "settings": {"confidenceThreshold": 0.7},
+            "projectName": "EmailApp",
+            "multilingual": True,
+            "description": "Trying out CLU",
+            "language": "en-us",
+        },
+        "projectFileVersion": "2022-05-01",
+        "stringIndexType": "Utf16CodeUnit",
+    },
+)
+response = poller.result()
+print(response)
+```
+
+## Optional Configuration
+
+Optional keyword arguments can be passed in at the client and per-operation level. The azure-core [reference documentation][azure_core_ref_docs] describes available configurations for retries, logging, transport protocols, and more.
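+
+For example, the retry policy can be tuned when the client is constructed. A minimal sketch (`retry_total` and `retry_backoff_factor` are standard azure-core retry options; the endpoint and key placeholders are illustrative):
+
+```python
+from azure.core.credentials import AzureKeyCredential
+from azure.ai.language.conversations import ConversationAnalysisClient
+
+# azure-core retry settings applied to every request made through this client
+client = ConversationAnalysisClient(
+    "https://<my-custom-subdomain>.cognitiveservices.azure.com/",
+    AzureKeyCredential("<api-key>"),
+    retry_total=5,             # allow up to five retries
+    retry_backoff_factor=0.8,  # backoff factor between retries
+)
+```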
+
+## Troubleshooting
+
+### General
+
+The Conversations client will raise exceptions defined in [Azure Core][azure_core_exceptions].
+
+### Logging
+
+This library uses the standard
+[logging][python_logging] library for logging.
+Basic information about HTTP sessions (URLs, headers, etc.) is logged at INFO
+level.
+
+Detailed DEBUG level logging, including request/response bodies and unredacted
+headers, can be enabled on a client with the `logging_enable` argument.
+
+See full SDK logging documentation with examples [here][sdk_logging_docs].
+
+```python
+import sys
+import logging
+from azure.core.credentials import AzureKeyCredential
+from azure.ai.language.conversations import ConversationAnalysisClient
+
+# Create a logger for the 'azure' SDK
+logger = logging.getLogger('azure')
+logger.setLevel(logging.DEBUG)
+
+# Configure a console output
+handler = logging.StreamHandler(stream=sys.stdout)
+logger.addHandler(handler)
+
+endpoint = "https://<my-custom-subdomain>.cognitiveservices.azure.com/"
+credential = AzureKeyCredential("<api-key>")
+
+# This client will log detailed information about its HTTP sessions, at DEBUG level
+client = ConversationAnalysisClient(endpoint, credential, logging_enable=True)
+result = client.analyze_conversation(...)
+```
+
+Similarly, `logging_enable` can enable detailed logging for a single operation, even when it isn't enabled for the client:
+
+```python
+result = client.analyze_conversation(..., logging_enable=True)
+```
+
+## Next steps
+
+### More sample code
+
+See the [Sample README][conversationallanguage_samples] for several code snippets illustrating common patterns used in the CLU Python API.
+
 ## Contributing
 
-This project welcomes contributions and suggestions. Most contributions require
-you to agree to a Contributor License Agreement (CLA) declaring that you have
-the right to, and actually do, grant us the rights to use your contribution.
-For details, visit https://cla.microsoft.com.
+See the [CONTRIBUTING.md][contributing] for details on building, testing, and contributing to this library.
+
+This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit [cla.microsoft.com][cla].
 
-When you submit a pull request, a CLA-bot will automatically determine whether
-you need to provide a CLA and decorate the PR appropriately (e.g., label,
-comment). Simply follow the instructions provided by the bot. You will only
-need to do this once across all repos using our CLA.
+When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA.
 
-This project has adopted the
-[Microsoft Open Source Code of Conduct][code_of_conduct]. For more information,
-see the Code of Conduct FAQ or contact opencode@microsoft.com with any
-additional questions or comments.
+This project has adopted the [Microsoft Open Source Code of Conduct][code_of_conduct]. For more information, see the [Code of Conduct FAQ][coc_faq] or contact [opencode@microsoft.com][coc_contact] with any additional questions or comments.
+
+[azure_cli]: https://learn.microsoft.com/cli/azure/
+[azure_portal]: https://portal.azure.com/
+[azure_subscription]: https://azure.microsoft.com/free/
+[language_resource]: https://portal.azure.com/#create/Microsoft.CognitiveServicesTextAnalytics
+[cla]: https://cla.microsoft.com
+[coc_contact]: mailto:opencode@microsoft.com
+[coc_faq]: https://opensource.microsoft.com/codeofconduct/faq/
 [code_of_conduct]: https://opensource.microsoft.com/codeofconduct/
-[authenticate_with_token]: https://docs.microsoft.com/azure/cognitive-services/authentication?tabs=powershell#authenticate-with-an-authentication-token
+[cognitive_auth]: https://learn.microsoft.com/azure/cognitive-services/authentication/
+[contributing]: https://github.com/Azure/azure-sdk-for-python/blob/main/CONTRIBUTING.md
+[python_logging]: https://docs.python.org/3/library/logging.html
+[sdk_logging_docs]: https://learn.microsoft.com/azure/developer/python/azure-sdk-logging
+[azure_core_ref_docs]: https://azuresdkdocs.z19.web.core.windows.net/python/azure-core/latest/azure.core.html
+[azure_core_readme]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/core/azure-core/README.md
+[pip_link]: https://pypi.org/project/pip/
+[conversationallanguage_client_src]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations
+[conversationallanguage_pypi_package]: https://pypi.org/project/azure-ai-language-conversations/
+[api_reference_documentation]: https://azuresdkdocs.z19.web.core.windows.net/python/azure-ai-language-conversations/latest/azure.ai.language.conversations.html
+[conversationallanguage_refdocs]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations
+[conversationallanguage_docs]: https://learn.microsoft.com/azure/cognitive-services/language-service/conversational-language-understanding/overview
+[conversationallanguage_samples]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations/samples/README.md
+[conversationallanguage_restdocs]: https://learn.microsoft.com/rest/api/language/
+[conversationanalysisclient_class]: https://azuresdkdocs.z19.web.core.windows.net/python/azure-ai-language-conversations/latest/azure.ai.language.conversations.html#azure.ai.language.conversations.ConversationAnalysisClient
+[conversationauthoringclient_class]: https://azuresdkdocs.z19.web.core.windows.net/python/azure-ai-language-conversations/latest/azure.ai.language.conversations.html#azure.ai.language.conversations.ConversationAuthoringClient
+[azure_core_exceptions]: https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/core/azure-core/README.md
+[azure_language_portal]: https://language.cognitive.azure.com/home
+[cognitive_authentication_aad]: https://learn.microsoft.com/azure/cognitive-services/authentication#authenticate-with-azure-active-directory
 [azure_identity_credentials]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#credentials
-[azure_identity_pip]: https://pypi.org/project/azure-identity/
+[custom_subdomain]: https://learn.microsoft.com/azure/cognitive-services/authentication#create-a-resource-with-a-custom-subdomain
+[install_azure_identity]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#install-the-package
+[register_aad_app]: https://learn.microsoft.com/azure/cognitive-services/authentication#assign-a-role-to-a-service-principal
+[grant_role_access]: 
https://learn.microsoft.com/azure/cognitive-services/authentication#assign-a-role-to-a-service-principal [default_azure_credential]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#defaultazurecredential -[pip]: https://pypi.org/project/pip/ -[azure_sub]: https://azure.microsoft.com/free/ + + From 45db5c28828429bab2fca81db400052496659277 Mon Sep 17 00:00:00 2001 From: "Amber Chen (Centific Technologies Inc)" Date: Thu, 21 Aug 2025 22:21:56 -0700 Subject: [PATCH 5/5] removed generated test and sample --- ...ation_summary_length_prompt_task_result.py | 33 --------- ...l_abstractive_summarization_task_result.py | 33 --------- ...essful_analyze_text_jobs_cancel_request.py | 32 -------- ..._text_jobs_multiple_task_status_request.py | 33 --------- .../successful_entity_linking_request.py | 42 ----------- ...ul_entity_recognition_exclusion_request.py | 51 ------------- ...ul_entity_recognition_inclusion_request.py | 47 ------------ ...y_recognition_inference_options_request.py | 41 ---------- ...ssful_entity_recognition_overlap_policy.py | 45 ----------- .../successful_entity_recognition_request.py | 53 ------------- ...hcare_document_type_task_status_request.py | 33 --------- ...ccessful_healthcare_task_status_request.py | 33 --------- ...uccessful_key_phrase_extraction_request.py | 43 ----------- .../successful_language_detection_request.py | 44 ----------- ...ii_entity_recognition_exclusion_request.py | 48 ------------ ..._pii_entity_recognition_masked_entities.py | 41 ---------- ...ty_recognition_redaction_policy_request.py | 51 ------------- ...ccessful_pii_entity_recognition_request.py | 48 ------------ .../successful_sentiment_analysis_request.py | 46 ------------ .../generated_tests/conftest.py | 35 --------- .../generated_tests/test_text.py | 69 ----------------- .../generated_tests/test_text_async.py | 74 ------------------- .../generated_tests/testpreparer.py | 24 ------ .../generated_tests/testpreparer_async.py | 20 ----- 24 files changed, 1019 deletions(-) delete mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_abstractive_summarization_summary_length_prompt_task_result.py delete mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_abstractive_summarization_task_result.py delete mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_analyze_text_jobs_cancel_request.py delete mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_analyze_text_jobs_multiple_task_status_request.py delete mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_linking_request.py delete mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_exclusion_request.py delete mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_inclusion_request.py delete mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_inference_options_request.py delete mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_overlap_policy.py delete mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_request.py delete mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_healthcare_document_type_task_status_request.py delete mode 100644 
sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_healthcare_task_status_request.py delete mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_key_phrase_extraction_request.py delete mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_language_detection_request.py delete mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_pii_entity_recognition_exclusion_request.py delete mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_pii_entity_recognition_masked_entities.py delete mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_pii_entity_recognition_redaction_policy_request.py delete mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_pii_entity_recognition_request.py delete mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_sentiment_analysis_request.py delete mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/conftest.py delete mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/test_text.py delete mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/test_text_async.py delete mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/testpreparer.py delete mode 100644 sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/testpreparer_async.py diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_abstractive_summarization_summary_length_prompt_task_result.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_abstractive_summarization_summary_length_prompt_task_result.py deleted file mode 100644 index 9c607cdc304b..000000000000 --- a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_abstractive_summarization_summary_length_prompt_task_result.py +++ /dev/null @@ -1,33 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -from azure.ai.language.text import TextClient - -""" -# PREREQUISITES - pip install azure-ai-language-text -# USAGE - python successful_abstractive_summarization_summary_length_prompt_task_result.py -""" - - -def main(): - client = TextClient( - endpoint="{Endpoint}", - credential="CREDENTIAL", - ) - - response = client.analyze_text_job_status( - job_id="c0f2a446-05d9-48fc-ba8f-3ef4af8d0b18", - ) - print(response) - - -# x-ms-original-file: 2025-05-15-preview/SuccessfulAbstractiveSummarizationSummaryLengthPromptTaskResult.json -if __name__ == "__main__": - main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_abstractive_summarization_task_result.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_abstractive_summarization_task_result.py deleted file mode 100644 index 0f89d0c5e240..000000000000 --- a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_abstractive_summarization_task_result.py +++ /dev/null @@ -1,33 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.ai.language.text import TextClient - -""" -# PREREQUISITES - pip install azure-ai-language-text -# USAGE - python successful_abstractive_summarization_task_result.py -""" - - -def main(): - client = TextClient( - endpoint="{Endpoint}", - credential="CREDENTIAL", - ) - - response = client.analyze_text_job_status( - job_id="c0f2a446-05d9-48fc-ba8f-3ef4af8d0b18", - ) - print(response) - - -# x-ms-original-file: 2025-05-15-preview/SuccessfulAbstractiveSummarizationTaskResult.json -if __name__ == "__main__": - main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_analyze_text_jobs_cancel_request.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_analyze_text_jobs_cancel_request.py deleted file mode 100644 index 91fd094a774f..000000000000 --- a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_analyze_text_jobs_cancel_request.py +++ /dev/null @@ -1,32 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -from azure.ai.language.text import TextClient - -""" -# PREREQUISITES - pip install azure-ai-language-text -# USAGE - python successful_analyze_text_jobs_cancel_request.py -""" - - -def main(): - client = TextClient( - endpoint="{Endpoint}", - credential="CREDENTIAL", - ) - - client.begin_analyze_text_cancel_job( - job_id="c0f2a446-05d9-48fc-ba8f-3ef4af8d0b18", - ).result() - - -# x-ms-original-file: 2025-05-15-preview/SuccessfulAnalyzeTextJobsCancelRequest.json -if __name__ == "__main__": - main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_analyze_text_jobs_multiple_task_status_request.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_analyze_text_jobs_multiple_task_status_request.py deleted file mode 100644 index dd9730bee5bb..000000000000 --- a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_analyze_text_jobs_multiple_task_status_request.py +++ /dev/null @@ -1,33 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.ai.language.text import TextClient - -""" -# PREREQUISITES - pip install azure-ai-language-text -# USAGE - python successful_analyze_text_jobs_multiple_task_status_request.py -""" - - -def main(): - client = TextClient( - endpoint="{Endpoint}", - credential="CREDENTIAL", - ) - - response = client.analyze_text_job_status( - job_id="c0f2a446-05d9-48fc-ba8f-3ef4af8d0b18", - ) - print(response) - - -# x-ms-original-file: 2025-05-15-preview/SuccessfulAnalyzeTextJobsMultipleTaskStatusRequest.json -if __name__ == "__main__": - main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_linking_request.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_linking_request.py deleted file mode 100644 index 96ced6d09b24..000000000000 --- a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_linking_request.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -from azure.ai.language.text import TextClient - -""" -# PREREQUISITES - pip install azure-ai-language-text -# USAGE - python successful_entity_linking_request.py -""" - - -def main(): - client = TextClient( - endpoint="{Endpoint}", - credential="CREDENTIAL", - ) - - response = client.analyze_text( - body={ - "analysisInput": { - "documents": [ - {"id": "1", "language": "en", "text": "Microsoft was founded by Bill Gates and Paul Allen."}, - {"id": "2", "language": "en", "text": "Pike place market is my favorite Seattle attraction."}, - ] - }, - "kind": "EntityLinking", - "parameters": {"modelVersion": "latest"}, - }, - ) - print(response) - - -# x-ms-original-file: 2025-05-15-preview/SuccessfulEntityLinkingRequest.json -if __name__ == "__main__": - main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_exclusion_request.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_exclusion_request.py deleted file mode 100644 index 19062717c3bf..000000000000 --- a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_exclusion_request.py +++ /dev/null @@ -1,51 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.ai.language.text import TextClient - -""" -# PREREQUISITES - pip install azure-ai-language-text -# USAGE - python successful_entity_recognition_exclusion_request.py -""" - - -def main(): - client = TextClient( - endpoint="{Endpoint}", - credential="CREDENTIAL", - ) - - response = client.analyze_text( - body={ - "analysisInput": { - "documents": [ - {"id": "2", "language": "en", "text": "When I was 5 years old I had $90.00 dollars to my name."}, - { - "id": "3", - "language": "en", - "text": "When we flew from LAX it seemed like we were moving at 10 meters per second. I was lucky to see Amsterdam, Effile Tower, and the Nile.", - }, - ] - }, - "kind": "EntityRecognition", - "parameters": { - "exclusionList": ["Numeric"], - "modelVersion": "latest", - "overlapPolicy": {"policyKind": "allowOverlap"}, - }, - }, - ) - print(response) - - -# x-ms-original-file: 2025-05-15-preview/SuccessfulEntityRecognitionExclusionRequest.json -if __name__ == "__main__": - main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_inclusion_request.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_inclusion_request.py deleted file mode 100644 index c98cf982d60f..000000000000 --- a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_inclusion_request.py +++ /dev/null @@ -1,47 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.ai.language.text import TextClient - -""" -# PREREQUISITES - pip install azure-ai-language-text -# USAGE - python successful_entity_recognition_inclusion_request.py -""" - - -def main(): - client = TextClient( - endpoint="{Endpoint}", - credential="CREDENTIAL", - ) - - response = client.analyze_text( - body={ - "analysisInput": { - "documents": [ - {"id": "2", "language": "en", "text": "When I was 5 years old I had $90.00 dollars to my name."}, - { - "id": "3", - "language": "en", - "text": "When we flew from LAX it seemed like we were moving at 10 meters per second. I was lucky to see Amsterdam, Effile Tower, and the Nile.", - }, - ] - }, - "kind": "EntityRecognition", - "parameters": {"inclusionList": ["Location"], "modelVersion": "latest"}, - }, - ) - print(response) - - -# x-ms-original-file: 2025-05-15-preview/SuccessfulEntityRecognitionInclusionRequest.json -if __name__ == "__main__": - main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_inference_options_request.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_inference_options_request.py deleted file mode 100644 index 7c203c72d92b..000000000000 --- a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_inference_options_request.py +++ /dev/null @@ -1,41 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.ai.language.text import TextClient - -""" -# PREREQUISITES - pip install azure-ai-language-text -# USAGE - python successful_entity_recognition_inference_options_request.py -""" - - -def main(): - client = TextClient( - endpoint="{Endpoint}", - credential="CREDENTIAL", - ) - - response = client.analyze_text( - body={ - "analysisInput": { - "documents": [ - {"id": "1", "language": "en", "text": "When I was 5 years old I had $90.00 dollars to my name."} - ] - }, - "kind": "EntityRecognition", - "parameters": {"inferenceOptions": {"excludeNormalizedValues": True}, "modelVersion": "latest"}, - }, - ) - print(response) - - -# x-ms-original-file: 2025-05-15-preview/SuccessfulEntityRecognitionInferenceOptionsRequest.json -if __name__ == "__main__": - main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_overlap_policy.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_overlap_policy.py deleted file mode 100644 index 966ca6e63e29..000000000000 --- a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_overlap_policy.py +++ /dev/null @@ -1,45 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. 
-# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.ai.language.text import TextClient - -""" -# PREREQUISITES - pip install azure-ai-language-text -# USAGE - python successful_entity_recognition_overlap_policy.py -""" - - -def main(): - client = TextClient( - endpoint="{Endpoint}", - credential="CREDENTIAL", - ) - - response = client.analyze_text( - body={ - "analysisInput": { - "documents": [ - { - "id": "4", - "language": "en", - "text": "25th April Meeting was an intresting one. At least we gont to experience the WorldCup", - } - ] - }, - "kind": "EntityRecognition", - "parameters": {"modelVersion": "latest", "overlapPolicy": {"policyKind": "matchLongest"}}, - }, - ) - print(response) - - -# x-ms-original-file: 2025-05-15-preview/SuccessfulEntityRecognitionOverlapPolicy.json -if __name__ == "__main__": - main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_request.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_request.py deleted file mode 100644 index 19f4912f59a6..000000000000 --- a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_entity_recognition_request.py +++ /dev/null @@ -1,53 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.ai.language.text import TextClient - -""" -# PREREQUISITES - pip install azure-ai-language-text -# USAGE - python successful_entity_recognition_request.py -""" - - -def main(): - client = TextClient( - endpoint="{Endpoint}", - credential="CREDENTIAL", - ) - - response = client.analyze_text( - body={ - "analysisInput": { - "documents": [ - {"id": "2", "language": "en", "text": "When I was 5 years old I had $90.00 dollars to my name."}, - { - "id": "3", - "language": "en", - "text": "When we flew from LAX it seemed like we were moving at 10 meters per second. I was lucky to see Amsterdam, Effile Tower, and the Nile.", - }, - { - "id": "4", - "language": "en", - "text": "25th April Meeting was an intresting one. 
At least we gont to experience the WorldCup", - }, - {"id": "5", "language": "en", "text": "My IP is 127.12.1.1 and my phone number is 5555555555"}, - ] - }, - "kind": "EntityRecognition", - "parameters": {"modelVersion": "latest", "overlapPolicy": {"policyKind": "allowOverlap"}}, - }, - ) - print(response) - - -# x-ms-original-file: 2025-05-15-preview/SuccessfulEntityRecognitionRequest.json -if __name__ == "__main__": - main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_healthcare_document_type_task_status_request.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_healthcare_document_type_task_status_request.py deleted file mode 100644 index 015b648129c9..000000000000 --- a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_healthcare_document_type_task_status_request.py +++ /dev/null @@ -1,33 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.ai.language.text import TextClient - -""" -# PREREQUISITES - pip install azure-ai-language-text -# USAGE - python successful_healthcare_document_type_task_status_request.py -""" - - -def main(): - client = TextClient( - endpoint="{Endpoint}", - credential="CREDENTIAL", - ) - - response = client.analyze_text_job_status( - job_id="15e4a46b-62e2-4386-8d36-9c2a92bb45dd", - ) - print(response) - - -# x-ms-original-file: 2025-05-15-preview/SuccessfulHealthcareDocumentTypeTaskStatusRequest.json -if __name__ == "__main__": - main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_healthcare_task_status_request.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_healthcare_task_status_request.py deleted file mode 100644 index fcbf623c3ddc..000000000000 --- a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_healthcare_task_status_request.py +++ /dev/null @@ -1,33 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -from azure.ai.language.text import TextClient - -""" -# PREREQUISITES - pip install azure-ai-language-text -# USAGE - python successful_healthcare_task_status_request.py -""" - - -def main(): - client = TextClient( - endpoint="{Endpoint}", - credential="CREDENTIAL", - ) - - response = client.analyze_text_job_status( - job_id="1780194a-e9c1-4298-b0d4-fdc59ba818a0", - ) - print(response) - - -# x-ms-original-file: 2025-05-15-preview/SuccessfulHealthcareTaskStatusRequest.json -if __name__ == "__main__": - main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_key_phrase_extraction_request.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_key_phrase_extraction_request.py deleted file mode 100644 index 8dcfc53f7d5b..000000000000 --- a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_key_phrase_extraction_request.py +++ /dev/null @@ -1,43 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.ai.language.text import TextClient - -""" -# PREREQUISITES - pip install azure-ai-language-text -# USAGE - python successful_key_phrase_extraction_request.py -""" - - -def main(): - client = TextClient( - endpoint="{Endpoint}", - credential="CREDENTIAL", - ) - - response = client.analyze_text( - body={ - "analysisInput": { - "documents": [ - {"id": "1", "language": "en", "text": "Microsoft was founded by Bill Gates and Paul Allen."}, - {"id": "2", "language": "en", "text": "Text Analytics is one of the Azure Cognitive Services."}, - {"id": "3", "language": "en", "text": "My cat might need to see a veterinarian."}, - ] - }, - "kind": "KeyPhraseExtraction", - "parameters": {"modelVersion": "latest"}, - }, - ) - print(response) - - -# x-ms-original-file: 2025-05-15-preview/SuccessfulKeyPhraseExtractionRequest.json -if __name__ == "__main__": - main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_language_detection_request.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_language_detection_request.py deleted file mode 100644 index dbedbe2775c4..000000000000 --- a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_language_detection_request.py +++ /dev/null @@ -1,44 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -from azure.ai.language.text import TextClient - -""" -# PREREQUISITES - pip install azure-ai-language-text -# USAGE - python successful_language_detection_request.py -""" - - -def main(): - client = TextClient( - endpoint="{Endpoint}", - credential="CREDENTIAL", - ) - - response = client.analyze_text( - body={ - "analysisInput": { - "documents": [ - {"id": "1", "text": "Hello world"}, - {"id": "2", "text": "Bonjour tout le monde"}, - {"id": "3", "text": "Hola mundo"}, - {"id": "4", "text": "Tumhara naam kya hai?"}, - ] - }, - "kind": "LanguageDetection", - "parameters": {"modelVersion": "latest"}, - }, - ) - print(response) - - -# x-ms-original-file: 2025-05-15-preview/SuccessfulLanguageDetectionRequest.json -if __name__ == "__main__": - main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_pii_entity_recognition_exclusion_request.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_pii_entity_recognition_exclusion_request.py deleted file mode 100644 index 37135ba5db5f..000000000000 --- a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_pii_entity_recognition_exclusion_request.py +++ /dev/null @@ -1,48 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.ai.language.text import TextClient - -""" -# PREREQUISITES - pip install azure-ai-language-text -# USAGE - python successful_pii_entity_recognition_exclusion_request.py -""" - - -def main(): - client = TextClient( - endpoint="{Endpoint}", - credential="CREDENTIAL", - ) - - response = client.analyze_text( - body={ - "analysisInput": { - "documents": [ - {"id": "1", "language": "en", "text": "My SSN is 859-98-0987"}, - { - "id": "2", - "language": "en", - "text": "Your ABA number - 111000025 - is the first 9 digits in the lower left hand corner of your personal check.", - }, - {"id": "3", "language": "en", "text": "Is 998.214.865-68 your Brazilian CPF number?"}, - ] - }, - "kind": "PiiEntityRecognition", - "parameters": {"excludePiiCategories": ["USSocialSecurityNumber"], "modelVersion": "latest"}, - }, - ) - print(response) - - -# x-ms-original-file: 2025-05-15-preview/SuccessfulPiiEntityRecognitionExclusionRequest.json -if __name__ == "__main__": - main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_pii_entity_recognition_masked_entities.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_pii_entity_recognition_masked_entities.py deleted file mode 100644 index 5afc0a8b1d6e..000000000000 --- a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_pii_entity_recognition_masked_entities.py +++ /dev/null @@ -1,41 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. 
-# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.ai.language.text import TextClient - -""" -# PREREQUISITES - pip install azure-ai-language-text -# USAGE - python successful_pii_entity_recognition_masked_entities.py -""" - - -def main(): - client = TextClient( - endpoint="{Endpoint}", - credential="CREDENTIAL", - ) - - response = client.analyze_text( - body={ - "analysisInput": { - "documents": [ - {"id": "1", "language": "en", "text": "My name is John Doe My phone number is 424 878 9192"} - ] - }, - "kind": "PiiEntityRecognition", - "parameters": {"modelVersion": "latest", "redactionPolicy": {"policyKind": "entityMask"}}, - }, - ) - print(response) - - -# x-ms-original-file: 2025-05-15-preview/SuccessfulPiiEntityRecognitionMaskedEntities.json -if __name__ == "__main__": - main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_pii_entity_recognition_redaction_policy_request.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_pii_entity_recognition_redaction_policy_request.py deleted file mode 100644 index 90130df832f8..000000000000 --- a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_pii_entity_recognition_redaction_policy_request.py +++ /dev/null @@ -1,51 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -from azure.ai.language.text import TextClient - -""" -# PREREQUISITES - pip install azure-ai-language-text -# USAGE - python successful_pii_entity_recognition_redaction_policy_request.py -""" - - -def main(): - client = TextClient( - endpoint="{Endpoint}", - credential="CREDENTIAL", - ) - - response = client.analyze_text( - body={ - "analysisInput": { - "documents": [ - {"id": "1", "language": "en", "text": "My SSN is 859-98-0987"}, - { - "id": "2", - "language": "en", - "text": "Your ABA number - 111000025 - is the first 9 digits in the lower left hand corner of your personal check.", - }, - {"id": "3", "language": "en", "text": "Is 998.214.865-68 your Brazilian CPF number?"}, - ] - }, - "kind": "PiiEntityRecognition", - "parameters": { - "modelVersion": "latest", - "redactionPolicy": {"policyKind": "characterMask", "redactionCharacter": "-"}, - }, - }, - ) - print(response) - - -# x-ms-original-file: 2025-05-15-preview/SuccessfulPiiEntityRecognitionRedactionPolicyRequest.json -if __name__ == "__main__": - main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_pii_entity_recognition_request.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_pii_entity_recognition_request.py deleted file mode 100644 index edd7e0264ec2..000000000000 --- a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_pii_entity_recognition_request.py +++ /dev/null @@ -1,48 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -from azure.ai.language.text import TextClient - -""" -# PREREQUISITES - pip install azure-ai-language-text -# USAGE - python successful_pii_entity_recognition_request.py -""" - - -def main(): - client = TextClient( - endpoint="{Endpoint}", - credential="CREDENTIAL", - ) - - response = client.analyze_text( - body={ - "analysisInput": { - "documents": [ - {"id": "1", "language": "en", "text": "My SSN is 859-98-0987"}, - { - "id": "2", - "language": "en", - "text": "Your ABA number - 111000025 - is the first 9 digits in the lower left hand corner of your personal check.", - }, - {"id": "3", "language": "en", "text": "Is 998.214.865-68 your Brazilian CPF number?"}, - ] - }, - "kind": "PiiEntityRecognition", - "parameters": {"modelVersion": "latest"}, - }, - ) - print(response) - - -# x-ms-original-file: 2025-05-15-preview/SuccessfulPiiEntityRecognitionRequest.json -if __name__ == "__main__": - main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_sentiment_analysis_request.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_sentiment_analysis_request.py deleted file mode 100644 index 461f7b886b31..000000000000 --- a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_samples/successful_sentiment_analysis_request.py +++ /dev/null @@ -1,46 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.ai.language.text import TextClient - -""" -# PREREQUISITES - pip install azure-ai-language-text -# USAGE - python successful_sentiment_analysis_request.py -""" - - -def main(): - client = TextClient( - endpoint="{Endpoint}", - credential="CREDENTIAL", - ) - - response = client.analyze_text( - body={ - "analysisInput": { - "documents": [ - { - "id": "1", - "language": "en", - "text": "Great atmosphere. Close to plenty of restaurants, hotels, and transit! Staff are friendly and helpful.", - } - ] - }, - "kind": "SentimentAnalysis", - "parameters": {"modelVersion": "latest"}, - }, - ) - print(response) - - -# x-ms-original-file: 2025-05-15-preview/SuccessfulSentimentAnalysisRequest.json -if __name__ == "__main__": - main() diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/conftest.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/conftest.py deleted file mode 100644 index 09a9ff0054bd..000000000000 --- a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/conftest.py +++ /dev/null @@ -1,35 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -import os -import pytest -from dotenv import load_dotenv -from devtools_testutils import ( - test_proxy, - add_general_regex_sanitizer, - add_body_key_sanitizer, - add_header_regex_sanitizer, -) - -load_dotenv() - - -# For security, please avoid record sensitive identity information in recordings -@pytest.fixture(scope="session", autouse=True) -def add_sanitizers(test_proxy): - text_subscription_id = os.environ.get("TEXT_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") - text_tenant_id = os.environ.get("TEXT_TENANT_ID", "00000000-0000-0000-0000-000000000000") - text_client_id = os.environ.get("TEXT_CLIENT_ID", "00000000-0000-0000-0000-000000000000") - text_client_secret = os.environ.get("TEXT_CLIENT_SECRET", "00000000-0000-0000-0000-000000000000") - add_general_regex_sanitizer(regex=text_subscription_id, value="00000000-0000-0000-0000-000000000000") - add_general_regex_sanitizer(regex=text_tenant_id, value="00000000-0000-0000-0000-000000000000") - add_general_regex_sanitizer(regex=text_client_id, value="00000000-0000-0000-0000-000000000000") - add_general_regex_sanitizer(regex=text_client_secret, value="00000000-0000-0000-0000-000000000000") - - add_header_regex_sanitizer(key="Set-Cookie", value="[set-cookie;]") - add_header_regex_sanitizer(key="Cookie", value="cookie;") - add_body_key_sanitizer(json_path="$..access_token", value="access_token") diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/test_text.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/test_text.py deleted file mode 100644 index d88a9f16299f..000000000000 --- a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/test_text.py +++ /dev/null @@ -1,69 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import pytest -from devtools_testutils import recorded_by_proxy -from testpreparer import TextClientTestBase, TextPreparer - - -@pytest.mark.skip("you may need to update the auto-generated test case before run it") -class TestText(TextClientTestBase): - @TextPreparer() - @recorded_by_proxy - def test_analyze_text(self, text_endpoint): - client = self.create_client(endpoint=text_endpoint) - response = client.analyze_text( - body={ - "kind": "EntityLinking", - "analysisInput": {"documents": [{"id": "str", "text": "str", "language": "str"}]}, - "parameters": {"loggingOptOut": bool, "modelVersion": "str", "stringIndexType": "str"}, - }, - ) - - # please add some check logic here by yourself - # ... - - @TextPreparer() - @recorded_by_proxy - def test_analyze_text_job_status(self, text_endpoint): - client = self.create_client(endpoint=text_endpoint) - response = client.analyze_text_job_status( - job_id="str", - ) - - # please add some check logic here by yourself - # ... 
- - @TextPreparer() - @recorded_by_proxy - def test_begin_analyze_text_submit_job(self, text_endpoint): - client = self.create_client(endpoint=text_endpoint) - response = client.begin_analyze_text_submit_job( - body={ - "analysisInput": {"documents": [{"id": "str", "text": "str", "language": "str"}]}, - "tasks": ["analyze_text_lro_task"], - "cancelAfter": 0.0, - "defaultLanguage": "str", - "displayName": "str", - }, - analysis_input={"documents": [{"id": "str", "text": "str", "language": "str"}]}, - tasks=["analyze_text_lro_task"], - ).result() # call '.result()' to poll until service return final result - - # please add some check logic here by yourself - # ... - - @TextPreparer() - @recorded_by_proxy - def test_begin_analyze_text_cancel_job(self, text_endpoint): - client = self.create_client(endpoint=text_endpoint) - response = client.begin_analyze_text_cancel_job( - job_id="str", - ).result() # call '.result()' to poll until service return final result - - # please add some check logic here by yourself - # ... diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/test_text_async.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/test_text_async.py deleted file mode 100644 index ad8c21e68626..000000000000 --- a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/test_text_async.py +++ /dev/null @@ -1,74 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import pytest -from devtools_testutils.aio import recorded_by_proxy_async -from testpreparer import TextPreparer -from testpreparer_async import TextClientTestBaseAsync - - -@pytest.mark.skip("you may need to update the auto-generated test case before run it") -class TestTextAsync(TextClientTestBaseAsync): - @TextPreparer() - @recorded_by_proxy_async - async def test_analyze_text(self, text_endpoint): - client = self.create_async_client(endpoint=text_endpoint) - response = await client.analyze_text( - body={ - "kind": "EntityLinking", - "analysisInput": {"documents": [{"id": "str", "text": "str", "language": "str"}]}, - "parameters": {"loggingOptOut": bool, "modelVersion": "str", "stringIndexType": "str"}, - }, - ) - - # please add some check logic here by yourself - # ... - - @TextPreparer() - @recorded_by_proxy_async - async def test_analyze_text_job_status(self, text_endpoint): - client = self.create_async_client(endpoint=text_endpoint) - response = await client.analyze_text_job_status( - job_id="str", - ) - - # please add some check logic here by yourself - # ... 
diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/testpreparer.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/testpreparer.py
deleted file mode 100644
index 68d7ff8b7e0d..000000000000
--- a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/testpreparer.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
-# Code generated by Microsoft (R) Python Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
-# --------------------------------------------------------------------------
-from azure.ai.language.text import TextClient
-from devtools_testutils import AzureRecordedTestCase, PowerShellPreparer
-import functools
-
-
-class TextClientTestBase(AzureRecordedTestCase):
-
-    def create_client(self, endpoint):
-        credential = self.get_credential(TextClient)
-        return self.create_client_from_credential(
-            TextClient,
-            credential=credential,
-            endpoint=endpoint,
-        )
-
-
-TextPreparer = functools.partial(PowerShellPreparer, "text", text_endpoint="https://fake_text_endpoint.com")
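Editor's note: for readers tracing where the text_endpoint test argument came from — as I understand devtools_testutils (this is its documented convention, not something stated in this patch), PowerShellPreparer is an EnvironmentVariableLoader that upper-cases the prefix plus each keyword into an environment-variable name. A short sketch of that wiring:

# Sketch of the removed preparer's behavior: at run time the loader reads
# TEXT_ENDPOINT from the environment ("text" prefix + kwarg name, upper-cased)
# and falls back to the fake URL during playback, so recordings never contain
# the real endpoint. The decorated test then receives it as text_endpoint.
import functools

from devtools_testutils import PowerShellPreparer

TextPreparer = functools.partial(
    PowerShellPreparer,
    "text",  # env-var prefix
    text_endpoint="https://fake_text_endpoint.com",  # playback fallback / sanitizer value
)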
diff --git a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/testpreparer_async.py b/sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/testpreparer_async.py
deleted file mode 100644
index 5fe5ba289a28..000000000000
--- a/sdk/cognitivelanguage/azure-ai-textanalytics/generated_tests/testpreparer_async.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
-# Code generated by Microsoft (R) Python Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
-# --------------------------------------------------------------------------
-from azure.ai.language.text.aio import TextClient
-from devtools_testutils import AzureRecordedTestCase
-
-
-class TextClientTestBaseAsync(AzureRecordedTestCase):
-
-    def create_async_client(self, endpoint):
-        credential = self.get_credential(TextClient, is_async=True)
-        return self.create_client_from_credential(
-            TextClient,
-            credential=credential,
-            endpoint=endpoint,
-        )
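Editor's note: the removed async base class resolves its credential through get_credential(..., is_async=True) inside the test framework. Outside the framework, the closest equivalent is an async credential from azure-identity; a minimal standalone sketch under that assumption (TEXT_ENDPOINT is hypothetical, and the TokenCredential auth path is assumed to be supported by the generated client):

# Sketch only: standalone stand-in for create_async_client.
import asyncio
import os

from azure.identity.aio import DefaultAzureCredential
from azure.ai.language.text.aio import TextClient


async def main():
    credential = DefaultAzureCredential()
    try:
        async with TextClient(endpoint=os.environ["TEXT_ENDPOINT"], credential=credential) as client:
            # client is now ready for the analyze_text / job operations shown above
            pass
    finally:
        await credential.close()  # async credentials hold sessions that need explicit closing


asyncio.run(main())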