From fb00d2f88c54e5d6aadb994485dbb910c907bf30 Mon Sep 17 00:00:00 2001
From: Himanshu Bisht
Date: Thu, 15 Jan 2026 09:07:10 +0000
Subject: [PATCH 1/2] Updating sdk for 2025-11-01-preview api version

---
 .../azure-developer-loadtesting/MANIFEST.in | 2 +-
 .../_metadata.json | 3 +
 .../apiview-properties.json | 193 +
 .../azure/developer/loadtesting/_client.py | 18 +-
 .../developer/loadtesting/_configuration.py | 8 +-
 .../loadtesting/_operations/__init__.py | 9 +-
 .../loadtesting/_operations/_operations.py | 4366 ++++++++++++-----
 .../loadtesting/_operations/_patch.py | 4 +-
 .../developer/loadtesting/_utils/__init__.py | 6 +
 .../loadtesting/_utils/model_base.py | 1343 +++++
 .../loadtesting/_utils/serialization.py | 2041 ++++++++
 .../developer/loadtesting/_utils/utils.py | 25 +
 .../developer/loadtesting/_validation.py | 20 +-
 .../developer/loadtesting/aio/_client.py | 18 +-
 .../loadtesting/aio/_configuration.py | 8 +-
 .../loadtesting/aio/_operations/__init__.py | 9 +-
 .../aio/_operations/_operations.py | 1731 ++++++-
 .../loadtesting/aio/_operations/_patch.py | 4 +-
 .../developer/loadtesting/models/__init__.py | 64 +
 .../developer/loadtesting/models/_enums.py | 111 +
 .../developer/loadtesting/models/_models.py | 1391 +++++-
 .../create_or_update_notification_rule.py | 59 +
 .../create_or_update_test.py | 99 +
 .../create_or_update_test_app_components.py | 52 +
 .../create_or_update_test_locust.py | 99 +
 .../create_or_update_test_profile.py | 55 +
 ...reate_or_update_test_run_app_components.py | 52 +
 ...r_update_test_run_server_metrics_config.py | 55 +
 ...te_or_update_test_server_metrics_config.py | 55 +
 .../create_or_update_trigger.py | 50 +
 .../delete_notification_rule.py | 40 +
 .../generated_samples/delete_test.py | 40 +
 .../generated_samples/delete_test_file.py | 41 +
 .../generated_samples/delete_test_profile.py | 40 +
 .../delete_test_profile_run.py | 40 +
 .../generated_samples/delete_test_run.py | 40 +
 .../generated_samples/delete_trigger.py | 40 +
 .../generate_test_plan_recommendations.py | 41 +
 .../generate_test_run_insights.py | 40 +
 .../get_notification_rule.py | 41 +
 .../generated_samples/get_status.py | 41 +
 .../generated_samples/get_test.py | 41 +
 .../generated_samples/get_test_file.py | 42 +
 .../generated_samples/get_test_profile.py | 41 +
 .../get_test_profile_run_executed.py | 41 +
 .../get_test_profile_run_executing.py | 41 +
 .../generated_samples/get_test_run.py | 41 +
 .../get_test_run_executing.py | 41 +
 .../generated_samples/get_test_run_file.py | 42 +
 .../get_test_run_latest_insights.py | 41 +
 .../generated_samples/get_trigger.py | 41 +
 .../list_metric_dimension_values.py | 46 +
 .../list_notification_rules.py | 40 +
 .../list_test_app_components.py | 41 +
 .../generated_samples/list_test_files.py | 42 +
 .../list_test_profile_runs.py | 40 +
 .../generated_samples/list_test_profiles.py | 40 +
 .../list_test_run_app_components.py | 41 +
 .../list_test_run_metrics.py | 45 +
 .../list_test_run_metrics_definitions.py | 42 +
 .../list_test_run_metrics_namespaces.py | 41 +
 .../list_test_run_server_metrics_config.py | 41 +
 .../generated_samples/list_test_runs.py | 40 +
 .../list_test_runs_test_ids.py | 40 +
 .../list_test_server_metrics_config.py | 41 +
 .../generated_samples/list_tests.py | 40 +
 .../generated_samples/list_triggers.py | 40 +
 .../patch_test_run_latest_insights.py | 42 +
 .../stop_test_profile_run.py | 41 +
 .../generated_samples/stop_test_run.py | 41 +
 .../generated_tests/conftest.py | 56 +
 .../test_load_test_administration.py | 495 ++
 .../test_load_test_administration_async.py | 500 ++
 .../generated_tests/test_load_test_run.py | 281 ++
 .../test_load_test_run_async.py | 284 ++
 .../generated_tests/testpreparer.py | 44 +
 .../generated_tests/testpreparer_async.py | 31 +
 .../pyproject.toml | 62 +
 .../create_or_update_app_components_test.py | 1 +
 .../samples/create_or_update_test.py | 1 +
 .../samples/create_or_update_test_profile.py | 7 +-
 .../samples/upload_test_file.py | 1 +
 .../azure-developer-loadtesting/setup.py | 71 -
 ...test_async_load_test_administration_ops.py | 60 +-
 .../tests/test_async_load_test_run_ops.py | 69 +-
 .../test_load_test_administration_ops.py | 63 +-
 .../tests/test_load_test_run_ops.py | 79 +-
 .../tests/testcase.py | 2 +-
 .../tests/testcase_async.py | 6 +-
 .../tsp-location.yaml | 4 +-
 90 files changed, 14109 insertions(+), 1598 deletions(-)
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/_metadata.json
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/apiview-properties.json
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_utils/__init__.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_utils/model_base.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_utils/serialization.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_utils/utils.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/create_or_update_notification_rule.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/create_or_update_test.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/create_or_update_test_app_components.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/create_or_update_test_locust.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/create_or_update_test_profile.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/create_or_update_test_run_app_components.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/create_or_update_test_run_server_metrics_config.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/create_or_update_test_server_metrics_config.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/create_or_update_trigger.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/delete_notification_rule.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/delete_test.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/delete_test_file.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/delete_test_profile.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/delete_test_profile_run.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/delete_test_run.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/delete_trigger.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/generate_test_plan_recommendations.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/generate_test_run_insights.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_notification_rule.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_status.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_test.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_test_file.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_test_profile.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_test_profile_run_executed.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_test_profile_run_executing.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_test_run.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_test_run_executing.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_test_run_file.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_test_run_latest_insights.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_trigger.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_metric_dimension_values.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_notification_rules.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_app_components.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_files.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_profile_runs.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_profiles.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_run_app_components.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_run_metrics.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_run_metrics_definitions.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_run_metrics_namespaces.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_run_server_metrics_config.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_runs.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_runs_test_ids.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_server_metrics_config.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_tests.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_triggers.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/patch_test_run_latest_insights.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/stop_test_profile_run.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/stop_test_run.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_tests/conftest.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_tests/test_load_test_administration.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_tests/test_load_test_administration_async.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_tests/test_load_test_run.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_tests/test_load_test_run_async.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_tests/testpreparer.py
 create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_tests/testpreparer_async.py
 delete mode 100644 sdk/loadtesting/azure-developer-loadtesting/setup.py

diff --git a/sdk/loadtesting/azure-developer-loadtesting/MANIFEST.in b/sdk/loadtesting/azure-developer-loadtesting/MANIFEST.in
index 6ebb7e55a108..32f595395d47 100644
--- a/sdk/loadtesting/azure-developer-loadtesting/MANIFEST.in
+++ b/sdk/loadtesting/azure-developer-loadtesting/MANIFEST.in
@@ -4,4 +4,4 @@ include azure/developer/loadtesting/py.typed
 recursive-include tests *.py
 recursive-include samples *.py *.md
 include azure/__init__.py
-include azure/developer/__init__.py
\ No newline at end of file
+include azure/developer/__init__.py
diff --git a/sdk/loadtesting/azure-developer-loadtesting/_metadata.json b/sdk/loadtesting/azure-developer-loadtesting/_metadata.json
new file mode 100644
index 000000000000..66892d1cb69d
--- /dev/null
+++ b/sdk/loadtesting/azure-developer-loadtesting/_metadata.json
@@ -0,0 +1,3 @@
+{
+    "apiVersion": "2025-11-01-preview"
+}
\ No newline at end of file
diff --git a/sdk/loadtesting/azure-developer-loadtesting/apiview-properties.json b/sdk/loadtesting/azure-developer-loadtesting/apiview-properties.json
new file mode 100644
index 000000000000..52d2ded487e5
--- /dev/null
+++ b/sdk/loadtesting/azure-developer-loadtesting/apiview-properties.json
@@ -0,0 +1,193 @@
+{
+  "CrossLanguagePackageId": "Microsoft.LoadTestService",
+  "CrossLanguageDefinitionId": {
+    "azure.developer.loadtesting.models.AppComponent": "Microsoft.LoadTestService.AppComponent",
+    "azure.developer.loadtesting.models.ArtifactsContainerInfo": "Microsoft.LoadTestService.ArtifactsContainerInfo",
+    "azure.developer.loadtesting.models.AutoStopCriteria": "Microsoft.LoadTestService.AutoStopCriteria",
+    "azure.developer.loadtesting.models.CertificateMetadata": "Microsoft.LoadTestService.CertificateMetadata",
+    "azure.developer.loadtesting.models.Recurrence": "Microsoft.LoadTestService.Recurrence",
+    "azure.developer.loadtesting.models.DailyRecurrence": "Microsoft.LoadTestService.DailyRecurrence",
+    "azure.developer.loadtesting.models.DimensionFilter": "Microsoft.LoadTestService.DimensionFilter",
+    "azure.developer.loadtesting.models.DimensionValue": "Microsoft.LoadTestService.DimensionValue",
+    "azure.developer.loadtesting.models.ErrorDetails": "Microsoft.LoadTestService.ErrorDetails",
+    "azure.developer.loadtesting.models.FunctionFlexConsumptionResourceConfiguration": "Microsoft.LoadTestService.FunctionFlexConsumptionResourceConfiguration",
+    "azure.developer.loadtesting.models.TargetResourceConfigurations": "Microsoft.LoadTestService.TargetResourceConfigurations",
+    "azure.developer.loadtesting.models.FunctionFlexConsumptionTargetResourceConfigurations": "Microsoft.LoadTestService.FunctionFlexConsumptionTargetResourceConfigurations",
+    "azure.developer.loadtesting.models.HourlyRecurrence": "Microsoft.LoadTestService.HourlyRecurrence",
+    "azure.developer.loadtesting.models.LoadTestConfiguration": "Microsoft.LoadTestService.LoadTestConfiguration",
+    "azure.developer.loadtesting.models.MetricAvailability": "Microsoft.LoadTestService.MetricAvailability",
+    "azure.developer.loadtesting.models.MetricDefinition": "Microsoft.LoadTestService.MetricDefinition",
+
"azure.developer.loadtesting.models.MetricDefinitionCollection": "Microsoft.LoadTestService.MetricDefinitionCollection", + "azure.developer.loadtesting.models.MetricNamespace": "Microsoft.LoadTestService.MetricNamespace", + "azure.developer.loadtesting.models.MetricNamespaceCollection": "Microsoft.LoadTestService.MetricNamespaceCollection", + "azure.developer.loadtesting.models.MetricRequestPayload": "Microsoft.LoadTestService.MetricRequestPayload", + "azure.developer.loadtesting.models.MetricValue": "Microsoft.LoadTestService.MetricValue", + "azure.developer.loadtesting.models.MonthlyRecurrenceByDates": "Microsoft.LoadTestService.MonthlyRecurrenceByDates", + "azure.developer.loadtesting.models.MonthlyRecurrenceByWeekDays": "Microsoft.LoadTestService.MonthlyRecurrenceByWeekDays", + "azure.developer.loadtesting.models.NameAndDescription": "Microsoft.LoadTestService.NameAndDescription", + "azure.developer.loadtesting.models.NotificationRule": "Microsoft.LoadTestService.NotificationRule", + "azure.developer.loadtesting.models.OperationStatus": "Microsoft.LoadTestService.OperationStatus", + "azure.developer.loadtesting.models.OptionalLoadTestConfiguration": "Microsoft.LoadTestService.OptionalLoadTestConfiguration", + "azure.developer.loadtesting.models.PassFailCriteria": "Microsoft.LoadTestService.PassFailCriteria", + "azure.developer.loadtesting.models.PassFailMetric": "Microsoft.LoadTestService.PassFailMetric", + "azure.developer.loadtesting.models.PassFailServerMetric": "Microsoft.LoadTestService.PassFailServerMetric", + "azure.developer.loadtesting.models.RecurrenceEnd": "Microsoft.LoadTestService.RecurrenceEnd", + "azure.developer.loadtesting.models.RecurrenceStatus": "Microsoft.LoadTestService.RecurrenceStatus", + "azure.developer.loadtesting.models.RecurrenceWithCron": "Microsoft.LoadTestService.RecurrenceWithCron", + "azure.developer.loadtesting.models.RegionalConfiguration": "Microsoft.LoadTestService.RegionalConfiguration", + "azure.developer.loadtesting.models.ResourceMetric": "Microsoft.LoadTestService.ResourceMetric", + "azure.developer.loadtesting.models.Trigger": "Microsoft.LoadTestService.Trigger", + "azure.developer.loadtesting.models.ScheduleTestsTrigger": "Microsoft.LoadTestService.ScheduleTestsTrigger", + "azure.developer.loadtesting.models.Secret": "Microsoft.LoadTestService.Secret", + "azure.developer.loadtesting.models.StateDetails": "Microsoft.LoadTestService.StateDetails", + "azure.developer.loadtesting.models.Test": "Microsoft.LoadTestService.Test", + "azure.developer.loadtesting.models.TestAppComponents": "Microsoft.LoadTestService.TestAppComponents", + "azure.developer.loadtesting.models.TestFileInfo": "Microsoft.LoadTestService.TestFileInfo", + "azure.developer.loadtesting.models.TestInputArtifacts": "Microsoft.LoadTestService.TestInputArtifacts", + "azure.developer.loadtesting.models.TestPreferences": "Microsoft.LoadTestService.TestPreferences", + "azure.developer.loadtesting.models.TestProfile": "Microsoft.LoadTestService.TestProfile", + "azure.developer.loadtesting.models.TestProfileRun": "Microsoft.LoadTestService.TestProfileRun", + "azure.developer.loadtesting.models.TestProfileRunRecommendation": "Microsoft.LoadTestService.TestProfileRunRecommendation", + "azure.developer.loadtesting.models.TestRun": "Microsoft.LoadTestService.TestRun", + "azure.developer.loadtesting.models.TestRunAppComponents": "Microsoft.LoadTestService.TestRunAppComponents", + "azure.developer.loadtesting.models.TestRunArtifacts": "Microsoft.LoadTestService.TestRunArtifacts", + 
"azure.developer.loadtesting.models.TestRunDetail": "Microsoft.LoadTestService.TestRunDetail", + "azure.developer.loadtesting.models.TestRunEndedEventCondition": "Microsoft.LoadTestService.TestRunEndedEventCondition", + "azure.developer.loadtesting.models.TestsNotificationEventFilter": "Microsoft.LoadTestService.TestsNotificationEventFilter", + "azure.developer.loadtesting.models.TestRunEndedNotificationEventFilter": "Microsoft.LoadTestService.TestRunEndedNotificationEventFilter", + "azure.developer.loadtesting.models.TestRunFileInfo": "Microsoft.LoadTestService.TestRunFileInfo", + "azure.developer.loadtesting.models.TestRunInputArtifacts": "Microsoft.LoadTestService.TestRunInputArtifacts", + "azure.developer.loadtesting.models.TestRunInsightColumn": "Microsoft.LoadTestService.TestRunInsightColumn", + "azure.developer.loadtesting.models.TestRunInsights": "Microsoft.LoadTestService.TestRunInsights", + "azure.developer.loadtesting.models.TestRunOutputArtifacts": "Microsoft.LoadTestService.TestRunOutputArtifacts", + "azure.developer.loadtesting.models.TestRunServerMetricsConfiguration": "Microsoft.LoadTestService.TestRunServerMetricsConfiguration", + "azure.developer.loadtesting.models.TestRunStartedNotificationEventFilter": "Microsoft.LoadTestService.TestRunStartedNotificationEventFilter", + "azure.developer.loadtesting.models.TestRunStatistics": "Microsoft.LoadTestService.TestRunStatistics", + "azure.developer.loadtesting.models.TestServerMetricsConfiguration": "Microsoft.LoadTestService.TestServerMetricsConfiguration", + "azure.developer.loadtesting.models.TestsNotificationRule": "Microsoft.LoadTestService.TestsNotificationRule", + "azure.developer.loadtesting.models.TimeSeriesElement": "Microsoft.LoadTestService.TimeSeriesElement", + "azure.developer.loadtesting.models.TriggerCompletedNotificationEventFilter": "Microsoft.LoadTestService.TriggerCompletedNotificationEventFilter", + "azure.developer.loadtesting.models.TriggerDisabledNotificationEventFilter": "Microsoft.LoadTestService.TriggerDisabledNotificationEventFilter", + "azure.developer.loadtesting.models.WeeklyRecurrence": "Microsoft.LoadTestService.WeeklyRecurrence", + "azure.developer.loadtesting.models.PFMetrics": "Microsoft.LoadTestService.PFMetrics", + "azure.developer.loadtesting.models.PassFailAggregationFunction": "Microsoft.LoadTestService.PassFailAggregationFunction", + "azure.developer.loadtesting.models.PassFailAction": "Microsoft.LoadTestService.PassFailAction", + "azure.developer.loadtesting.models.PassFailResult": "Microsoft.LoadTestService.PassFailResult", + "azure.developer.loadtesting.models.SecretType": "Microsoft.LoadTestService.SecretType", + "azure.developer.loadtesting.models.CertificateType": "Microsoft.LoadTestService.CertificateType", + "azure.developer.loadtesting.models.FileType": "Microsoft.LoadTestService.FileType", + "azure.developer.loadtesting.models.FileValidationStatus": "Microsoft.LoadTestService.FileValidationStatus", + "azure.developer.loadtesting.models.TestKind": "Microsoft.LoadTestService.TestKind", + "azure.developer.loadtesting.models.ManagedIdentityType": "Microsoft.LoadTestService.ManagedIdentityType", + "azure.developer.loadtesting.models.ResourceKind": "Microsoft.LoadTestService.ResourceKind", + "azure.developer.loadtesting.models.TriggerType": "Microsoft.LoadTestService.TriggerType", + "azure.developer.loadtesting.models.TriggerState": "Microsoft.LoadTestService.TriggerState", + "azure.developer.loadtesting.models.Frequency": "Microsoft.LoadTestService.Frequency", + 
"azure.developer.loadtesting.models.WeekDays": "Microsoft.LoadTestService.WeekDays", + "azure.developer.loadtesting.models.NotificationScopeType": "Microsoft.LoadTestService.NotificationScopeType", + "azure.developer.loadtesting.models.NotificationEventType": "Microsoft.LoadTestService.NotificationEventType", + "azure.developer.loadtesting.models.TestRunStatus": "Microsoft.LoadTestService.TestRunStatus", + "azure.developer.loadtesting.models.PassFailTestResult": "Microsoft.LoadTestService.PassFailTestResult", + "azure.developer.loadtesting.models.OperationState": "Azure.Core.Foundations.OperationState", + "azure.developer.loadtesting.models.OperationKind": "Microsoft.LoadTestService.OperationKind", + "azure.developer.loadtesting.models.RequestDataLevel": "Microsoft.LoadTestService.RequestDataLevel", + "azure.developer.loadtesting.models.CreatedByType": "Microsoft.LoadTestService.CreatedByType", + "azure.developer.loadtesting.models.TimeGrain": "Microsoft.LoadTestService.TimeGrain", + "azure.developer.loadtesting.models.Aggregation": "Microsoft.LoadTestService.Aggregation", + "azure.developer.loadtesting.models.MetricUnit": "Microsoft.LoadTestService.MetricUnit", + "azure.developer.loadtesting.models.TestProfileRunStatus": "Microsoft.LoadTestService.TestProfileRunStatus", + "azure.developer.loadtesting.models.RecommendationCategory": "Microsoft.LoadTestService.RecommendationCategory", + "azure.developer.loadtesting.LoadTestAdministrationClient.create_or_update_test": "Customizations.AdministrationOperations.createOrUpdateTest", + "azure.developer.loadtesting.aio.LoadTestAdministrationClient.create_or_update_test": "Customizations.AdministrationOperations.createOrUpdateTest", + "azure.developer.loadtesting.LoadTestAdministrationClient.create_or_update_app_components": "Customizations.AdministrationOperations.createOrUpdateAppComponents", + "azure.developer.loadtesting.aio.LoadTestAdministrationClient.create_or_update_app_components": "Customizations.AdministrationOperations.createOrUpdateAppComponents", + "azure.developer.loadtesting.LoadTestAdministrationClient.create_or_update_server_metrics_config": "Customizations.AdministrationOperations.createOrUpdateServerMetricsConfig", + "azure.developer.loadtesting.aio.LoadTestAdministrationClient.create_or_update_server_metrics_config": "Customizations.AdministrationOperations.createOrUpdateServerMetricsConfig", + "azure.developer.loadtesting.LoadTestAdministrationClient.get_app_components": "Customizations.AdministrationOperations.getAppComponents", + "azure.developer.loadtesting.aio.LoadTestAdministrationClient.get_app_components": "Customizations.AdministrationOperations.getAppComponents", + "azure.developer.loadtesting.LoadTestAdministrationClient.get_server_metrics_config": "Customizations.AdministrationOperations.getServerMetricsConfig", + "azure.developer.loadtesting.aio.LoadTestAdministrationClient.get_server_metrics_config": "Customizations.AdministrationOperations.getServerMetricsConfig", + "azure.developer.loadtesting.LoadTestAdministrationClient.get_test": "Customizations.AdministrationOperations.getTest", + "azure.developer.loadtesting.aio.LoadTestAdministrationClient.get_test": "Customizations.AdministrationOperations.getTest", + "azure.developer.loadtesting.LoadTestAdministrationClient.get_test_file": "Customizations.AdministrationOperations.getTestFile", + "azure.developer.loadtesting.aio.LoadTestAdministrationClient.get_test_file": "Customizations.AdministrationOperations.getTestFile", + 
"azure.developer.loadtesting.LoadTestAdministrationClient.list_test_files": "Customizations.AdministrationOperations.listTestFiles", + "azure.developer.loadtesting.aio.LoadTestAdministrationClient.list_test_files": "Customizations.AdministrationOperations.listTestFiles", + "azure.developer.loadtesting.LoadTestAdministrationClient.list_tests": "Customizations.AdministrationOperations.listTests", + "azure.developer.loadtesting.aio.LoadTestAdministrationClient.list_tests": "Customizations.AdministrationOperations.listTests", + "azure.developer.loadtesting.LoadTestAdministrationClient.delete_test_file": "Customizations.AdministrationOperations.deleteTestFile", + "azure.developer.loadtesting.aio.LoadTestAdministrationClient.delete_test_file": "Customizations.AdministrationOperations.deleteTestFile", + "azure.developer.loadtesting.LoadTestAdministrationClient.delete_test": "Customizations.AdministrationOperations.deleteTest", + "azure.developer.loadtesting.aio.LoadTestAdministrationClient.delete_test": "Customizations.AdministrationOperations.deleteTest", + "azure.developer.loadtesting.LoadTestAdministrationClient.create_or_update_test_profile": "Customizations.AdministrationOperations.createOrUpdateTestProfile", + "azure.developer.loadtesting.aio.LoadTestAdministrationClient.create_or_update_test_profile": "Customizations.AdministrationOperations.createOrUpdateTestProfile", + "azure.developer.loadtesting.LoadTestAdministrationClient.delete_test_profile": "Customizations.AdministrationOperations.deleteTestProfile", + "azure.developer.loadtesting.aio.LoadTestAdministrationClient.delete_test_profile": "Customizations.AdministrationOperations.deleteTestProfile", + "azure.developer.loadtesting.LoadTestAdministrationClient.get_test_profile": "Customizations.AdministrationOperations.getTestProfile", + "azure.developer.loadtesting.aio.LoadTestAdministrationClient.get_test_profile": "Customizations.AdministrationOperations.getTestProfile", + "azure.developer.loadtesting.LoadTestAdministrationClient.list_test_profiles": "Customizations.AdministrationOperations.listTestProfiles", + "azure.developer.loadtesting.aio.LoadTestAdministrationClient.list_test_profiles": "Customizations.AdministrationOperations.listTestProfiles", + "azure.developer.loadtesting.LoadTestAdministrationClient.create_or_update_trigger": "Customizations.AdministrationOperations.createOrUpdateTrigger", + "azure.developer.loadtesting.aio.LoadTestAdministrationClient.create_or_update_trigger": "Customizations.AdministrationOperations.createOrUpdateTrigger", + "azure.developer.loadtesting.LoadTestAdministrationClient.delete_trigger": "Customizations.AdministrationOperations.deleteTrigger", + "azure.developer.loadtesting.aio.LoadTestAdministrationClient.delete_trigger": "Customizations.AdministrationOperations.deleteTrigger", + "azure.developer.loadtesting.LoadTestAdministrationClient.get_trigger": "Customizations.AdministrationOperations.getTrigger", + "azure.developer.loadtesting.aio.LoadTestAdministrationClient.get_trigger": "Customizations.AdministrationOperations.getTrigger", + "azure.developer.loadtesting.LoadTestAdministrationClient.list_triggers": "Customizations.AdministrationOperations.listTriggers", + "azure.developer.loadtesting.aio.LoadTestAdministrationClient.list_triggers": "Customizations.AdministrationOperations.listTriggers", + "azure.developer.loadtesting.LoadTestAdministrationClient.create_or_update_notification_rule": "Customizations.AdministrationOperations.createOrUpdateNotificationRule", + 
"azure.developer.loadtesting.aio.LoadTestAdministrationClient.create_or_update_notification_rule": "Customizations.AdministrationOperations.createOrUpdateNotificationRule", + "azure.developer.loadtesting.LoadTestAdministrationClient.delete_notification_rule": "Customizations.AdministrationOperations.deleteNotificationRule", + "azure.developer.loadtesting.aio.LoadTestAdministrationClient.delete_notification_rule": "Customizations.AdministrationOperations.deleteNotificationRule", + "azure.developer.loadtesting.LoadTestAdministrationClient.get_notification_rule": "Customizations.AdministrationOperations.getNotificationRule", + "azure.developer.loadtesting.aio.LoadTestAdministrationClient.get_notification_rule": "Customizations.AdministrationOperations.getNotificationRule", + "azure.developer.loadtesting.LoadTestAdministrationClient.list_notification_rules": "Customizations.AdministrationOperations.listNotificationRules", + "azure.developer.loadtesting.aio.LoadTestAdministrationClient.list_notification_rules": "Customizations.AdministrationOperations.listNotificationRules", + "azure.developer.loadtesting.LoadTestAdministrationClient.begin_clone_test": "Customizations.AdministrationOperations.cloneTest", + "azure.developer.loadtesting.aio.LoadTestAdministrationClient.begin_clone_test": "Customizations.AdministrationOperations.cloneTest", + "azure.developer.loadtesting.LoadTestAdministrationClient.begin_generate_test_plan_recommendations": "Customizations.AdministrationOperations.generateTestPlanRecommendations", + "azure.developer.loadtesting.aio.LoadTestAdministrationClient.begin_generate_test_plan_recommendations": "Customizations.AdministrationOperations.generateTestPlanRecommendations", + "azure.developer.loadtesting.LoadTestAdministrationClient.get_operation_status": "Customizations.AdministrationOperations.getOperationStatus", + "azure.developer.loadtesting.aio.LoadTestAdministrationClient.get_operation_status": "Customizations.AdministrationOperations.getOperationStatus", + "azure.developer.loadtesting.LoadTestRunClient.create_or_update_app_components": "Customizations.TestRunOperations.createOrUpdateAppComponents", + "azure.developer.loadtesting.aio.LoadTestRunClient.create_or_update_app_components": "Customizations.TestRunOperations.createOrUpdateAppComponents", + "azure.developer.loadtesting.LoadTestRunClient.create_or_update_server_metrics_config": "Customizations.TestRunOperations.createOrUpdateServerMetricsConfig", + "azure.developer.loadtesting.aio.LoadTestRunClient.create_or_update_server_metrics_config": "Customizations.TestRunOperations.createOrUpdateServerMetricsConfig", + "azure.developer.loadtesting.LoadTestRunClient.delete_test_run": "Customizations.TestRunOperations.deleteTestRun", + "azure.developer.loadtesting.aio.LoadTestRunClient.delete_test_run": "Customizations.TestRunOperations.deleteTestRun", + "azure.developer.loadtesting.LoadTestRunClient.get_app_components": "Customizations.TestRunOperations.getAppComponents", + "azure.developer.loadtesting.aio.LoadTestRunClient.get_app_components": "Customizations.TestRunOperations.getAppComponents", + "azure.developer.loadtesting.LoadTestRunClient.get_server_metrics_config": "Customizations.TestRunOperations.getServerMetricsConfig", + "azure.developer.loadtesting.aio.LoadTestRunClient.get_server_metrics_config": "Customizations.TestRunOperations.getServerMetricsConfig", + "azure.developer.loadtesting.LoadTestRunClient.get_test_run": "Customizations.TestRunOperations.getTestRun", + 
"azure.developer.loadtesting.aio.LoadTestRunClient.get_test_run": "Customizations.TestRunOperations.getTestRun", + "azure.developer.loadtesting.LoadTestRunClient.get_test_run_file": "Customizations.TestRunOperations.getTestRunFile", + "azure.developer.loadtesting.aio.LoadTestRunClient.get_test_run_file": "Customizations.TestRunOperations.getTestRunFile", + "azure.developer.loadtesting.LoadTestRunClient.list_metric_dimension_values": "Customizations.TestRunOperations.listMetricDimensionValues", + "azure.developer.loadtesting.aio.LoadTestRunClient.list_metric_dimension_values": "Customizations.TestRunOperations.listMetricDimensionValues", + "azure.developer.loadtesting.LoadTestRunClient.get_metric_definitions": "Customizations.TestRunOperations.listMetricDefinitions", + "azure.developer.loadtesting.aio.LoadTestRunClient.get_metric_definitions": "Customizations.TestRunOperations.listMetricDefinitions", + "azure.developer.loadtesting.LoadTestRunClient.get_metric_namespaces": "Customizations.TestRunOperations.listMetricNamespaces", + "azure.developer.loadtesting.aio.LoadTestRunClient.get_metric_namespaces": "Customizations.TestRunOperations.listMetricNamespaces", + "azure.developer.loadtesting.LoadTestRunClient.list_metrics": "Customizations.TestRunOperations.listMetrics", + "azure.developer.loadtesting.aio.LoadTestRunClient.list_metrics": "Customizations.TestRunOperations.listMetrics", + "azure.developer.loadtesting.LoadTestRunClient.list_test_runs": "Customizations.TestRunOperations.listTestRuns", + "azure.developer.loadtesting.aio.LoadTestRunClient.list_test_runs": "Customizations.TestRunOperations.listTestRuns", + "azure.developer.loadtesting.LoadTestRunClient.stop_test_run": "Customizations.TestRunOperations.stop", + "azure.developer.loadtesting.aio.LoadTestRunClient.stop_test_run": "Customizations.TestRunOperations.stop", + "azure.developer.loadtesting.LoadTestRunClient.delete_test_profile_run": "Customizations.TestRunOperations.deleteTestProfileRun", + "azure.developer.loadtesting.aio.LoadTestRunClient.delete_test_profile_run": "Customizations.TestRunOperations.deleteTestProfileRun", + "azure.developer.loadtesting.LoadTestRunClient.get_test_profile_run": "Customizations.TestRunOperations.getTestProfileRun", + "azure.developer.loadtesting.aio.LoadTestRunClient.get_test_profile_run": "Customizations.TestRunOperations.getTestProfileRun", + "azure.developer.loadtesting.LoadTestRunClient.list_test_profile_runs": "Customizations.TestRunOperations.listTestProfileRuns", + "azure.developer.loadtesting.aio.LoadTestRunClient.list_test_profile_runs": "Customizations.TestRunOperations.listTestProfileRuns", + "azure.developer.loadtesting.LoadTestRunClient.stop_test_profile_run": "Customizations.TestRunOperations.stopTestProfileRun", + "azure.developer.loadtesting.aio.LoadTestRunClient.stop_test_profile_run": "Customizations.TestRunOperations.stopTestProfileRun", + "azure.developer.loadtesting.LoadTestRunClient.get_latest_test_run_insights": "Customizations.TestRunOperations.getLatestTestRunInsights", + "azure.developer.loadtesting.aio.LoadTestRunClient.get_latest_test_run_insights": "Customizations.TestRunOperations.getLatestTestRunInsights", + "azure.developer.loadtesting.LoadTestRunClient.patch_latest_test_run_insights": "Customizations.TestRunOperations.patchLatestTestRunInsights", + "azure.developer.loadtesting.aio.LoadTestRunClient.patch_latest_test_run_insights": "Customizations.TestRunOperations.patchLatestTestRunInsights", + 
"azure.developer.loadtesting.LoadTestRunClient.begin_generate_test_run_insights": "Customizations.TestRunOperations.generateTestRunInsights", + "azure.developer.loadtesting.aio.LoadTestRunClient.begin_generate_test_run_insights": "Customizations.TestRunOperations.generateTestRunInsights" + } +} \ No newline at end of file diff --git a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_client.py b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_client.py index 3e5aa0fba177..8d8c57d55d66 100644 --- a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_client.py +++ b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_client.py @@ -15,14 +15,14 @@ from azure.core.rest import HttpRequest, HttpResponse from ._configuration import LoadTestAdministrationClientConfiguration, LoadTestRunClientConfiguration -from ._operations import LoadTestAdministrationClientOperationsMixin, LoadTestRunClientOperationsMixin -from ._serialization import Deserializer, Serializer +from ._operations import _LoadTestAdministrationClientOperationsMixin, _LoadTestRunClientOperationsMixin +from ._utils.serialization import Deserializer, Serializer if TYPE_CHECKING: from azure.core.credentials import TokenCredential -class LoadTestAdministrationClient(LoadTestAdministrationClientOperationsMixin): +class LoadTestAdministrationClient(_LoadTestAdministrationClientOperationsMixin): """LoadTestAdministrationClient. :param endpoint: Required. @@ -30,14 +30,17 @@ class LoadTestAdministrationClient(LoadTestAdministrationClientOperationsMixin): :param credential: Credential used to authenticate requests to the service. Required. :type credential: ~azure.core.credentials.TokenCredential :keyword api_version: The API version to use for this operation. Default value is - "2024-12-01-preview". Note that overriding this default value may result in unsupported + "2025-11-01-preview". Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. """ def __init__(self, endpoint: str, credential: "TokenCredential", **kwargs: Any) -> None: _endpoint = "https://{endpoint}" self._config = LoadTestAdministrationClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) + _policies = kwargs.pop("policies", None) if _policies is None: _policies = [ @@ -98,7 +101,7 @@ def __exit__(self, *exc_details: Any) -> None: self._client.__exit__(*exc_details) -class LoadTestRunClient(LoadTestRunClientOperationsMixin): +class LoadTestRunClient(_LoadTestRunClientOperationsMixin): """LoadTestRunClient. :param endpoint: Required. @@ -106,14 +109,17 @@ class LoadTestRunClient(LoadTestRunClientOperationsMixin): :param credential: Credential used to authenticate requests to the service. Required. :type credential: ~azure.core.credentials.TokenCredential :keyword api_version: The API version to use for this operation. Default value is - "2024-12-01-preview". Note that overriding this default value may result in unsupported + "2025-11-01-preview". Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
""" def __init__(self, endpoint: str, credential: "TokenCredential", **kwargs: Any) -> None: _endpoint = "https://{endpoint}" self._config = LoadTestRunClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) + _policies = kwargs.pop("policies", None) if _policies is None: _policies = [ diff --git a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_configuration.py b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_configuration.py index adf093acd20f..fc6108764990 100644 --- a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_configuration.py +++ b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_configuration.py @@ -27,13 +27,13 @@ class LoadTestAdministrationClientConfiguration: # pylint: disable=too-many-ins :param credential: Credential used to authenticate requests to the service. Required. :type credential: ~azure.core.credentials.TokenCredential :keyword api_version: The API version to use for this operation. Default value is - "2024-12-01-preview". Note that overriding this default value may result in unsupported + "2025-11-01-preview". Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str """ def __init__(self, endpoint: str, credential: "TokenCredential", **kwargs: Any) -> None: - api_version: str = kwargs.pop("api_version", "2024-12-01-preview") + api_version: str = kwargs.pop("api_version", "2025-11-01-preview") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") @@ -75,13 +75,13 @@ class LoadTestRunClientConfiguration: # pylint: disable=too-many-instance-attri :param credential: Credential used to authenticate requests to the service. Required. :type credential: ~azure.core.credentials.TokenCredential :keyword api_version: The API version to use for this operation. Default value is - "2024-12-01-preview". Note that overriding this default value may result in unsupported + "2025-11-01-preview". Note that overriding this default value may result in unsupported behavior. 
:paramtype api_version: str """ def __init__(self, endpoint: str, credential: "TokenCredential", **kwargs: Any) -> None: - api_version: str = kwargs.pop("api_version", "2024-12-01-preview") + api_version: str = kwargs.pop("api_version", "2025-11-01-preview") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") diff --git a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_operations/__init__.py b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_operations/__init__.py index 93b9c55d70d8..8809651f9db9 100644 --- a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_operations/__init__.py +++ b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_operations/__init__.py @@ -12,16 +12,13 @@ if TYPE_CHECKING: from ._patch import * # pylint: disable=unused-wildcard-import -from ._operations import LoadTestAdministrationClientOperationsMixin # type: ignore -from ._operations import LoadTestRunClientOperationsMixin # type: ignore +from ._operations import _LoadTestAdministrationClientOperationsMixin # type: ignore # pylint: disable=unused-import +from ._operations import _LoadTestRunClientOperationsMixin # type: ignore # pylint: disable=unused-import from ._patch import __all__ as _patch_all from ._patch import * from ._patch import patch_sdk as _patch_sdk -__all__ = [ - "LoadTestAdministrationClientOperationsMixin", - "LoadTestRunClientOperationsMixin", -] +__all__ = [] __all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore _patch_sdk() diff --git a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_operations/_operations.py b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_operations/_operations.py index 2e86f4dbdb46..0783f86f0d3a 100644 --- a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_operations/_operations.py +++ b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_operations/_operations.py @@ -6,13 +6,14 @@ # Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- +from collections.abc import MutableMapping import datetime from io import IOBase import json -import sys -from typing import Any, Callable, Dict, IO, Iterable, List, Optional, TypeVar, Union, overload +from typing import Any, Callable, IO, Iterator, Optional, TypeVar, Union, cast, overload import urllib.parse +from azure.core import PipelineClient from azure.core.exceptions import ( ClientAuthenticationError, HttpResponseError, @@ -25,23 +26,23 @@ ) from azure.core.paging import ItemPaged from azure.core.pipeline import PipelineResponse +from azure.core.polling import LROPoller, NoPolling, PollingMethod +from azure.core.polling.base_polling import LROBasePolling from azure.core.rest import HttpRequest, HttpResponse from azure.core.tracing.decorator import distributed_trace from azure.core.utils import case_insensitive_dict from .. 
import models as _models -from .._model_base import SdkJSONEncoder, _deserialize -from .._serialization import Serializer +from .._configuration import LoadTestAdministrationClientConfiguration, LoadTestRunClientConfiguration +from .._utils.model_base import SdkJSONEncoder, _deserialize +from .._utils.serialization import Serializer +from .._utils.utils import ClientMixinABC from .._validation import api_version_validation -from .._vendor import LoadTestAdministrationClientMixinABC, LoadTestRunClientMixinABC -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore -JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object +JSON = MutableMapping[str, Any] +_Unset: Any = object() T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]] _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False @@ -54,7 +55,7 @@ def build_load_test_administration_create_or_update_test_request( # pylint: dis _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-12-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -83,7 +84,7 @@ def build_load_test_administration_create_or_update_app_components_request( # p _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("content-type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-12-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -112,7 +113,7 @@ def build_load_test_administration_create_or_update_server_metrics_config_reques _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("content-type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-12-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -140,7 +141,7 @@ def build_load_test_administration_get_app_components_request( # pylint: disabl _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-12-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -166,7 +167,7 @@ def build_load_test_administration_get_server_metrics_config_request( # pylint: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-12-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = 
_headers.pop("Accept", "application/json") # Construct URL @@ -192,7 +193,7 @@ def build_load_test_administration_get_test_request( # pylint: disable=name-too _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-12-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -218,7 +219,7 @@ def build_load_test_administration_get_test_file_request( # pylint: disable=nam _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-12-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -245,7 +246,7 @@ def build_load_test_administration_list_test_files_request( # pylint: disable=n _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-12-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -277,7 +278,7 @@ def build_load_test_administration_list_tests_request( # pylint: disable=name-t _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-12-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -311,7 +312,7 @@ def build_load_test_administration_begin_upload_test_file_request( # pylint: di _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: str = kwargs.pop("content_type") - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-12-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -338,12 +339,9 @@ def build_load_test_administration_begin_upload_test_file_request( # pylint: di def build_load_test_administration_delete_test_file_request( # pylint: disable=name-too-long test_id: str, file_name: str, **kwargs: Any ) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-12-01-preview")) - accept = _headers.pop("Accept", "application/json") - + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) # Construct URL _url = "/tests/{testId}/files/{fileName}" path_format_arguments = { @@ -356,21 +354,15 @@ def build_load_test_administration_delete_test_file_request( # pylint: disable= # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="DELETE", 
url=_url, params=_params, headers=_headers, **kwargs) + return HttpRequest(method="DELETE", url=_url, params=_params, **kwargs) def build_load_test_administration_delete_test_request( # pylint: disable=name-too-long test_id: str, **kwargs: Any ) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-12-01-preview")) - accept = _headers.pop("Accept", "application/json") - + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) # Construct URL _url = "/tests/{testId}" path_format_arguments = { @@ -382,10 +374,7 @@ def build_load_test_administration_delete_test_request( # pylint: disable=name- # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + return HttpRequest(method="DELETE", url=_url, params=_params, **kwargs) def build_load_test_administration_create_or_update_test_profile_request( # pylint: disable=name-too-long @@ -395,7 +384,7 @@ def build_load_test_administration_create_or_update_test_profile_request( # pyl _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-12-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -420,12 +409,9 @@ def build_load_test_administration_create_or_update_test_profile_request( # pyl def build_load_test_administration_delete_test_profile_request( # pylint: disable=name-too-long test_profile_id: str, **kwargs: Any ) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-12-01-preview")) - accept = _headers.pop("Accept", "application/json") - + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) # Construct URL _url = "/test-profiles/{testProfileId}" path_format_arguments = { @@ -437,10 +423,7 @@ def build_load_test_administration_delete_test_profile_request( # pylint: disab # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + return HttpRequest(method="DELETE", url=_url, params=_params, **kwargs) def build_load_test_administration_get_test_profile_request( # pylint: disable=name-too-long @@ -449,7 +432,7 @@ def build_load_test_administration_get_test_profile_request( # pylint: disable= _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-12-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL 
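# ----------------------------------------------------------------------------
# Editor's note (not part of the generated patch): a simplified sketch of the
# request-builder pattern these build_*_request helpers follow -- format the
# path, pin the api-version query parameter, and return an azure.core.rest
# HttpRequest. The function name and path below are illustrative assumptions;
# the generated builders above are the authoritative versions.
from typing import Any

from azure.core.rest import HttpRequest


def build_example_get_test_request(test_id: str, **kwargs: Any) -> HttpRequest:
    # Default to the API version introduced by this patch; callers may override it.
    api_version: str = kwargs.pop("api_version", "2025-11-01-preview")
    url = "/tests/{testId}".format(testId=test_id)
    params = {"api-version": api_version}
    headers = {"Accept": "application/json"}
    return HttpRequest(method="GET", url=url, params=params, headers=headers)
# ----------------------------------------------------------------------------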
@@ -474,14 +457,14 @@ def build_load_test_administration_list_test_profiles_request( # pylint: disabl maxpagesize: Optional[int] = None, last_modified_start_time: Optional[datetime.datetime] = None, last_modified_end_time: Optional[datetime.datetime] = None, - test_profile_ids: Optional[List[str]] = None, - test_ids: Optional[List[str]] = None, + test_profile_ids: Optional[list[str]] = None, + test_ids: Optional[list[str]] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-12-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -508,28 +491,26 @@ def build_load_test_administration_list_test_profiles_request( # pylint: disabl return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_load_test_run_begin_test_run_request( # pylint: disable=name-too-long - test_run_id: str, *, old_test_run_id: Optional[str] = None, **kwargs: Any +def build_load_test_administration_create_or_update_trigger_request( # pylint: disable=name-too-long + trigger_id: str, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-12-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL - _url = "/test-runs/{testRunId}" + _url = "/triggers/{triggerId}" path_format_arguments = { - "testRunId": _SERIALIZER.url("test_run_id", test_run_id, "str"), + "triggerId": _SERIALIZER.url("trigger_id", trigger_id, "str"), } _url: str = _url.format(**path_format_arguments) # type: ignore # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if old_test_run_id is not None: - _params["oldTestRunId"] = _SERIALIZER.query("old_test_run_id", old_test_run_id, "str") # Construct headers if content_type is not None: @@ -539,20 +520,16 @@ def build_load_test_run_begin_test_run_request( # pylint: disable=name-too-long return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) -def build_load_test_run_create_or_update_app_components_request( # pylint: disable=name-too-long - test_run_id: str, **kwargs: Any +def build_load_test_administration_delete_trigger_request( # pylint: disable=name-too-long + trigger_id: str, **kwargs: Any ) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("content-type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-12-01-preview")) - accept = _headers.pop("Accept", "application/json") - + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) # Construct URL - _url = "/test-runs/{testRunId}/app-components" + _url = "/triggers/{triggerId}" path_format_arguments = { - "testRunId": _SERIALIZER.url("test_run_id", test_run_id, "str"), + 
"triggerId": _SERIALIZER.url("trigger_id", trigger_id, "str"), } _url: str = _url.format(**path_format_arguments) # type: ignore @@ -560,28 +537,22 @@ def build_load_test_run_create_or_update_app_components_request( # pylint: disa # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - # Construct headers - if content_type is not None: - _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) + return HttpRequest(method="DELETE", url=_url, params=_params, **kwargs) -def build_load_test_run_create_or_update_server_metrics_config_request( # pylint: disable=name-too-long - test_run_id: str, **kwargs: Any +def build_load_test_administration_get_trigger_request( # pylint: disable=name-too-long + trigger_id: str, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("content-type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-12-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL - _url = "/test-runs/{testRunId}/server-metrics-config" + _url = "/triggers/{triggerId}" path_format_arguments = { - "testRunId": _SERIALIZER.url("test_run_id", test_run_id, "str"), + "triggerId": _SERIALIZER.url("trigger_id", trigger_id, "str"), } _url: str = _url.format(**path_format_arguments) # type: ignore @@ -590,52 +561,64 @@ def build_load_test_run_create_or_update_server_metrics_config_request( # pylin _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers - if content_type is not None: - _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_load_test_run_delete_test_run_request( # pylint: disable=name-too-long - test_run_id: str, **kwargs: Any +def build_load_test_administration_list_triggers_request( # pylint: disable=name-too-long + *, + test_ids: Optional[str] = None, + states: Optional[Union[str, _models.TriggerState]] = None, + last_modified_start_time: Optional[datetime.datetime] = None, + last_modified_end_time: Optional[datetime.datetime] = None, + maxpagesize: Optional[int] = None, + **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-12-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL - _url = "/test-runs/{testRunId}" - path_format_arguments = { - "testRunId": _SERIALIZER.url("test_run_id", test_run_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore + _url = "/triggers" # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", 
api_version, "str") + if test_ids is not None: + _params["testIds"] = _SERIALIZER.query("test_ids", test_ids, "str") + if states is not None: + _params["states"] = _SERIALIZER.query("states", states, "str") + if last_modified_start_time is not None: + _params["lastModifiedStartTime"] = _SERIALIZER.query( + "last_modified_start_time", last_modified_start_time, "iso-8601" + ) + if last_modified_end_time is not None: + _params["lastModifiedEndTime"] = _SERIALIZER.query("last_modified_end_time", last_modified_end_time, "iso-8601") + if maxpagesize is not None: + _params["maxpagesize"] = _SERIALIZER.query("maxpagesize", maxpagesize, "int") # Construct headers _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_load_test_run_get_app_components_request( # pylint: disable=name-too-long - test_run_id: str, **kwargs: Any +def build_load_test_administration_create_or_update_notification_rule_request( # pylint: disable=name-too-long + notification_rule_id: str, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-12-01-preview")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL - _url = "/test-runs/{testRunId}/app-components" + _url = "/notification-rules/{notificationRuleId}" path_format_arguments = { - "testRunId": _SERIALIZER.url("test_run_id", test_run_id, "str"), + "notificationRuleId": _SERIALIZER.url("notification_rule_id", notification_rule_id, "str"), } _url: str = _url.format(**path_format_arguments) # type: ignore @@ -644,24 +627,23 @@ def build_load_test_run_get_app_components_request( # pylint: disable=name-too- _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) -def build_load_test_run_get_server_metrics_config_request( # pylint: disable=name-too-long - test_run_id: str, **kwargs: Any +def build_load_test_administration_delete_notification_rule_request( # pylint: disable=name-too-long + notification_rule_id: str, **kwargs: Any ) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-12-01-preview")) - accept = _headers.pop("Accept", "application/json") - + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) # Construct URL - _url = "/test-runs/{testRunId}/server-metrics-config" + _url = "/notification-rules/{notificationRuleId}" path_format_arguments = { - "testRunId": _SERIALIZER.url("test_run_id", test_run_id, "str"), + "notificationRuleId": _SERIALIZER.url("notification_rule_id", 
notification_rule_id, "str"), } _url: str = _url.format(**path_format_arguments) # type: ignore @@ -669,23 +651,22 @@ def build_load_test_run_get_server_metrics_config_request( # pylint: disable=na # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + return HttpRequest(method="DELETE", url=_url, params=_params, **kwargs) -def build_load_test_run_get_test_run_request(test_run_id: str, **kwargs: Any) -> HttpRequest: +def build_load_test_administration_get_notification_rule_request( # pylint: disable=name-too-long + notification_rule_id: str, **kwargs: Any +) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-12-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL - _url = "/test-runs/{testRunId}" + _url = "/notification-rules/{notificationRuleId}" path_format_arguments = { - "testRunId": _SERIALIZER.url("test_run_id", test_run_id, "str"), + "notificationRuleId": _SERIALIZER.url("notification_rule_id", notification_rule_id, "str"), } _url: str = _url.format(**path_format_arguments) # type: ignore @@ -699,26 +680,38 @@ def build_load_test_run_get_test_run_request(test_run_id: str, **kwargs: Any) -> return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_load_test_run_get_test_run_file_request( # pylint: disable=name-too-long - test_run_id: str, file_name: str, **kwargs: Any +def build_load_test_administration_list_notification_rules_request( # pylint: disable=name-too-long + *, + test_ids: Optional[str] = None, + scopes: Optional[str] = None, + last_modified_start_time: Optional[datetime.datetime] = None, + last_modified_end_time: Optional[datetime.datetime] = None, + maxpagesize: Optional[int] = None, + **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-12-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL - _url = "/test-runs/{testRunId}/files/{fileName}" - path_format_arguments = { - "testRunId": _SERIALIZER.url("test_run_id", test_run_id, "str"), - "fileName": _SERIALIZER.url("file_name", file_name, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore + _url = "/notification-rules" # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if test_ids is not None: + _params["testIds"] = _SERIALIZER.query("test_ids", test_ids, "str") + if scopes is not None: + _params["scopes"] = _SERIALIZER.query("scopes", scopes, "str") + if last_modified_start_time is not None: + _params["lastModifiedStartTime"] = _SERIALIZER.query( + "last_modified_start_time", last_modified_start_time, "iso-8601" + ) + if last_modified_end_time is not None: + _params["lastModifiedEndTime"] = _SERIALIZER.query("last_modified_end_time", last_modified_end_time, "iso-8601") + if 
maxpagesize is not None: + _params["maxpagesize"] = _SERIALIZER.query("maxpagesize", maxpagesize, "int") # Construct headers _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") @@ -726,85 +719,74 @@ def build_load_test_run_get_test_run_file_request( # pylint: disable=name-too-l return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_load_test_run_list_metric_dimension_values_request( # pylint: disable=name-too-long - test_run_id: str, - name: str, - *, - metric_name: str, - metric_namespace: str, - time_interval: str, - interval: Optional[Union[str, _models.TimeGrain]] = None, - **kwargs: Any +def build_load_test_administration_clone_test_request( # pylint: disable=name-too-long + test_id: str, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-12-01-preview")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL - _url = "/test-runs/{testRunId}/metric-dimensions/{name}/values" + _url = "/tests/{testId}:clone" path_format_arguments = { - "testRunId": _SERIALIZER.url("test_run_id", test_run_id, "str"), - "name": _SERIALIZER.url("name", name, "str"), + "testId": _SERIALIZER.url("test_id", test_id, "str"), } _url: str = _url.format(**path_format_arguments) # type: ignore # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - _params["metricname"] = _SERIALIZER.query("metric_name", metric_name, "str") - if interval is not None: - _params["interval"] = _SERIALIZER.query("interval", interval, "str") - _params["metricNamespace"] = _SERIALIZER.query("metric_namespace", metric_namespace, "str") - _params["timespan"] = _SERIALIZER.query("time_interval", time_interval, "str") # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_load_test_run_get_metric_definitions_request( # pylint: disable=name-too-long - test_run_id: str, *, metric_namespace: str, **kwargs: Any +def build_load_test_administration_generate_test_plan_recommendations_request( # pylint: disable=name-too-long + test_id: str, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-12-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL - _url = "/test-runs/{testRunId}/metric-definitions" + _url = "/tests/{testId}:generateTestPlanRecommendations" path_format_arguments = { - "testRunId": _SERIALIZER.url("test_run_id", test_run_id, "str"), + "testId": _SERIALIZER.url("test_id", test_id, "str"), } _url: str = _url.format(**path_format_arguments) # type: ignore # Construct parameters _params["api-version"] = 
_SERIALIZER.query("api_version", api_version, "str") - _params["metricNamespace"] = _SERIALIZER.query("metric_namespace", metric_namespace, "str") # Construct headers _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_load_test_run_get_metric_namespaces_request( # pylint: disable=name-too-long - test_run_id: str, **kwargs: Any +def build_load_test_administration_get_operation_status_request( # pylint: disable=name-too-long + operation_id: str, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-12-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL - _url = "/test-runs/{testRunId}/metric-namespaces" + _url = "/operations/{operationId}" path_format_arguments = { - "testRunId": _SERIALIZER.url("test_run_id", test_run_id, "str"), + "operationId": _SERIALIZER.url("operation_id", operation_id, "str"), } _url: str = _url.format(**path_format_arguments) # type: ignore @@ -818,25 +800,18 @@ def build_load_test_run_get_metric_namespaces_request( # pylint: disable=name-t return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_load_test_run_list_metrics_request( - test_run_id: str, - *, - metric_name: str, - metric_namespace: str, - time_interval: str, - aggregation: Optional[str] = None, - interval: Optional[Union[str, _models.TimeGrain]] = None, - **kwargs: Any +def build_load_test_run_begin_test_run_request( # pylint: disable=name-too-long + test_run_id: str, *, old_test_run_id: Optional[str] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-12-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL - _url = "/test-runs/{testRunId}/metrics" + _url = "/test-runs/{testRunId}" path_format_arguments = { "testRunId": _SERIALIZER.url("test_run_id", test_run_id, "str"), } @@ -845,76 +820,58 @@ def build_load_test_run_list_metrics_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if aggregation is not None: - _params["aggregation"] = _SERIALIZER.query("aggregation", aggregation, "str") - _params["metricname"] = _SERIALIZER.query("metric_name", metric_name, "str") - if interval is not None: - _params["interval"] = _SERIALIZER.query("interval", interval, "str") - _params["metricNamespace"] = _SERIALIZER.query("metric_namespace", metric_namespace, "str") - _params["timespan"] = _SERIALIZER.query("time_interval", time_interval, "str") + if old_test_run_id is not None: + _params["oldTestRunId"] = _SERIALIZER.query("old_test_run_id", old_test_run_id, "str") # Construct headers if content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") 
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) -def build_load_test_run_list_test_runs_request( # pylint: disable=name-too-long - *, - orderby: Optional[str] = None, - search: Optional[str] = None, - test_id: Optional[str] = None, - execution_from: Optional[datetime.datetime] = None, - execution_to: Optional[datetime.datetime] = None, - status: Optional[str] = None, - maxpagesize: Optional[int] = None, - **kwargs: Any +def build_load_test_run_create_or_update_app_components_request( # pylint: disable=name-too-long + test_run_id: str, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-12-01-preview")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("content-type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL - _url = "/test-runs" + _url = "/test-runs/{testRunId}/app-components" + path_format_arguments = { + "testRunId": _SERIALIZER.url("test_run_id", test_run_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if orderby is not None: - _params["orderby"] = _SERIALIZER.query("orderby", orderby, "str") - if search is not None: - _params["search"] = _SERIALIZER.query("search", search, "str") - if test_id is not None: - _params["testId"] = _SERIALIZER.query("test_id", test_id, "str") - if execution_from is not None: - _params["executionFrom"] = _SERIALIZER.query("execution_from", execution_from, "iso-8601") - if execution_to is not None: - _params["executionTo"] = _SERIALIZER.query("execution_to", execution_to, "iso-8601") - if status is not None: - _params["status"] = _SERIALIZER.query("status", status, "str") - if maxpagesize is not None: - _params["maxpagesize"] = _SERIALIZER.query("maxpagesize", maxpagesize, "int") # Construct headers + if content_type is not None: + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) -def build_load_test_run_stop_test_run_request( # pylint: disable=name-too-long +def build_load_test_run_create_or_update_server_metrics_config_request( # pylint: disable=name-too-long test_run_id: str, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-12-01-preview")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("content-type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL - _url = "/test-runs/{testRunId}:stop" + _url = "/test-runs/{testRunId}/server-metrics-config" path_format_arguments = { "testRunId": 
_SERIALIZER.url("test_run_id", test_run_id, "str"), } @@ -925,25 +882,23 @@ def build_load_test_run_stop_test_run_request( # pylint: disable=name-too-long _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers + if content_type is not None: + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) -def build_load_test_run_begin_test_profile_run_request( # pylint: disable=name-too-long - test_profile_run_id: str, **kwargs: Any +def build_load_test_run_delete_test_run_request( # pylint: disable=name-too-long + test_run_id: str, **kwargs: Any ) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-12-01-preview")) - accept = _headers.pop("Accept", "application/json") - + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) # Construct URL - _url = "/test-profile-runs/{testProfileRunId}" + _url = "/test-runs/{testRunId}" path_format_arguments = { - "testProfileRunId": _SERIALIZER.url("test_profile_run_id", test_profile_run_id, "str"), + "testRunId": _SERIALIZER.url("test_run_id", test_run_id, "str"), } _url: str = _url.format(**path_format_arguments) # type: ignore @@ -951,27 +906,22 @@ def build_load_test_run_begin_test_profile_run_request( # pylint: disable=name- # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) + return HttpRequest(method="DELETE", url=_url, params=_params, **kwargs) -def build_load_test_run_delete_test_profile_run_request( # pylint: disable=name-too-long - test_profile_run_id: str, **kwargs: Any +def build_load_test_run_get_app_components_request( # pylint: disable=name-too-long + test_run_id: str, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-12-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL - _url = "/test-profile-runs/{testProfileRunId}" + _url = "/test-runs/{testRunId}/app-components" path_format_arguments = { - "testProfileRunId": _SERIALIZER.url("test_profile_run_id", test_profile_run_id, "str"), + "testRunId": _SERIALIZER.url("test_run_id", test_run_id, "str"), } _url: str = _url.format(**path_format_arguments) # type: ignore @@ -982,22 +932,22 @@ def build_load_test_run_delete_test_profile_run_request( # pylint: disable=name # Construct headers _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - return HttpRequest(method="DELETE", url=_url, params=_params, 
headers=_headers, **kwargs) + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_load_test_run_get_test_profile_run_request( # pylint: disable=name-too-long - test_profile_run_id: str, **kwargs: Any +def build_load_test_run_get_server_metrics_config_request( # pylint: disable=name-too-long + test_run_id: str, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-12-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL - _url = "/test-profile-runs/{testProfileRunId}" + _url = "/test-runs/{testRunId}/server-metrics-config" path_format_arguments = { - "testProfileRunId": _SERIALIZER.url("test_profile_run_id", test_profile_run_id, "str"), + "testRunId": _SERIALIZER.url("test_run_id", test_run_id, "str"), } _url: str = _url.format(**path_format_arguments) # type: ignore @@ -1011,53 +961,23 @@ def build_load_test_run_get_test_profile_run_request( # pylint: disable=name-to return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_load_test_run_list_test_profile_runs_request( # pylint: disable=name-too-long - *, - maxpagesize: Optional[int] = None, - min_start_date_time: Optional[datetime.datetime] = None, - max_start_date_time: Optional[datetime.datetime] = None, - min_end_date_time: Optional[datetime.datetime] = None, - max_end_date_time: Optional[datetime.datetime] = None, - created_date_start_time: Optional[datetime.datetime] = None, - created_date_end_time: Optional[datetime.datetime] = None, - test_profile_run_ids: Optional[List[str]] = None, - test_profile_ids: Optional[List[str]] = None, - statuses: Optional[List[str]] = None, - **kwargs: Any -) -> HttpRequest: +def build_load_test_run_get_test_run_request(test_run_id: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-12-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL - _url = "/test-profile-runs" + _url = "/test-runs/{testRunId}" + path_format_arguments = { + "testRunId": _SERIALIZER.url("test_run_id", test_run_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if maxpagesize is not None: - _params["maxpagesize"] = _SERIALIZER.query("maxpagesize", maxpagesize, "int") - if min_start_date_time is not None: - _params["minStartDateTime"] = _SERIALIZER.query("min_start_date_time", min_start_date_time, "iso-8601") - if max_start_date_time is not None: - _params["maxStartDateTime"] = _SERIALIZER.query("max_start_date_time", max_start_date_time, "iso-8601") - if min_end_date_time is not None: - _params["minEndDateTime"] = _SERIALIZER.query("min_end_date_time", min_end_date_time, "iso-8601") - if max_end_date_time is not None: - _params["maxEndDateTime"] = _SERIALIZER.query("max_end_date_time", max_end_date_time, "iso-8601") - if created_date_start_time is not None: - _params["createdDateStartTime"] = 
_SERIALIZER.query( - "created_date_start_time", created_date_start_time, "iso-8601" - ) - if created_date_end_time is not None: - _params["createdDateEndTime"] = _SERIALIZER.query("created_date_end_time", created_date_end_time, "iso-8601") - if test_profile_run_ids is not None: - _params["testProfileRunIds"] = _SERIALIZER.query("test_profile_run_ids", test_profile_run_ids, "[str]", div=",") - if test_profile_ids is not None: - _params["testProfileIds"] = _SERIALIZER.query("test_profile_ids", test_profile_ids, "[str]", div=",") - if statuses is not None: - _params["statuses"] = _SERIALIZER.query("statuses", statuses, "[str]", div=",") # Construct headers _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") @@ -1065,19 +985,20 @@ def build_load_test_run_list_test_profile_runs_request( # pylint: disable=name- return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_load_test_run_stop_test_profile_run_request( # pylint: disable=name-too-long - test_profile_run_id: str, **kwargs: Any +def build_load_test_run_get_test_run_file_request( # pylint: disable=name-too-long + test_run_id: str, file_name: str, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-12-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL - _url = "/test-profile-runs/{testProfileRunId}:stop" + _url = "/test-runs/{testRunId}/files/{fileName}" path_format_arguments = { - "testProfileRunId": _SERIALIZER.url("test_profile_run_id", test_profile_run_id, "str"), + "testRunId": _SERIALIZER.url("test_run_id", test_run_id, "str"), + "fileName": _SERIALIZER.url("file_name", file_name, "str"), } _url: str = _url.format(**path_format_arguments) # type: ignore @@ -1088,238 +1009,2042 @@ def build_load_test_run_stop_test_profile_run_request( # pylint: disable=name-t # Construct headers _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -class LoadTestAdministrationClientOperationsMixin( # pylint: disable=name-too-long - LoadTestAdministrationClientMixinABC -): +def build_load_test_run_list_metric_dimension_values_request( # pylint: disable=name-too-long + test_run_id: str, + name: str, + *, + metric_name: str, + metric_namespace: str, + time_interval: str, + interval: Optional[Union[str, _models.TimeGrain]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - @overload - def create_or_update_test( - self, test_id: str, body: _models.Test, *, content_type: str = "application/merge-patch+json", **kwargs: Any - ) -> _models.Test: - """Create a new test or update an existing test by providing the test Id. + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + accept = _headers.pop("Accept", "application/json") - Create a new test or update an existing test by providing the test Id. 
+ # Construct URL + _url = "/test-runs/{testRunId}/metric-dimensions/{name}/values" + path_format_arguments = { + "testRunId": _SERIALIZER.url("test_run_id", test_run_id, "str"), + "name": _SERIALIZER.url("name", name, "str"), + } - :param test_id: Unique test identifier for the load test, must contain only lower-case - alphabetic, numeric, underscore or hyphen characters. Required. - :type test_id: str - :param body: The resource instance. Required. - :type body: ~azure.developer.loadtesting.models.Test - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/merge-patch+json". - :paramtype content_type: str - :return: Test. The Test is compatible with MutableMapping - :rtype: ~azure.developer.loadtesting.models.Test - :raises ~azure.core.exceptions.HttpResponseError: - """ + _url: str = _url.format(**path_format_arguments) # type: ignore - @overload - def create_or_update_test( - self, test_id: str, body: JSON, *, content_type: str = "application/merge-patch+json", **kwargs: Any - ) -> _models.Test: - """Create a new test or update an existing test by providing the test Id. + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + _params["metricname"] = _SERIALIZER.query("metric_name", metric_name, "str") + if interval is not None: + _params["interval"] = _SERIALIZER.query("interval", interval, "str") + _params["metricNamespace"] = _SERIALIZER.query("metric_namespace", metric_namespace, "str") + _params["timespan"] = _SERIALIZER.query("time_interval", time_interval, "str") - Create a new test or update an existing test by providing the test Id. + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - :param test_id: Unique test identifier for the load test, must contain only lower-case - alphabetic, numeric, underscore or hyphen characters. Required. - :type test_id: str - :param body: The resource instance. Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/merge-patch+json". - :paramtype content_type: str - :return: Test. The Test is compatible with MutableMapping - :rtype: ~azure.developer.loadtesting.models.Test - :raises ~azure.core.exceptions.HttpResponseError: - """ + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - @overload - def create_or_update_test( - self, test_id: str, body: IO[bytes], *, content_type: str = "application/merge-patch+json", **kwargs: Any - ) -> _models.Test: - """Create a new test or update an existing test by providing the test Id. - Create a new test or update an existing test by providing the test Id. +def build_load_test_run_get_metric_definitions_request( # pylint: disable=name-too-long + test_run_id: str, *, metric_namespace: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - :param test_id: Unique test identifier for the load test, must contain only lower-case - alphabetic, numeric, underscore or hyphen characters. Required. - :type test_id: str - :param body: The resource instance. Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/merge-patch+json". - :paramtype content_type: str - :return: Test. 
The Test is compatible with MutableMapping - :rtype: ~azure.developer.loadtesting.models.Test - :raises ~azure.core.exceptions.HttpResponseError: - """ + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + accept = _headers.pop("Accept", "application/json") - @distributed_trace - def create_or_update_test( - self, test_id: str, body: Union[_models.Test, JSON, IO[bytes]], **kwargs: Any - ) -> _models.Test: - """Create a new test or update an existing test by providing the test Id. + # Construct URL + _url = "/test-runs/{testRunId}/metric-definitions" + path_format_arguments = { + "testRunId": _SERIALIZER.url("test_run_id", test_run_id, "str"), + } - Create a new test or update an existing test by providing the test Id. + _url: str = _url.format(**path_format_arguments) # type: ignore - :param test_id: Unique test identifier for the load test, must contain only lower-case - alphabetic, numeric, underscore or hyphen characters. Required. - :type test_id: str - :param body: The resource instance. Is one of the following types: Test, JSON, IO[bytes] - Required. - :type body: ~azure.developer.loadtesting.models.Test or JSON or IO[bytes] - :return: Test. The Test is compatible with MutableMapping - :rtype: ~azure.developer.loadtesting.models.Test - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + _params["metricNamespace"] = _SERIALIZER.query("metric_namespace", metric_namespace, "str") - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.Test] = kwargs.pop("cls", None) + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - content_type = content_type or "application/merge-patch+json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_load_test_administration_create_or_update_test_request( - test_id=test_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) +def build_load_test_run_get_metric_namespaces_request( # pylint: disable=name-too-long + test_run_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + accept = _headers.pop("Accept", "application/json") - response = pipeline_response.http_response + # Construct URL + 
_url = "/test-runs/{testRunId}/metric-namespaces" + path_format_arguments = { + "testRunId": _SERIALIZER.url("test_run_id", test_run_id, "str"), + } - if response.status_code not in [200, 201]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + _url: str = _url.format(**path_format_arguments) # type: ignore - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.Test, response.json()) + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_load_test_run_list_metrics_request( + test_run_id: str, + *, + metric_name: str, + metric_namespace: str, + time_interval: str, + aggregation: Optional[str] = None, + interval: Optional[Union[str, _models.TimeGrain]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/test-runs/{testRunId}/metrics" + path_format_arguments = { + "testRunId": _SERIALIZER.url("test_run_id", test_run_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if aggregation is not None: + _params["aggregation"] = _SERIALIZER.query("aggregation", aggregation, "str") + _params["metricname"] = _SERIALIZER.query("metric_name", metric_name, "str") + if interval is not None: + _params["interval"] = _SERIALIZER.query("interval", interval, "str") + _params["metricNamespace"] = _SERIALIZER.query("metric_namespace", metric_namespace, "str") + _params["timespan"] = _SERIALIZER.query("time_interval", time_interval, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_load_test_run_list_test_runs_request( # pylint: disable=name-too-long + *, + orderby: Optional[str] = None, + search: Optional[str] = None, + test_id: Optional[str] = None, + execution_from: Optional[datetime.datetime] = None, + execution_to: Optional[datetime.datetime] = None, + status: Optional[str] = None, + maxpagesize: Optional[int] = None, + created_by_types: Optional[list[str]] = None, + test_ids: Optional[list[str]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # 
Construct URL + _url = "/test-runs" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if orderby is not None: + _params["orderby"] = _SERIALIZER.query("orderby", orderby, "str") + if search is not None: + _params["search"] = _SERIALIZER.query("search", search, "str") + if test_id is not None: + _params["testId"] = _SERIALIZER.query("test_id", test_id, "str") + if execution_from is not None: + _params["executionFrom"] = _SERIALIZER.query("execution_from", execution_from, "iso-8601") + if execution_to is not None: + _params["executionTo"] = _SERIALIZER.query("execution_to", execution_to, "iso-8601") + if status is not None: + _params["status"] = _SERIALIZER.query("status", status, "str") + if maxpagesize is not None: + _params["maxpagesize"] = _SERIALIZER.query("maxpagesize", maxpagesize, "int") + if created_by_types is not None: + _params["createdByTypes"] = _SERIALIZER.query("created_by_types", created_by_types, "[str]", div=",") + if test_ids is not None: + _params["testIds"] = _SERIALIZER.query("test_ids", test_ids, "[str]", div=",") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_load_test_run_stop_test_run_request( # pylint: disable=name-too-long + test_run_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/test-runs/{testRunId}:stop" + path_format_arguments = { + "testRunId": _SERIALIZER.url("test_run_id", test_run_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_load_test_run_begin_test_profile_run_request( # pylint: disable=name-too-long + test_profile_run_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/test-profile-runs/{testProfileRunId}" + path_format_arguments = { + "testProfileRunId": _SERIALIZER.url("test_profile_run_id", test_profile_run_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_load_test_run_delete_test_profile_run_request( # pylint: disable=name-too-long + test_profile_run_id: 
str, **kwargs: Any +) -> HttpRequest: + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + # Construct URL + _url = "/test-profile-runs/{testProfileRunId}" + path_format_arguments = { + "testProfileRunId": _SERIALIZER.url("test_profile_run_id", test_profile_run_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, **kwargs) + + +def build_load_test_run_get_test_profile_run_request( # pylint: disable=name-too-long + test_profile_run_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/test-profile-runs/{testProfileRunId}" + path_format_arguments = { + "testProfileRunId": _SERIALIZER.url("test_profile_run_id", test_profile_run_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_load_test_run_list_test_profile_runs_request( # pylint: disable=name-too-long + *, + maxpagesize: Optional[int] = None, + min_start_date_time: Optional[datetime.datetime] = None, + max_start_date_time: Optional[datetime.datetime] = None, + min_end_date_time: Optional[datetime.datetime] = None, + max_end_date_time: Optional[datetime.datetime] = None, + created_date_start_time: Optional[datetime.datetime] = None, + created_date_end_time: Optional[datetime.datetime] = None, + test_profile_run_ids: Optional[list[str]] = None, + test_profile_ids: Optional[list[str]] = None, + statuses: Optional[list[str]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/test-profile-runs" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if maxpagesize is not None: + _params["maxpagesize"] = _SERIALIZER.query("maxpagesize", maxpagesize, "int") + if min_start_date_time is not None: + _params["minStartDateTime"] = _SERIALIZER.query("min_start_date_time", min_start_date_time, "iso-8601") + if max_start_date_time is not None: + _params["maxStartDateTime"] = _SERIALIZER.query("max_start_date_time", max_start_date_time, "iso-8601") + if min_end_date_time is not None: + _params["minEndDateTime"] = _SERIALIZER.query("min_end_date_time", min_end_date_time, "iso-8601") + if max_end_date_time is not None: + _params["maxEndDateTime"] = _SERIALIZER.query("max_end_date_time", max_end_date_time, "iso-8601") + if created_date_start_time is not None: + _params["createdDateStartTime"] = _SERIALIZER.query( + "created_date_start_time", 
created_date_start_time, "iso-8601" + ) + if created_date_end_time is not None: + _params["createdDateEndTime"] = _SERIALIZER.query("created_date_end_time", created_date_end_time, "iso-8601") + if test_profile_run_ids is not None: + _params["testProfileRunIds"] = _SERIALIZER.query("test_profile_run_ids", test_profile_run_ids, "[str]", div=",") + if test_profile_ids is not None: + _params["testProfileIds"] = _SERIALIZER.query("test_profile_ids", test_profile_ids, "[str]", div=",") + if statuses is not None: + _params["statuses"] = _SERIALIZER.query("statuses", statuses, "[str]", div=",") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_load_test_run_stop_test_profile_run_request( # pylint: disable=name-too-long + test_profile_run_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/test-profile-runs/{testProfileRunId}:stop" + path_format_arguments = { + "testProfileRunId": _SERIALIZER.url("test_profile_run_id", test_profile_run_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_load_test_run_get_latest_test_run_insights_request( # pylint: disable=name-too-long + test_run_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/test-runs/{testRunId}/insights/latest" + path_format_arguments = { + "testRunId": _SERIALIZER.url("test_run_id", test_run_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_load_test_run_patch_latest_test_run_insights_request( # pylint: disable=name-too-long + test_run_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("content-type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/test-runs/{testRunId}/insights/latest" + path_format_arguments = { + "testRunId": _SERIALIZER.url("test_run_id", test_run_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + 
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_load_test_run_generate_test_run_insights_request( # pylint: disable=name-too-long + test_run_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/test-runs/{testRunId}/insights:generate" + path_format_arguments = { + "testRunId": _SERIALIZER.url("test_run_id", test_run_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +class _LoadTestAdministrationClientOperationsMixin( # pylint: disable=too-many-public-methods + ClientMixinABC[PipelineClient[HttpRequest, HttpResponse], LoadTestAdministrationClientConfiguration] +): + + @overload + def create_or_update_test( + self, test_id: str, body: _models.Test, *, content_type: str = "application/merge-patch+json", **kwargs: Any + ) -> _models.Test: + """Create a new test or update an existing test by providing the test Id. + + Create a new test or update an existing test by providing the test Id. + + :param test_id: Unique test identifier for the load test, must contain only lower-case + alphabetic, numeric, underscore or hyphen characters. Required. + :type test_id: str + :param body: The resource instance. Required. + :type body: ~azure.developer.loadtesting.models.Test + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/merge-patch+json". + :paramtype content_type: str + :return: Test. The Test is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.Test + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_or_update_test( + self, test_id: str, body: JSON, *, content_type: str = "application/merge-patch+json", **kwargs: Any + ) -> _models.Test: + """Create a new test or update an existing test by providing the test Id. + + Create a new test or update an existing test by providing the test Id. + + :param test_id: Unique test identifier for the load test, must contain only lower-case + alphabetic, numeric, underscore or hyphen characters. Required. + :type test_id: str + :param body: The resource instance. Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/merge-patch+json". + :paramtype content_type: str + :return: Test. 
The Test is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.Test + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_or_update_test( + self, test_id: str, body: IO[bytes], *, content_type: str = "application/merge-patch+json", **kwargs: Any + ) -> _models.Test: + """Create a new test or update an existing test by providing the test Id. + + Create a new test or update an existing test by providing the test Id. + + :param test_id: Unique test identifier for the load test, must contain only lower-case + alphabetic, numeric, underscore or hyphen characters. Required. + :type test_id: str + :param body: The resource instance. Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/merge-patch+json". + :paramtype content_type: str + :return: Test. The Test is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.Test + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_or_update_test( + self, test_id: str, body: Union[_models.Test, JSON, IO[bytes]], **kwargs: Any + ) -> _models.Test: + """Create a new test or update an existing test by providing the test Id. + + Create a new test or update an existing test by providing the test Id. + + :param test_id: Unique test identifier for the load test, must contain only lower-case + alphabetic, numeric, underscore or hyphen characters. Required. + :type test_id: str + :param body: The resource instance. Is one of the following types: Test, JSON, IO[bytes] + Required. + :type body: ~azure.developer.loadtesting.models.Test or JSON or IO[bytes] + :return: Test. The Test is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.Test + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Test] = kwargs.pop("cls", None) + + content_type = content_type or "application/merge-patch+json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_load_test_administration_create_or_update_test_request( + test_id=test_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, 
error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Test, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_or_update_app_components( + self, + test_id: str, + body: _models.TestAppComponents, + *, + content_type: str = "application/merge-patch+json", + **kwargs: Any + ) -> _models.TestAppComponents: + """Add an app component to a test. + + Add an app component to a test by providing the resource Id, name and type. + + :param test_id: Unique name for the load test, must contain only lower-case alphabetic, + numeric, underscore or hyphen characters. Required. + :type test_id: str + :param body: App Component model. Required. + :type body: ~azure.developer.loadtesting.models.TestAppComponents + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/merge-patch+json". + :paramtype content_type: str + :return: TestAppComponents. The TestAppComponents is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.TestAppComponents + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_or_update_app_components( + self, test_id: str, body: JSON, *, content_type: str = "application/merge-patch+json", **kwargs: Any + ) -> _models.TestAppComponents: + """Add an app component to a test. + + Add an app component to a test by providing the resource Id, name and type. + + :param test_id: Unique name for the load test, must contain only lower-case alphabetic, + numeric, underscore or hyphen characters. Required. + :type test_id: str + :param body: App Component model. Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/merge-patch+json". + :paramtype content_type: str + :return: TestAppComponents. The TestAppComponents is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.TestAppComponents + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_or_update_app_components( + self, test_id: str, body: IO[bytes], *, content_type: str = "application/merge-patch+json", **kwargs: Any + ) -> _models.TestAppComponents: + """Add an app component to a test. + + Add an app component to a test by providing the resource Id, name and type. + + :param test_id: Unique name for the load test, must contain only lower-case alphabetic, + numeric, underscore or hyphen characters. Required. + :type test_id: str + :param body: App Component model. Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/merge-patch+json". + :paramtype content_type: str + :return: TestAppComponents. The TestAppComponents is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.TestAppComponents + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_or_update_app_components( + self, test_id: str, body: Union[_models.TestAppComponents, JSON, IO[bytes]], **kwargs: Any + ) -> _models.TestAppComponents: + """Add an app component to a test. + + Add an app component to a test by providing the resource Id, name and type. 
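# A minimal usage sketch for the create_or_update_test operation above, assuming a
# LoadTestAdministrationClient pointed at a data-plane endpoint read from an assumed
# environment variable; the JSON body fields are illustrative, not taken from this patch.
import os

from azure.developer.loadtesting import LoadTestAdministrationClient
from azure.identity import DefaultAzureCredential

client = LoadTestAdministrationClient(
    endpoint=os.environ["LOADTESTSERVICE_ENDPOINT"],  # assumed env var holding the endpoint
    credential=DefaultAzureCredential(),
)

# The body is sent as application/merge-patch+json; a plain dict (JSON) is accepted
# alongside the Test model and IO[bytes] overloads documented above.
test = client.create_or_update_test(
    "sample-test-id",
    {
        "displayName": "Sample Test",                 # illustrative fields
        "description": "Sample load test",
        "loadTestConfiguration": {"engineInstances": 1},
    },
)
print(test["displayName"])  # the returned Test is MutableMapping-compatible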
+ + :param test_id: Unique name for the load test, must contain only lower-case alphabetic, + numeric, underscore or hyphen characters. Required. + :type test_id: str + :param body: App Component model. Is one of the following types: TestAppComponents, JSON, + IO[bytes] Required. + :type body: ~azure.developer.loadtesting.models.TestAppComponents or JSON or IO[bytes] + :return: TestAppComponents. The TestAppComponents is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.TestAppComponents + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("content-type", None)) + cls: ClsType[_models.TestAppComponents] = kwargs.pop("cls", None) + + content_type = content_type or "application/merge-patch+json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_load_test_administration_create_or_update_app_components_request( + test_id=test_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.TestAppComponents, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_or_update_server_metrics_config( + self, + test_id: str, + body: _models.TestServerMetricsConfiguration, + *, + content_type: str = "application/merge-patch+json", + **kwargs: Any + ) -> _models.TestServerMetricsConfiguration: + """Configure server metrics for a test. + + Configure server metrics for a test. + + :param test_id: Unique name for the load test, must contain only lower-case alphabetic, + numeric, underscore or hyphen characters. Required. + :type test_id: str + :param body: Server metric configuration model. Required. + :type body: ~azure.developer.loadtesting.models.TestServerMetricsConfiguration + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/merge-patch+json". + :paramtype content_type: str + :return: TestServerMetricsConfiguration. 
The TestServerMetricsConfiguration is compatible with + MutableMapping + :rtype: ~azure.developer.loadtesting.models.TestServerMetricsConfiguration + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_or_update_server_metrics_config( + self, test_id: str, body: JSON, *, content_type: str = "application/merge-patch+json", **kwargs: Any + ) -> _models.TestServerMetricsConfiguration: + """Configure server metrics for a test. + + Configure server metrics for a test. + + :param test_id: Unique name for the load test, must contain only lower-case alphabetic, + numeric, underscore or hyphen characters. Required. + :type test_id: str + :param body: Server metric configuration model. Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/merge-patch+json". + :paramtype content_type: str + :return: TestServerMetricsConfiguration. The TestServerMetricsConfiguration is compatible with + MutableMapping + :rtype: ~azure.developer.loadtesting.models.TestServerMetricsConfiguration + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_or_update_server_metrics_config( + self, test_id: str, body: IO[bytes], *, content_type: str = "application/merge-patch+json", **kwargs: Any + ) -> _models.TestServerMetricsConfiguration: + """Configure server metrics for a test. + + Configure server metrics for a test. + + :param test_id: Unique name for the load test, must contain only lower-case alphabetic, + numeric, underscore or hyphen characters. Required. + :type test_id: str + :param body: Server metric configuration model. Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/merge-patch+json". + :paramtype content_type: str + :return: TestServerMetricsConfiguration. The TestServerMetricsConfiguration is compatible with + MutableMapping + :rtype: ~azure.developer.loadtesting.models.TestServerMetricsConfiguration + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_or_update_server_metrics_config( + self, test_id: str, body: Union[_models.TestServerMetricsConfiguration, JSON, IO[bytes]], **kwargs: Any + ) -> _models.TestServerMetricsConfiguration: + """Configure server metrics for a test. + + Configure server metrics for a test. + + :param test_id: Unique name for the load test, must contain only lower-case alphabetic, + numeric, underscore or hyphen characters. Required. + :type test_id: str + :param body: Server metric configuration model. Is one of the following types: + TestServerMetricsConfiguration, JSON, IO[bytes] Required. + :type body: ~azure.developer.loadtesting.models.TestServerMetricsConfiguration or JSON or + IO[bytes] + :return: TestServerMetricsConfiguration. 
The TestServerMetricsConfiguration is compatible with + MutableMapping + :rtype: ~azure.developer.loadtesting.models.TestServerMetricsConfiguration + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("content-type", None)) + cls: ClsType[_models.TestServerMetricsConfiguration] = kwargs.pop("cls", None) + + content_type = content_type or "application/merge-patch+json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_load_test_administration_create_or_update_server_metrics_config_request( + test_id=test_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.TestServerMetricsConfiguration, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_app_components(self, test_id: str, **kwargs: Any) -> _models.TestAppComponents: + """Get associated app component (collection of azure resources) for the given test. + + Get associated app component (collection of azure resources) for the given test. + + :param test_id: Unique name for the load test, must contain only lower-case alphabetic, + numeric, underscore or hyphen characters. Required. + :type test_id: str + :return: TestAppComponents. 
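# Sketch of attaching app components and server-side metrics to a test via
# create_or_update_app_components / create_or_update_server_metrics_config, then reading
# them back with get_app_components. The resource ID, metric key, and metric fields below
# are placeholders following the REST schema, not values from this patch.
import os

from azure.developer.loadtesting import LoadTestAdministrationClient
from azure.identity import DefaultAzureCredential

client = LoadTestAdministrationClient(
    endpoint=os.environ["LOADTESTSERVICE_ENDPOINT"],
    credential=DefaultAzureCredential(),
)

app_resource_id = (
    "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/sample-rg"
    "/providers/Microsoft.Web/sites/sample-app"  # placeholder Azure resource ID
)

# "components" is keyed by the fully qualified resource ID being monitored.
client.create_or_update_app_components(
    "sample-test-id",
    {
        "components": {
            app_resource_id: {
                "resourceId": app_resource_id,
                "resourceName": "sample-app",
                "resourceType": "Microsoft.Web/sites",
            }
        }
    },
)

# "metrics" is keyed by a metric identifier; the entry shape below is illustrative.
client.create_or_update_server_metrics_config(
    "sample-test-id",
    {
        "metrics": {
            "sample-metric-id": {
                "resourceId": app_resource_id,
                "metricNamespace": "Microsoft.Web/sites",
                "name": "Requests",
                "aggregation": "Average",
                "resourceType": "Microsoft.Web/sites",
            }
        }
    },
)

print(client.get_app_components("sample-test-id"))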
The TestAppComponents is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.TestAppComponents + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.TestAppComponents] = kwargs.pop("cls", None) + + _request = build_load_test_administration_get_app_components_request( + test_id=test_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.TestAppComponents, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_server_metrics_config(self, test_id: str, **kwargs: Any) -> _models.TestServerMetricsConfiguration: + """List server metrics configuration for the given test. + + List server metrics configuration for the given test. + + :param test_id: Unique name for the load test, must contain only lower-case alphabetic, + numeric, underscore or hyphen characters. Required. + :type test_id: str + :return: TestServerMetricsConfiguration. 
The TestServerMetricsConfiguration is compatible with + MutableMapping + :rtype: ~azure.developer.loadtesting.models.TestServerMetricsConfiguration + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.TestServerMetricsConfiguration] = kwargs.pop("cls", None) + + _request = build_load_test_administration_get_server_metrics_config_request( + test_id=test_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.TestServerMetricsConfiguration, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_test(self, test_id: str, **kwargs: Any) -> _models.Test: + """Get load test details by test Id. + + Get load test details by test Id. + + :param test_id: Unique test identifier for the load test, must contain only lower-case + alphabetic, numeric, underscore or hyphen characters. Required. + :type test_id: str + :return: Test. 
The Test is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.Test + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.Test] = kwargs.pop("cls", None) + + _request = build_load_test_administration_get_test_request( + test_id=test_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Test, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_test_file(self, test_id: str, file_name: str, **kwargs: Any) -> _models.TestFileInfo: + """Get all the files that are associated with a test. + + Get all the files that are associated with a test. + + :param test_id: Unique test identifier for the load test, must contain only lower-case + alphabetic, numeric, underscore or hyphen characters. Required. + :type test_id: str + :param file_name: Name of the file. Required. + :type file_name: str + :return: TestFileInfo. 
The TestFileInfo is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.TestFileInfo + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.TestFileInfo] = kwargs.pop("cls", None) + + _request = build_load_test_administration_get_test_file_request( + test_id=test_id, + file_name=file_name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.TestFileInfo, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_test_files(self, test_id: str, **kwargs: Any) -> ItemPaged["_models.TestFileInfo"]: + """Get all test files. + + Get all test files. + + :param test_id: Unique test identifier for the load test, must contain only lower-case + alphabetic, numeric, underscore or hyphen characters. Required. 
+ :type test_id: str + :return: An iterator like instance of TestFileInfo + :rtype: ~azure.core.paging.ItemPaged[~azure.developer.loadtesting.models.TestFileInfo] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[list[_models.TestFileInfo]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_load_test_administration_list_test_files_request( + test_id=test_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(list[_models.TestFileInfo], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @distributed_trace + def list_tests( + self, + *, + orderby: Optional[str] = None, + search: Optional[str] = None, + last_modified_start_time: Optional[datetime.datetime] = None, + last_modified_end_time: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> ItemPaged["_models.Test"]: + """Get all load tests by the fully qualified resource Id e.g + subscriptions/{subId}/resourceGroups/{rg}/providers/Microsoft.LoadTestService/loadtests/{resName}. + + Get all load tests by the fully qualified resource Id e.g + subscriptions/{subId}/resourceGroups/{rg}/providers/Microsoft.LoadTestService/loadtests/{resName}. + + :keyword orderby: Sort on the supported fields in (field asc/desc) format. eg: + lastModifiedDateTime asc. Supported fields - lastModifiedDateTime. Default value is None. + :paramtype orderby: str + :keyword search: Prefix based, case sensitive search on searchable fields - displayName, + createdBy. 
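# Sketch of reading back files attached to a test: list_test_files returns an ItemPaged of
# TestFileInfo that is iterated lazily (paging handled by the SDK), and get_test_file
# fetches one entry by name. Identifiers and field names are assumed from the REST schema.
import os

from azure.developer.loadtesting import LoadTestAdministrationClient
from azure.identity import DefaultAzureCredential

client = LoadTestAdministrationClient(
    endpoint=os.environ["LOADTESTSERVICE_ENDPOINT"],
    credential=DefaultAzureCredential(),
)

for file_info in client.list_test_files("sample-test-id"):
    print(file_info["fileName"], file_info.get("fileType"))  # MutableMapping-style access

jmx = client.get_test_file("sample-test-id", "app.jmx")
print(jmx["url"])  # TestFileInfo carries a download URL for the stored file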
For example, to search for a test, with display name is Login Test, + the search parameter can be Login. Default value is None. + :paramtype search: str + :keyword last_modified_start_time: Start DateTime(RFC 3339 literal format) of the last updated + time range to filter tests. Default value is None. + :paramtype last_modified_start_time: ~datetime.datetime + :keyword last_modified_end_time: End DateTime(RFC 3339 literal format) of the last updated time + range to filter tests. Default value is None. + :paramtype last_modified_end_time: ~datetime.datetime + :return: An iterator like instance of Test + :rtype: ~azure.core.paging.ItemPaged[~azure.developer.loadtesting.models.Test] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + maxpagesize = kwargs.pop("maxpagesize", None) + cls: ClsType[list[_models.Test]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_load_test_administration_list_tests_request( + orderby=orderby, + search=search, + last_modified_start_time=last_modified_start_time, + last_modified_end_time=last_modified_end_time, + maxpagesize=maxpagesize, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(list[_models.Test], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @distributed_trace + def _begin_upload_test_file( + self, + test_id: str, + file_name: str, + body: bytes, + *, + file_type: Optional[Union[str, _models.FileType]] = None, + **kwargs: Any + ) -> _models.TestFileInfo: + """Upload input file for a given test Id. 
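# Sketch of the list_tests filters documented above: orderby sorts on lastModifiedDateTime,
# search is a prefix match on displayName/createdBy, and the datetime keywords bound the
# last-modified window. Filter values here are illustrative.
import datetime
import os

from azure.developer.loadtesting import LoadTestAdministrationClient
from azure.identity import DefaultAzureCredential

client = LoadTestAdministrationClient(
    endpoint=os.environ["LOADTESTSERVICE_ENDPOINT"],
    credential=DefaultAzureCredential(),
)

recent = client.list_tests(
    orderby="lastModifiedDateTime desc",
    search="Login",
    last_modified_start_time=datetime.datetime(2025, 1, 1, tzinfo=datetime.timezone.utc),
)
for test in recent:
    print(test["testId"], test.get("displayName"))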
File size can't be more than 50 MB. + Existing file with same name for the given test will be overwritten. File + should be provided in the request body as application/octet-stream. + + Upload input file for a given test Id. File size can't be more than 50 MB. + Existing file with same name for the given test will be overwritten. File + should be provided in the request body as application/octet-stream. + + :param test_id: Unique name for the load test, must contain only lower-case alphabetic, + numeric, underscore or hyphen characters. Required. + :type test_id: str + :param file_name: Unique name for test file with file extension like : App.jmx. Required. + :type file_name: str + :param body: The file content as application/octet-stream. Required. + :type body: bytes + :keyword file_type: File type. Known values are: "JMX_FILE", "USER_PROPERTIES", + "ADDITIONAL_ARTIFACTS", "ZIPPED_ARTIFACTS", "URL_TEST_CONFIG", "TEST_SCRIPT", + "BROWSER_RECORDING", and "TEST_PLAN_RECOMMENDATIONS". Default value is None. + :paramtype file_type: str or ~azure.developer.loadtesting.models.FileType + :return: TestFileInfo. The TestFileInfo is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.TestFileInfo + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: str = kwargs.pop("content_type", _headers.pop("content-type", "application/octet-stream")) + cls: ClsType[_models.TestFileInfo] = kwargs.pop("cls", None) + + _content = body + + _request = build_load_test_administration_begin_upload_test_file_request( + test_id=test_id, + file_name=file_name, + file_type=file_type, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.TestFileInfo, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete_test_file( # pylint: disable=inconsistent-return-statements + self, test_id: str, file_name: str, **kwargs: Any + ) -> None: + """Delete file by the file name for a test. + + Delete file by the file name for a test. + + :param test_id: Unique test identifier for the load test, must contain only lower-case + alphabetic, numeric, underscore or hyphen characters. Required. + :type test_id: str + :param file_name: Name of the file. 
Required. + :type file_name: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_load_test_administration_delete_test_file_request( + test_id=test_id, + file_name=file_name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace + def delete_test(self, test_id: str, **kwargs: Any) -> None: # pylint: disable=inconsistent-return-statements + """Delete a test by its test Id. + + Delete a test by its test Id. + + :param test_id: Unique test identifier for the load test, must contain only lower-case + alphabetic, numeric, underscore or hyphen characters. Required. + :type test_id: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_load_test_administration_delete_test_request( + test_id=test_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @overload + def create_or_update_test_profile( + self, + test_profile_id: str, + body: _models.TestProfile, + *, + content_type: str = "application/merge-patch+json", + **kwargs: Any + ) -> _models.TestProfile: + """Create a new test profile or update an existing test profile. + + Create a new test profile or update an existing test profile by providing the test profile Id. + + :param test_profile_id: Unique identifier for the test profile, must contain only lower-case + alphabetic, numeric, underscore or hyphen characters. Required. 
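# Sketch of the cleanup path shown above: delete_test_file removes a single attached file
# and delete_test removes the test itself; both return None on 204 and raise
# HttpResponseError otherwise. Identifiers are placeholders.
import os

from azure.core.exceptions import HttpResponseError
from azure.developer.loadtesting import LoadTestAdministrationClient
from azure.identity import DefaultAzureCredential

client = LoadTestAdministrationClient(
    endpoint=os.environ["LOADTESTSERVICE_ENDPOINT"],
    credential=DefaultAzureCredential(),
)

try:
    client.delete_test_file("sample-test-id", "app.jmx")
    client.delete_test("sample-test-id")
except HttpResponseError as exc:
    print(f"Cleanup failed: {exc.message}")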
+ :type test_profile_id: str + :param body: The resource instance. Required. + :type body: ~azure.developer.loadtesting.models.TestProfile + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/merge-patch+json". + :paramtype content_type: str + :return: TestProfile. The TestProfile is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.TestProfile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_or_update_test_profile( + self, test_profile_id: str, body: JSON, *, content_type: str = "application/merge-patch+json", **kwargs: Any + ) -> _models.TestProfile: + """Create a new test profile or update an existing test profile. + + Create a new test profile or update an existing test profile by providing the test profile Id. + + :param test_profile_id: Unique identifier for the test profile, must contain only lower-case + alphabetic, numeric, underscore or hyphen characters. Required. + :type test_profile_id: str + :param body: The resource instance. Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/merge-patch+json". + :paramtype content_type: str + :return: TestProfile. The TestProfile is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.TestProfile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_or_update_test_profile( + self, + test_profile_id: str, + body: IO[bytes], + *, + content_type: str = "application/merge-patch+json", + **kwargs: Any + ) -> _models.TestProfile: + """Create a new test profile or update an existing test profile. + + Create a new test profile or update an existing test profile by providing the test profile Id. + + :param test_profile_id: Unique identifier for the test profile, must contain only lower-case + alphabetic, numeric, underscore or hyphen characters. Required. + :type test_profile_id: str + :param body: The resource instance. Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/merge-patch+json". + :paramtype content_type: str + :return: TestProfile. The TestProfile is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.TestProfile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + @api_version_validation( + method_added_on="2024-05-01-preview", + params_added_on={"2024-05-01-preview": ["api_version", "test_profile_id", "content_type", "accept"]}, + api_versions_list=[ + "2024-05-01-preview", + "2024-07-01-preview", + "2024-12-01-preview", + "2025-03-01-preview", + "2025-11-01-preview", + ], + ) + def create_or_update_test_profile( + self, test_profile_id: str, body: Union[_models.TestProfile, JSON, IO[bytes]], **kwargs: Any + ) -> _models.TestProfile: + """Create a new test profile or update an existing test profile. + + Create a new test profile or update an existing test profile by providing the test profile Id. + + :param test_profile_id: Unique identifier for the test profile, must contain only lower-case + alphabetic, numeric, underscore or hyphen characters. Required. + :type test_profile_id: str + :param body: The resource instance. Is one of the following types: TestProfile, JSON, IO[bytes] + Required. 
+ :type body: ~azure.developer.loadtesting.models.TestProfile or JSON or IO[bytes] + :return: TestProfile. The TestProfile is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.TestProfile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.TestProfile] = kwargs.pop("cls", None) + + content_type = content_type or "application/merge-patch+json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_load_test_administration_create_or_update_test_profile_request( + test_profile_id=test_profile_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.TestProfile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + @api_version_validation( + method_added_on="2024-05-01-preview", + params_added_on={"2024-05-01-preview": ["api_version", "test_profile_id"]}, + api_versions_list=[ + "2024-05-01-preview", + "2024-07-01-preview", + "2024-12-01-preview", + "2025-03-01-preview", + "2025-11-01-preview", + ], + ) + def delete_test_profile( # pylint: disable=inconsistent-return-statements + self, test_profile_id: str, **kwargs: Any + ) -> None: + """Delete a test profile. + + Delete a test profile by its test profile Id. + + :param test_profile_id: Unique identifier for the test profile, must contain only lower-case + alphabetic, numeric, underscore or hyphen characters. Required. 
+ :type test_profile_id: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_load_test_administration_delete_test_profile_request( + test_profile_id=test_profile_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace + @api_version_validation( + method_added_on="2024-05-01-preview", + params_added_on={"2024-05-01-preview": ["api_version", "test_profile_id", "accept"]}, + api_versions_list=[ + "2024-05-01-preview", + "2024-07-01-preview", + "2024-12-01-preview", + "2025-03-01-preview", + "2025-11-01-preview", + ], + ) + def get_test_profile(self, test_profile_id: str, **kwargs: Any) -> _models.TestProfile: + """Get load test profile details. + + Get load test profile details by test profile Id. + + :param test_profile_id: Unique identifier for the test profile, must contain only lower-case + alphabetic, numeric, underscore or hyphen characters. Required. + :type test_profile_id: str + :return: TestProfile. 
The TestProfile is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.TestProfile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.TestProfile] = kwargs.pop("cls", None) + + _request = build_load_test_administration_get_test_profile_request( + test_profile_id=test_profile_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.TestProfile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + @api_version_validation( + method_added_on="2024-05-01-preview", + params_added_on={ + "2024-05-01-preview": [ + "api_version", + "maxpagesize", + "last_modified_start_time", + "last_modified_end_time", + "test_profile_ids", + "test_ids", + "accept", + ] + }, + api_versions_list=[ + "2024-05-01-preview", + "2024-07-01-preview", + "2024-12-01-preview", + "2025-03-01-preview", + "2025-11-01-preview", + ], + ) + def list_test_profiles( + self, + *, + last_modified_start_time: Optional[datetime.datetime] = None, + last_modified_end_time: Optional[datetime.datetime] = None, + test_profile_ids: Optional[list[str]] = None, + test_ids: Optional[list[str]] = None, + **kwargs: Any + ) -> ItemPaged["_models.TestProfile"]: + """List test profiles. + + Get all test profiles for the given filters. + + :keyword last_modified_start_time: Start DateTime(RFC 3339 literal format) of the last updated + time range to filter test profiles. Default value is None. + :paramtype last_modified_start_time: ~datetime.datetime + :keyword last_modified_end_time: End DateTime(RFC 3339 literal format) of the last updated time + range to filter test profiles. Default value is None. + :paramtype last_modified_end_time: ~datetime.datetime + :keyword test_profile_ids: Comma separated list of IDs of the test profiles to filter. Default + value is None. + :paramtype test_profile_ids: list[str] + :keyword test_ids: Comma separated list IDs of the tests which should be associated with the + test profiles to fetch. Default value is None. 
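# Sketch of the test-profile operations gated by @api_version_validation above; they are
# only available on 2024-05-01-preview and later API versions. The target resource ID and
# configuration values below are placeholders assumed from the REST schema.
import os

from azure.developer.loadtesting import LoadTestAdministrationClient
from azure.identity import DefaultAzureCredential

client = LoadTestAdministrationClient(
    endpoint=os.environ["LOADTESTSERVICE_ENDPOINT"],
    credential=DefaultAzureCredential(),
)

profile = client.create_or_update_test_profile(
    "sample-profile-id",
    {
        "displayName": "Sample profile",  # illustrative fields
        "testId": "sample-test-id",
        "targetResourceId": "/subscriptions/00000000-0000-0000-0000-000000000000"
        "/resourceGroups/sample-rg/providers/Microsoft.Web/sites/sample-func",
        "targetResourceConfigurations": {
            "kind": "FunctionsFlexConsumption",
            "configurations": {"config1": {"instanceMemoryMB": 2048, "httpConcurrency": 20}},
        },
    },
)
print(profile["testProfileId"])  # server-assigned/readonly fields come back on the response

print(client.get_test_profile("sample-profile-id"))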
+ :paramtype test_ids: list[str] + :return: An iterator like instance of TestProfile + :rtype: ~azure.core.paging.ItemPaged[~azure.developer.loadtesting.models.TestProfile] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + maxpagesize = kwargs.pop("maxpagesize", None) + cls: ClsType[list[_models.TestProfile]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_load_test_administration_list_test_profiles_request( + maxpagesize=maxpagesize, + last_modified_start_time=last_modified_start_time, + last_modified_end_time=last_modified_end_time, + test_profile_ids=test_profile_ids, + test_ids=test_ids, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(list[_models.TestProfile], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) - return deserialized # type: ignore + return pipeline_response + + return ItemPaged(get_next, extract_data) @overload - def create_or_update_app_components( + def create_or_update_trigger( self, - test_id: str, - body: _models.TestAppComponents, + trigger_id: str, + body: _models.Trigger, *, content_type: str = "application/merge-patch+json", **kwargs: Any - ) -> _models.TestAppComponents: - """Add an app component to a test. - - Add an app component to a test by providing the resource Id, name and type. + ) -> _models.Trigger: + """Create or update operation template. - :param test_id: Unique name for the load test, must contain only lower-case alphabetic, - numeric, underscore or hyphen characters. Required. - :type test_id: str - :param body: App Component model. Required. 
- :type body: ~azure.developer.loadtesting.models.TestAppComponents + :param trigger_id: The unique identifier of the trigger. Required. + :type trigger_id: str + :param body: The resource instance. Required. + :type body: ~azure.developer.loadtesting.models.Trigger :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/merge-patch+json". :paramtype content_type: str - :return: TestAppComponents. The TestAppComponents is compatible with MutableMapping - :rtype: ~azure.developer.loadtesting.models.TestAppComponents + :return: Trigger. The Trigger is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.Trigger :raises ~azure.core.exceptions.HttpResponseError: """ @overload - def create_or_update_app_components( - self, test_id: str, body: JSON, *, content_type: str = "application/merge-patch+json", **kwargs: Any - ) -> _models.TestAppComponents: - """Add an app component to a test. - - Add an app component to a test by providing the resource Id, name and type. + def create_or_update_trigger( + self, trigger_id: str, body: JSON, *, content_type: str = "application/merge-patch+json", **kwargs: Any + ) -> _models.Trigger: + """Create or update operation template. - :param test_id: Unique name for the load test, must contain only lower-case alphabetic, - numeric, underscore or hyphen characters. Required. - :type test_id: str - :param body: App Component model. Required. + :param trigger_id: The unique identifier of the trigger. Required. + :type trigger_id: str + :param body: The resource instance. Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/merge-patch+json". :paramtype content_type: str - :return: TestAppComponents. The TestAppComponents is compatible with MutableMapping - :rtype: ~azure.developer.loadtesting.models.TestAppComponents + :return: Trigger. The Trigger is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.Trigger :raises ~azure.core.exceptions.HttpResponseError: """ @overload - def create_or_update_app_components( - self, test_id: str, body: IO[bytes], *, content_type: str = "application/merge-patch+json", **kwargs: Any - ) -> _models.TestAppComponents: - """Add an app component to a test. + def create_or_update_trigger( + self, trigger_id: str, body: IO[bytes], *, content_type: str = "application/merge-patch+json", **kwargs: Any + ) -> _models.Trigger: + """Create or update operation template. - Add an app component to a test by providing the resource Id, name and type. - - :param test_id: Unique name for the load test, must contain only lower-case alphabetic, - numeric, underscore or hyphen characters. Required. - :type test_id: str - :param body: App Component model. Required. + :param trigger_id: The unique identifier of the trigger. Required. + :type trigger_id: str + :param body: The resource instance. Required. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/merge-patch+json". :paramtype content_type: str - :return: TestAppComponents. The TestAppComponents is compatible with MutableMapping - :rtype: ~azure.developer.loadtesting.models.TestAppComponents + :return: Trigger. 
The Trigger is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.Trigger :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace - def create_or_update_app_components( - self, test_id: str, body: Union[_models.TestAppComponents, JSON, IO[bytes]], **kwargs: Any - ) -> _models.TestAppComponents: - """Add an app component to a test. - - Add an app component to a test by providing the resource Id, name and type. - - :param test_id: Unique name for the load test, must contain only lower-case alphabetic, - numeric, underscore or hyphen characters. Required. - :type test_id: str - :param body: App Component model. Is one of the following types: TestAppComponents, JSON, - IO[bytes] Required. - :type body: ~azure.developer.loadtesting.models.TestAppComponents or JSON or IO[bytes] - :return: TestAppComponents. The TestAppComponents is compatible with MutableMapping - :rtype: ~azure.developer.loadtesting.models.TestAppComponents + def create_or_update_trigger( + self, trigger_id: str, body: Union[_models.Trigger, JSON, IO[bytes]], **kwargs: Any + ) -> _models.Trigger: + """Create or update operation template. + + :param trigger_id: The unique identifier of the trigger. Required. + :type trigger_id: str + :param body: The resource instance. Is one of the following types: Trigger, JSON, IO[bytes] + Required. + :type body: ~azure.developer.loadtesting.models.Trigger or JSON or IO[bytes] + :return: Trigger. The Trigger is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.Trigger :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -1333,8 +3058,8 @@ def create_or_update_app_components( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("content-type", None)) - cls: ClsType[_models.TestAppComponents] = kwargs.pop("cls", None) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Trigger] = kwargs.pop("cls", None) content_type = content_type or "application/merge-patch+json" _content = None @@ -1343,8 +3068,8 @@ def create_or_update_app_components( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_load_test_administration_create_or_update_app_components_request( - test_id=test_id, + _request = build_load_test_administration_create_or_update_trigger_request( + trigger_id=trigger_id, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -1375,102 +3100,69 @@ def create_or_update_app_components( if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.TestAppComponents, response.json()) + deserialized = _deserialize(_models.Trigger, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore - @overload - def create_or_update_server_metrics_config( - self, - test_id: str, - body: _models.TestServerMetricsConfiguration, - *, - content_type: str = "application/merge-patch+json", - **kwargs: Any - ) -> _models.TestServerMetricsConfiguration: - """Configure server metrics for a test. - - Configure server metrics for a test. + @distributed_trace + def delete_trigger(self, trigger_id: str, **kwargs: Any) -> None: # pylint: disable=inconsistent-return-statements + """Resource delete operation template. 
- :param test_id: Unique name for the load test, must contain only lower-case alphabetic, - numeric, underscore or hyphen characters. Required. - :type test_id: str - :param body: Server metric configuration model. Required. - :type body: ~azure.developer.loadtesting.models.TestServerMetricsConfiguration - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/merge-patch+json". - :paramtype content_type: str - :return: TestServerMetricsConfiguration. The TestServerMetricsConfiguration is compatible with - MutableMapping - :rtype: ~azure.developer.loadtesting.models.TestServerMetricsConfiguration + :param trigger_id: The unique identifier of the trigger. Required. + :type trigger_id: str + :return: None + :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) - @overload - def create_or_update_server_metrics_config( - self, test_id: str, body: JSON, *, content_type: str = "application/merge-patch+json", **kwargs: Any - ) -> _models.TestServerMetricsConfiguration: - """Configure server metrics for a test. + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} - Configure server metrics for a test. + cls: ClsType[None] = kwargs.pop("cls", None) - :param test_id: Unique name for the load test, must contain only lower-case alphabetic, - numeric, underscore or hyphen characters. Required. - :type test_id: str - :param body: Server metric configuration model. Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/merge-patch+json". - :paramtype content_type: str - :return: TestServerMetricsConfiguration. The TestServerMetricsConfiguration is compatible with - MutableMapping - :rtype: ~azure.developer.loadtesting.models.TestServerMetricsConfiguration - :raises ~azure.core.exceptions.HttpResponseError: - """ + _request = build_load_test_administration_delete_trigger_request( + trigger_id=trigger_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) - @overload - def create_or_update_server_metrics_config( - self, test_id: str, body: IO[bytes], *, content_type: str = "application/merge-patch+json", **kwargs: Any - ) -> _models.TestServerMetricsConfiguration: - """Configure server metrics for a test. + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) - Configure server metrics for a test. + response = pipeline_response.http_response - :param test_id: Unique name for the load test, must contain only lower-case alphabetic, - numeric, underscore or hyphen characters. Required. - :type test_id: str - :param body: Server metric configuration model. Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/merge-patch+json". - :paramtype content_type: str - :return: TestServerMetricsConfiguration. 
The TestServerMetricsConfiguration is compatible with - MutableMapping - :rtype: ~azure.developer.loadtesting.models.TestServerMetricsConfiguration - :raises ~azure.core.exceptions.HttpResponseError: - """ + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) - @distributed_trace - def create_or_update_server_metrics_config( - self, test_id: str, body: Union[_models.TestServerMetricsConfiguration, JSON, IO[bytes]], **kwargs: Any - ) -> _models.TestServerMetricsConfiguration: - """Configure server metrics for a test. + if cls: + return cls(pipeline_response, None, {}) # type: ignore - Configure server metrics for a test. + @distributed_trace + def get_trigger(self, trigger_id: str, **kwargs: Any) -> _models.Trigger: + """Resource read operation template. - :param test_id: Unique name for the load test, must contain only lower-case alphabetic, - numeric, underscore or hyphen characters. Required. - :type test_id: str - :param body: Server metric configuration model. Is one of the following types: - TestServerMetricsConfiguration, JSON, IO[bytes] Required. - :type body: ~azure.developer.loadtesting.models.TestServerMetricsConfiguration or JSON or - IO[bytes] - :return: TestServerMetricsConfiguration. The TestServerMetricsConfiguration is compatible with - MutableMapping - :rtype: ~azure.developer.loadtesting.models.TestServerMetricsConfiguration + :param trigger_id: The unique identifier of the trigger. Required. + :type trigger_id: str + :return: Trigger. The Trigger is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.Trigger :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -1481,24 +3173,14 @@ def create_or_update_server_metrics_config( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("content-type", None)) - cls: ClsType[_models.TestServerMetricsConfiguration] = kwargs.pop("cls", None) - - content_type = content_type or "application/merge-patch+json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + cls: ClsType[_models.Trigger] = kwargs.pop("cls", None) - _request = build_load_test_administration_create_or_update_server_metrics_config_request( - test_id=test_id, - content_type=content_type, + _request = build_load_test_administration_get_trigger_request( + trigger_id=trigger_id, api_version=self._config.api_version, - content=_content, headers=_headers, params=_params, ) @@ -1514,7 +3196,7 @@ def create_or_update_server_metrics_config( response = pipeline_response.http_response - if response.status_code not in [200, 201]: + if response.status_code not in [200]: if _stream: try: response.read() # Load the body in memory and close the socket @@ -1526,7 +3208,7 @@ def create_or_update_server_metrics_config( if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.TestServerMetricsConfiguration, response.json()) + deserialized = _deserialize(_models.Trigger, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -1534,18 +3216,39 @@ def create_or_update_server_metrics_config( return 
deserialized # type: ignore @distributed_trace - def get_app_components(self, test_id: str, **kwargs: Any) -> _models.TestAppComponents: - """Get associated app component (collection of azure resources) for the given test. - - Get associated app component (collection of azure resources) for the given test. + def list_triggers( + self, + *, + test_ids: Optional[str] = None, + states: Optional[Union[str, _models.TriggerState]] = None, + last_modified_start_time: Optional[datetime.datetime] = None, + last_modified_end_time: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> ItemPaged["_models.Trigger"]: + """Resource list operation template. - :param test_id: Unique name for the load test, must contain only lower-case alphabetic, - numeric, underscore or hyphen characters. Required. - :type test_id: str - :return: TestAppComponents. The TestAppComponents is compatible with MutableMapping - :rtype: ~azure.developer.loadtesting.models.TestAppComponents + :keyword test_ids: Search based on triggers associated with the provided test ids. Default + value is None. + :paramtype test_ids: str + :keyword states: Filter triggers based on a comma separated list of states. Known values are: + "Active", "Paused", "Completed", and "Disabled". Default value is None. + :paramtype states: str or ~azure.developer.loadtesting.models.TriggerState + :keyword last_modified_start_time: Start DateTime(RFC 3339 literal format) of the last updated + time range to filter triggers. Default value is None. + :paramtype last_modified_start_time: ~datetime.datetime + :keyword last_modified_end_time: End DateTime(RFC 3339 literal format) of the last updated time + range to filter triggers. Default value is None. + :paramtype last_modified_end_time: ~datetime.datetime + :return: An iterator like instance of Trigger + :rtype: ~azure.core.paging.ItemPaged[~azure.developer.loadtesting.models.Trigger] :raises ~azure.core.exceptions.HttpResponseError: """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + maxpagesize = kwargs.pop("maxpagesize", None) + cls: ClsType[list[_models.Trigger]] = kwargs.pop("cls", None) + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, @@ -1554,60 +3257,155 @@ def get_app_components(self, test_id: str, **kwargs: Any) -> _models.TestAppComp } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} + def prepare_request(next_link=None): + if not next_link: + + _request = build_load_test_administration_list_triggers_request( + test_ids=test_ids, + states=states, + last_modified_start_time=last_modified_start_time, + last_modified_end_time=last_modified_end_time, + maxpagesize=maxpagesize, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), 
params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(list[_models.Trigger], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) - cls: ClsType[_models.TestAppComponents] = kwargs.pop("cls", None) + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response - _request = build_load_test_administration_get_app_components_request( - test_id=test_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) + return pipeline_response - response = pipeline_response.http_response + return ItemPaged(get_next, extract_data) - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + @overload + def create_or_update_notification_rule( + self, + notification_rule_id: str, + body: _models.NotificationRule, + *, + content_type: str = "application/merge-patch+json", + **kwargs: Any + ) -> _models.NotificationRule: + """Create or update operation template. - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.TestAppComponents, response.json()) + :param notification_rule_id: The unique identifier of the notification rule. Required. + :type notification_rule_id: str + :param body: The resource instance. Required. + :type body: ~azure.developer.loadtesting.models.NotificationRule + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/merge-patch+json". + :paramtype content_type: str + :return: NotificationRule. The NotificationRule is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.NotificationRule + :raises ~azure.core.exceptions.HttpResponseError: + """ - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore + @overload + def create_or_update_notification_rule( + self, + notification_rule_id: str, + body: JSON, + *, + content_type: str = "application/merge-patch+json", + **kwargs: Any + ) -> _models.NotificationRule: + """Create or update operation template. - return deserialized # type: ignore + :param notification_rule_id: The unique identifier of the notification rule. Required. 
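# Illustrative usage sketch for the trigger operations introduced above (create_or_update_trigger,
# get_trigger, list_triggers, delete_trigger). This is not part of the generated patch: the endpoint
# value, trigger id, and payload fields are placeholder assumptions; see models.Trigger for the full
# schema of a trigger resource.
from azure.identity import DefaultAzureCredential
from azure.developer.loadtesting import LoadTestAdministrationClient

admin_client = LoadTestAdministrationClient(
    endpoint="<load-testing-data-plane-endpoint>",  # dataPlaneURI of the Azure Load Testing resource
    credential=DefaultAzureCredential(),
)

# Create or update a trigger; the body is sent as application/merge-patch+json.
trigger = admin_client.create_or_update_trigger(
    "sample-trigger-id",
    {"displayName": "Nightly schedule trigger"},  # illustrative, likely partial payload
)

# Read it back, list triggers filtered by state, then clean up.
print(admin_client.get_trigger("sample-trigger-id"))
for item in admin_client.list_triggers(states="Active"):
    print(item)
admin_client.delete_trigger("sample-trigger-id")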
+ :type notification_rule_id: str + :param body: The resource instance. Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/merge-patch+json". + :paramtype content_type: str + :return: NotificationRule. The NotificationRule is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.NotificationRule + :raises ~azure.core.exceptions.HttpResponseError: + """ - @distributed_trace - def get_server_metrics_config(self, test_id: str, **kwargs: Any) -> _models.TestServerMetricsConfiguration: - """List server metrics configuration for the given test. + @overload + def create_or_update_notification_rule( + self, + notification_rule_id: str, + body: IO[bytes], + *, + content_type: str = "application/merge-patch+json", + **kwargs: Any + ) -> _models.NotificationRule: + """Create or update operation template. - List server metrics configuration for the given test. + :param notification_rule_id: The unique identifier of the notification rule. Required. + :type notification_rule_id: str + :param body: The resource instance. Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/merge-patch+json". + :paramtype content_type: str + :return: NotificationRule. The NotificationRule is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.NotificationRule + :raises ~azure.core.exceptions.HttpResponseError: + """ - :param test_id: Unique name for the load test, must contain only lower-case alphabetic, - numeric, underscore or hyphen characters. Required. - :type test_id: str - :return: TestServerMetricsConfiguration. The TestServerMetricsConfiguration is compatible with - MutableMapping - :rtype: ~azure.developer.loadtesting.models.TestServerMetricsConfiguration + @distributed_trace + @api_version_validation( + method_added_on="2024-12-01-preview", + params_added_on={"2024-12-01-preview": ["api_version", "notification_rule_id", "content_type", "accept"]}, + api_versions_list=["2024-12-01-preview", "2025-03-01-preview", "2025-11-01-preview"], + ) + def create_or_update_notification_rule( + self, notification_rule_id: str, body: Union[_models.NotificationRule, JSON, IO[bytes]], **kwargs: Any + ) -> _models.NotificationRule: + """Create or update operation template. + + :param notification_rule_id: The unique identifier of the notification rule. Required. + :type notification_rule_id: str + :param body: The resource instance. Is one of the following types: NotificationRule, JSON, + IO[bytes] Required. + :type body: ~azure.developer.loadtesting.models.NotificationRule or JSON or IO[bytes] + :return: NotificationRule. 
The NotificationRule is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.NotificationRule :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -1618,14 +3416,24 @@ def get_server_metrics_config(self, test_id: str, **kwargs: Any) -> _models.Test } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.TestServerMetricsConfiguration] = kwargs.pop("cls", None) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.NotificationRule] = kwargs.pop("cls", None) - _request = build_load_test_administration_get_server_metrics_config_request( - test_id=test_id, + content_type = content_type or "application/merge-patch+json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_load_test_administration_create_or_update_notification_rule_request( + notification_rule_id=notification_rule_id, + content_type=content_type, api_version=self._config.api_version, + content=_content, headers=_headers, params=_params, ) @@ -1641,7 +3449,7 @@ def get_server_metrics_config(self, test_id: str, **kwargs: Any) -> _models.Test response = pipeline_response.http_response - if response.status_code not in [200]: + if response.status_code not in [200, 201]: if _stream: try: response.read() # Load the body in memory and close the socket @@ -1653,7 +3461,7 @@ def get_server_metrics_config(self, test_id: str, **kwargs: Any) -> _models.Test if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.TestServerMetricsConfiguration, response.json()) + deserialized = _deserialize(_models.NotificationRule, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -1661,16 +3469,20 @@ def get_server_metrics_config(self, test_id: str, **kwargs: Any) -> _models.Test return deserialized # type: ignore @distributed_trace - def get_test(self, test_id: str, **kwargs: Any) -> _models.Test: - """Get load test details by test Id. - - Get load test details by test Id. + @api_version_validation( + method_added_on="2024-12-01-preview", + params_added_on={"2024-12-01-preview": ["api_version", "notification_rule_id"]}, + api_versions_list=["2024-12-01-preview", "2025-03-01-preview", "2025-11-01-preview"], + ) + def delete_notification_rule( # pylint: disable=inconsistent-return-statements + self, notification_rule_id: str, **kwargs: Any + ) -> None: + """Resource delete operation template. - :param test_id: Unique test identifier for the load test, must contain only lower-case - alphabetic, numeric, underscore or hyphen characters. Required. - :type test_id: str - :return: Test. The Test is compatible with MutableMapping - :rtype: ~azure.developer.loadtesting.models.Test + :param notification_rule_id: The unique identifier of the notification rule. Required. 
+ :type notification_rule_id: str + :return: None + :rtype: None :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -1684,10 +3496,10 @@ def get_test(self, test_id: str, **kwargs: Any) -> _models.Test: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.Test] = kwargs.pop("cls", None) + cls: ClsType[None] = kwargs.pop("cls", None) - _request = build_load_test_administration_get_test_request( - test_id=test_id, + _request = build_load_test_administration_delete_notification_rule_request( + notification_rule_id=notification_rule_id, api_version=self._config.api_version, headers=_headers, params=_params, @@ -1697,45 +3509,33 @@ def get_test(self, test_id: str, **kwargs: Any) -> _models.Test: } _request.url = self._client.format_url(_request.url, **path_format_arguments) - _stream = kwargs.pop("stream", False) + _stream = False pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs ) response = pipeline_response.http_response - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass + if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.Test, response.json()) - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore + return cls(pipeline_response, None, {}) # type: ignore @distributed_trace - def get_test_file(self, test_id: str, file_name: str, **kwargs: Any) -> _models.TestFileInfo: - """Get all the files that are associated with a test. - - Get all the files that are associated with a test. + @api_version_validation( + method_added_on="2024-12-01-preview", + params_added_on={"2024-12-01-preview": ["api_version", "notification_rule_id", "accept"]}, + api_versions_list=["2024-12-01-preview", "2025-03-01-preview", "2025-11-01-preview"], + ) + def get_notification_rule(self, notification_rule_id: str, **kwargs: Any) -> _models.NotificationRule: + """Resource read operation template. - :param test_id: Unique test identifier for the load test, must contain only lower-case - alphabetic, numeric, underscore or hyphen characters. Required. - :type test_id: str - :param file_name: Name of the file. Required. - :type file_name: str - :return: TestFileInfo. The TestFileInfo is compatible with MutableMapping - :rtype: ~azure.developer.loadtesting.models.TestFileInfo + :param notification_rule_id: The unique identifier of the notification rule. Required. + :type notification_rule_id: str + :return: NotificationRule. The NotificationRule is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.NotificationRule :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -1749,11 +3549,10 @@ def get_test_file(self, test_id: str, file_name: str, **kwargs: Any) -> _models. 
_headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.TestFileInfo] = kwargs.pop("cls", None) + cls: ClsType[_models.NotificationRule] = kwargs.pop("cls", None) - _request = build_load_test_administration_get_test_file_request( - test_id=test_id, - file_name=file_name, + _request = build_load_test_administration_get_notification_rule_request( + notification_rule_id=notification_rule_id, api_version=self._config.api_version, headers=_headers, params=_params, @@ -1780,137 +3579,63 @@ def get_test_file(self, test_id: str, file_name: str, **kwargs: Any) -> _models. raise HttpResponseError(response=response) if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.TestFileInfo, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def list_test_files(self, test_id: str, **kwargs: Any) -> Iterable["_models.TestFileInfo"]: - """Get all test files. - - Get all test files. - - :param test_id: Unique test identifier for the load test, must contain only lower-case - alphabetic, numeric, underscore or hyphen characters. Required. - :type test_id: str - :return: An iterator like instance of TestFileInfo - :rtype: ~azure.core.paging.ItemPaged[~azure.developer.loadtesting.models.TestFileInfo] - :raises ~azure.core.exceptions.HttpResponseError: - """ - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[List[_models.TestFileInfo]] = kwargs.pop("cls", None) - - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - def prepare_request(next_link=None): - if not next_link: - - _request = build_load_test_administration_list_test_files_request( - test_id=test_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - else: - # make call to next link with the client's api-version - _parsed_next_link = urllib.parse.urlparse(next_link) - _next_request_params = case_insensitive_dict( - { - key: [urllib.parse.quote(v) for v in value] - for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() - } - ) - _next_request_params["api-version"] = self._config.api_version - _request = HttpRequest( - "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - return _request - - def extract_data(pipeline_response): - deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.TestFileInfo], deserialized["value"]) - if cls: - list_of_elem = cls(list_of_elem) # type: ignore - return deserialized.get("nextLink") or None, iter(list_of_elem) - - def get_next(next_link=None): - _request = prepare_request(next_link) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - response = 
pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.NotificationRule, response.json()) - return pipeline_response + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore - return ItemPaged(get_next, extract_data) + return deserialized # type: ignore @distributed_trace - def list_tests( + @api_version_validation( + method_added_on="2024-12-01-preview", + params_added_on={ + "2024-12-01-preview": [ + "api_version", + "test_ids", + "scopes", + "last_modified_start_time", + "last_modified_end_time", + "maxpagesize", + "accept", + ] + }, + api_versions_list=["2024-12-01-preview", "2025-03-01-preview", "2025-11-01-preview"], + ) + def list_notification_rules( self, *, - orderby: Optional[str] = None, - search: Optional[str] = None, + test_ids: Optional[str] = None, + scopes: Optional[str] = None, last_modified_start_time: Optional[datetime.datetime] = None, last_modified_end_time: Optional[datetime.datetime] = None, **kwargs: Any - ) -> Iterable["_models.Test"]: - """Get all load tests by the fully qualified resource Id e.g - subscriptions/{subId}/resourceGroups/{rg}/providers/Microsoft.LoadTestService/loadtests/{resName}. - - Get all load tests by the fully qualified resource Id e.g - subscriptions/{subId}/resourceGroups/{rg}/providers/Microsoft.LoadTestService/loadtests/{resName}. + ) -> ItemPaged["_models.NotificationRule"]: + """Resource list operation template. - :keyword orderby: Sort on the supported fields in (field asc/desc) format. eg: - lastModifiedDateTime asc. Supported fields - lastModifiedDateTime. Default value is None. - :paramtype orderby: str - :keyword search: Prefix based, case sensitive search on searchable fields - displayName, - createdBy. For example, to search for a test, with display name is Login Test, - the search parameter can be Login. Default value is None. - :paramtype search: str + :keyword test_ids: Search based on notification rules associated with the provided test ids. + Default value is None. + :paramtype test_ids: str + :keyword scopes: Search based on notification rules for the provided scopes. Default value is + None. + :paramtype scopes: str :keyword last_modified_start_time: Start DateTime(RFC 3339 literal format) of the last updated - time range to filter tests. Default value is None. + time range to filter notification rules. Default value is None. :paramtype last_modified_start_time: ~datetime.datetime :keyword last_modified_end_time: End DateTime(RFC 3339 literal format) of the last updated time - range to filter tests. Default value is None. + range to filter notification rules. Default value is None. 
:paramtype last_modified_end_time: ~datetime.datetime - :return: An iterator like instance of Test - :rtype: ~azure.core.paging.ItemPaged[~azure.developer.loadtesting.models.Test] + :return: An iterator like instance of NotificationRule + :rtype: ~azure.core.paging.ItemPaged[~azure.developer.loadtesting.models.NotificationRule] :raises ~azure.core.exceptions.HttpResponseError: """ _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} maxpagesize = kwargs.pop("maxpagesize", None) - cls: ClsType[List[_models.Test]] = kwargs.pop("cls", None) + cls: ClsType[list[_models.NotificationRule]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -1923,9 +3648,9 @@ def list_tests( def prepare_request(next_link=None): if not next_link: - _request = build_load_test_administration_list_tests_request( - orderby=orderby, - search=search, + _request = build_load_test_administration_list_notification_rules_request( + test_ids=test_ids, + scopes=scopes, last_modified_start_time=last_modified_start_time, last_modified_end_time=last_modified_end_time, maxpagesize=maxpagesize, @@ -1960,7 +3685,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.Test], deserialized["value"]) + list_of_elem = _deserialize(list[_models.NotificationRule], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("nextLink") or None, iter(list_of_elem) @@ -1982,39 +3707,21 @@ def get_next(next_link=None): return ItemPaged(get_next, extract_data) - @distributed_trace - def _begin_upload_test_file( + @api_version_validation( + method_added_on="2025-03-01-preview", + params_added_on={"2025-03-01-preview": ["api_version", "test_id", "content_type", "accept"]}, + api_versions_list=["2025-03-01-preview", "2025-11-01-preview"], + ) + def _clone_test_initial( self, test_id: str, - file_name: str, - body: bytes, + body: Union[JSON, IO[bytes]] = _Unset, *, - file_type: Optional[Union[str, _models.FileType]] = None, + new_test_id: str = _Unset, + display_name: Optional[str] = None, + description: Optional[str] = None, **kwargs: Any - ) -> _models.TestFileInfo: - """Upload input file for a given test Id. File size can't be more than 50 MB. - Existing file with same name for the given test will be overwritten. File - should be provided in the request body as application/octet-stream. - - Upload input file for a given test Id. File size can't be more than 50 MB. - Existing file with same name for the given test will be overwritten. File - should be provided in the request body as application/octet-stream. - - :param test_id: Unique name for the load test, must contain only lower-case alphabetic, - numeric, underscore or hyphen characters. Required. - :type test_id: str - :param file_name: Unique name for test file with file extension like : App.jmx. Required. - :type file_name: str - :param body: The file content as application/octet-stream. Required. - :type body: bytes - :keyword file_type: File type. Known values are: "JMX_FILE", "USER_PROPERTIES", - "ADDITIONAL_ARTIFACTS", "ZIPPED_ARTIFACTS", "URL_TEST_CONFIG", and "TEST_SCRIPT". Default value - is None. - :paramtype file_type: str or ~azure.developer.loadtesting.models.FileType - :return: TestFileInfo. 
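# Illustrative usage sketch for the notification rule operations shown above; this is not part of
# the generated patch. The rule payload is a placeholder assumption (see models.NotificationRule
# for the required fields), and identifiers are placeholders.
from azure.identity import DefaultAzureCredential
from azure.developer.loadtesting import LoadTestAdministrationClient

admin_client = LoadTestAdministrationClient(
    endpoint="<load-testing-data-plane-endpoint>",  # placeholder
    credential=DefaultAzureCredential(),
)

rule = admin_client.create_or_update_notification_rule(
    "sample-rule-id",
    {"displayName": "Notify on test run completion"},  # illustrative, likely partial payload
)

# Rules can be filtered by the tests and scopes they apply to.
for item in admin_client.list_notification_rules(test_ids="sample-test-id"):
    print(item)

admin_client.delete_notification_rule("sample-rule-id")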
The TestFileInfo is compatible with MutableMapping - :rtype: ~azure.developer.loadtesting.models.TestFileInfo - :raises ~azure.core.exceptions.HttpResponseError: - """ + ) -> Iterator[bytes]: error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, @@ -2026,15 +3733,23 @@ def _begin_upload_test_file( _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = kwargs.pop("params", {}) or {} - content_type: str = kwargs.pop("content_type", _headers.pop("content-type", "application/octet-stream")) - cls: ClsType[_models.TestFileInfo] = kwargs.pop("cls", None) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) - _content = body + if body is _Unset: + if new_test_id is _Unset: + raise TypeError("missing required argument: new_test_id") + body = {"description": description, "displayName": display_name, "newTestId": new_test_id} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_load_test_administration_begin_upload_test_file_request( + _request = build_load_test_administration_clone_test_request( test_id=test_id, - file_name=file_name, - file_type=file_type, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -2046,234 +3761,207 @@ def _begin_upload_test_file( } _request.url = self._client.format_url(_request.url, **path_format_arguments) - _stream = kwargs.pop("stream", False) + _stream = True pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs ) response = pipeline_response.http_response - if response.status_code not in [201]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass + if response.status_code not in [202]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.TestFileInfo, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def delete_test_file( # pylint: disable=inconsistent-return-statements - self, test_id: str, file_name: str, **kwargs: Any - ) -> None: - """Delete file by the file name for a test. - - Delete file by the file name for a test. - - :param test_id: Unique test identifier for the load test, must contain only lower-case - alphabetic, numeric, underscore or hyphen characters. Required. - :type test_id: str - :param file_name: Name of the file. Required. 
- :type file_name: str - :return: None - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[None] = kwargs.pop("cls", None) - - _request = build_load_test_administration_delete_test_file_request( - test_id=test_id, - file_name=file_name, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response + response_headers = {} + response_headers["Operation-Location"] = self._deserialize("str", response.headers.get("Operation-Location")) - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + deserialized = response.iter_bytes() if cls: - return cls(pipeline_response, None, {}) # type: ignore - - @distributed_trace - def delete_test(self, test_id: str, **kwargs: Any) -> None: # pylint: disable=inconsistent-return-statements - """Delete a test by its test Id. - - Delete a test by its test Id. - - :param test_id: Unique test identifier for the load test, must contain only lower-case - alphabetic, numeric, underscore or hyphen characters. Required. 
- :type test_id: str - :return: None - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[None] = kwargs.pop("cls", None) - - _request = build_load_test_administration_delete_test_request( - test_id=test_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + return cls(pipeline_response, deserialized, response_headers) # type: ignore - if cls: - return cls(pipeline_response, None, {}) # type: ignore + return deserialized # type: ignore @overload - def create_or_update_test_profile( + def begin_clone_test( self, - test_profile_id: str, - body: _models.TestProfile, + test_id: str, *, - content_type: str = "application/merge-patch+json", + new_test_id: str, + content_type: str = "application/json", + display_name: Optional[str] = None, + description: Optional[str] = None, **kwargs: Any - ) -> _models.TestProfile: - """Create a new test profile or update an existing test profile. + ) -> LROPoller[_models.Test]: + """Clone a load test. - Create a new test profile or update an existing test profile by providing the test profile Id. + Clone the given test with optional overrides applied to the clone test. - :param test_profile_id: Unique identifier for the test profile, must contain only lower-case + :param test_id: Unique test identifier for the load test, must contain only lower-case alphabetic, numeric, underscore or hyphen characters. Required. - :type test_profile_id: str - :param body: The resource instance. Required. - :type body: ~azure.developer.loadtesting.models.TestProfile + :type test_id: str + :keyword new_test_id: Unique identifier for the new test that will be created. Required. + :paramtype new_test_id: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/merge-patch+json". + Default value is "application/json". :paramtype content_type: str - :return: TestProfile. The TestProfile is compatible with MutableMapping - :rtype: ~azure.developer.loadtesting.models.TestProfile + :keyword display_name: Display Name override for the newly created test. Default value is None. + :paramtype display_name: str + :keyword description: Description override for the newly created test. Default value is None. + :paramtype description: str + :return: An instance of LROPoller that returns Test. 
The Test is compatible with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.developer.loadtesting.models.Test] :raises ~azure.core.exceptions.HttpResponseError: """ @overload - def create_or_update_test_profile( - self, test_profile_id: str, body: JSON, *, content_type: str = "application/merge-patch+json", **kwargs: Any - ) -> _models.TestProfile: - """Create a new test profile or update an existing test profile. + def begin_clone_test( + self, test_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> LROPoller[_models.Test]: + """Clone a load test. - Create a new test profile or update an existing test profile by providing the test profile Id. + Clone the given test with optional overrides applied to the clone test. - :param test_profile_id: Unique identifier for the test profile, must contain only lower-case + :param test_id: Unique test identifier for the load test, must contain only lower-case alphabetic, numeric, underscore or hyphen characters. Required. - :type test_profile_id: str - :param body: The resource instance. Required. + :type test_id: str + :param body: Required. :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/merge-patch+json". + Default value is "application/json". :paramtype content_type: str - :return: TestProfile. The TestProfile is compatible with MutableMapping - :rtype: ~azure.developer.loadtesting.models.TestProfile + :return: An instance of LROPoller that returns Test. The Test is compatible with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.developer.loadtesting.models.Test] :raises ~azure.core.exceptions.HttpResponseError: """ @overload - def create_or_update_test_profile( - self, - test_profile_id: str, - body: IO[bytes], - *, - content_type: str = "application/merge-patch+json", - **kwargs: Any - ) -> _models.TestProfile: - """Create a new test profile or update an existing test profile. + def begin_clone_test( + self, test_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> LROPoller[_models.Test]: + """Clone a load test. - Create a new test profile or update an existing test profile by providing the test profile Id. + Clone the given test with optional overrides applied to the clone test. - :param test_profile_id: Unique identifier for the test profile, must contain only lower-case + :param test_id: Unique test identifier for the load test, must contain only lower-case alphabetic, numeric, underscore or hyphen characters. Required. - :type test_profile_id: str - :param body: The resource instance. Required. + :type test_id: str + :param body: Required. :type body: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/merge-patch+json". + Default value is "application/json". :paramtype content_type: str - :return: TestProfile. The TestProfile is compatible with MutableMapping - :rtype: ~azure.developer.loadtesting.models.TestProfile + :return: An instance of LROPoller that returns Test. 
The Test is compatible with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.developer.loadtesting.models.Test] :raises ~azure.core.exceptions.HttpResponseError: """ @distributed_trace @api_version_validation( - method_added_on="2024-05-01-preview", - params_added_on={"2024-05-01-preview": ["api_version", "test_profile_id", "content_type", "accept"]}, + method_added_on="2025-03-01-preview", + params_added_on={"2025-03-01-preview": ["api_version", "test_id", "content_type", "accept"]}, + api_versions_list=["2025-03-01-preview", "2025-11-01-preview"], ) - def create_or_update_test_profile( - self, test_profile_id: str, body: Union[_models.TestProfile, JSON, IO[bytes]], **kwargs: Any - ) -> _models.TestProfile: - """Create a new test profile or update an existing test profile. + def begin_clone_test( + self, + test_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + new_test_id: str = _Unset, + display_name: Optional[str] = None, + description: Optional[str] = None, + **kwargs: Any + ) -> LROPoller[_models.Test]: + """Clone a load test. - Create a new test profile or update an existing test profile by providing the test profile Id. + Clone the given test with optional overrides applied to the clone test. - :param test_profile_id: Unique identifier for the test profile, must contain only lower-case + :param test_id: Unique test identifier for the load test, must contain only lower-case alphabetic, numeric, underscore or hyphen characters. Required. - :type test_profile_id: str - :param body: The resource instance. Is one of the following types: TestProfile, JSON, IO[bytes] - Required. - :type body: ~azure.developer.loadtesting.models.TestProfile or JSON or IO[bytes] - :return: TestProfile. The TestProfile is compatible with MutableMapping - :rtype: ~azure.developer.loadtesting.models.TestProfile + :type test_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword new_test_id: Unique identifier for the new test that will be created. Required. + :paramtype new_test_id: str + :keyword display_name: Display Name override for the newly created test. Default value is None. + :paramtype display_name: str + :keyword description: Description override for the newly created test. Default value is None. + :paramtype description: str + :return: An instance of LROPoller that returns Test. 
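# Illustrative usage sketch for begin_clone_test: the call shape mirrors the keyword-only overload
# documented above and returns an LROPoller. This is not part of the generated patch; the test
# identifiers and override values are placeholders.
from azure.identity import DefaultAzureCredential
from azure.developer.loadtesting import LoadTestAdministrationClient

admin_client = LoadTestAdministrationClient(
    endpoint="<load-testing-data-plane-endpoint>",  # placeholder
    credential=DefaultAzureCredential(),
)

poller = admin_client.begin_clone_test(
    "existing-test-id",
    new_test_id="cloned-test-id",
    display_name="Cloned load test",                   # optional override
    description="Clone created for regression runs",   # optional override
)
cloned_test = poller.result()  # blocks until the service finishes creating the clone
print(cloned_test)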
The Test is compatible with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.developer.loadtesting.models.Test] :raises ~azure.core.exceptions.HttpResponseError: """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Test] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._clone_test_initial( + test_id=test_id, + body=body, + new_test_id=new_test_id, + display_name=display_name, + description=description, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["Operation-Location"] = self._deserialize( + "str", response.headers.get("Operation-Location") + ) + + deserialized = _deserialize(_models.Test, response.json()) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[_models.Test].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[_models.Test]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + @api_version_validation( + method_added_on="2025-11-01-preview", + params_added_on={"2025-11-01-preview": ["api_version", "test_id", "accept"]}, + api_versions_list=["2025-11-01-preview"], + ) + def _generate_test_plan_recommendations_initial( # pylint: disable=name-too-long + self, test_id: str, **kwargs: Any + ) -> Iterator[bytes]: error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, @@ -2282,24 +3970,14 @@ def create_or_update_test_profile( } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.TestProfile] = kwargs.pop("cls", None) - - content_type = content_type or "application/merge-patch+json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) - _request = build_load_test_administration_create_or_update_test_profile_request( - test_profile_id=test_profile_id, - content_type=content_type, + _request = 
build_load_test_administration_generate_test_plan_recommendations_request( + test_id=test_id, api_version=self._config.api_version, - content=_content, headers=_headers, params=_params, ) @@ -2308,104 +3986,114 @@ def create_or_update_test_profile( } _request.url = self._client.format_url(_request.url, **path_format_arguments) - _stream = kwargs.pop("stream", False) + _stream = True pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access _request, stream=_stream, **kwargs ) response = pipeline_response.http_response - if response.status_code not in [200, 201]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass + if response.status_code not in [202]: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.TestProfile, response.json()) + response_headers = {} + response_headers["Operation-Location"] = self._deserialize("str", response.headers.get("Operation-Location")) + + deserialized = response.iter_bytes() if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: ignore return deserialized # type: ignore @distributed_trace @api_version_validation( - method_added_on="2024-05-01-preview", - params_added_on={"2024-05-01-preview": ["api_version", "test_profile_id", "accept"]}, + method_added_on="2025-11-01-preview", + params_added_on={"2025-11-01-preview": ["api_version", "test_id", "accept"]}, + api_versions_list=["2025-11-01-preview"], ) - def delete_test_profile( # pylint: disable=inconsistent-return-statements - self, test_profile_id: str, **kwargs: Any - ) -> None: - """Delete a test profile. + def begin_generate_test_plan_recommendations(self, test_id: str, **kwargs: Any) -> LROPoller[_models.Test]: + """Generate load test plan recommendations. - Delete a test profile by its test profile Id. + Generate AI Recommendations to author a load test plan using the uploaded browser recording + file. - :param test_profile_id: Unique identifier for the test profile, must contain only lower-case + :param test_id: Unique test identifier for the load test, must contain only lower-case alphabetic, numeric, underscore or hyphen characters. Required. - :type test_profile_id: str - :return: None - :rtype: None + :type test_id: str + :return: An instance of LROPoller that returns Test. 
The Test is compatible with MutableMapping + :rtype: ~azure.core.polling.LROPoller[~azure.developer.loadtesting.models.Test] :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[None] = kwargs.pop("cls", None) + cls: ClsType[_models.Test] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._generate_test_plan_recommendations_initial( + test_id=test_id, cls=lambda x, y, z: x, headers=_headers, params=_params, **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["Operation-Location"] = self._deserialize( + "str", response.headers.get("Operation-Location") + ) + + deserialized = _deserialize(_models.Test, response.json()) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized - _request = build_load_test_administration_delete_test_profile_request( - test_profile_id=test_profile_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[_models.Test].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[_models.Test]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore ) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if cls: - return cls(pipeline_response, None, {}) # type: ignore - @distributed_trace @api_version_validation( - method_added_on="2024-05-01-preview", - params_added_on={"2024-05-01-preview": ["api_version", "test_profile_id", "accept"]}, + method_added_on="2025-03-01-preview", + params_added_on={"2025-03-01-preview": ["api_version", "operation_id", "accept"]}, + api_versions_list=["2025-03-01-preview", "2025-11-01-preview"], ) - def get_test_profile(self, test_profile_id: str, **kwargs: Any) -> _models.TestProfile: - """Get load test profile details. + def get_operation_status(self, operation_id: str, **kwargs: Any) -> _models.OperationStatus: + """Get the status of a long running operation. 
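# Illustrative usage sketch for begin_generate_test_plan_recommendations: per the docstring above,
# the operation is long-running and relies on a browser recording file already uploaded to the
# test. This is not part of the generated patch; the test id is a placeholder.
from azure.identity import DefaultAzureCredential
from azure.developer.loadtesting import LoadTestAdministrationClient

admin_client = LoadTestAdministrationClient(
    endpoint="<load-testing-data-plane-endpoint>",  # placeholder
    credential=DefaultAzureCredential(),
)

poller = admin_client.begin_generate_test_plan_recommendations("existing-test-id")
updated_test = poller.result()  # Test model returned once the operation completes
print(updated_test)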
- Get load test profile details by test profile Id. + Get the status of a long running operation. - :param test_profile_id: Unique identifier for the test profile, must contain only lower-case - alphabetic, numeric, underscore or hyphen characters. Required. - :type test_profile_id: str - :return: TestProfile. The TestProfile is compatible with MutableMapping - :rtype: ~azure.developer.loadtesting.models.TestProfile + :param operation_id: The unique ID of the operation. Required. + :type operation_id: str + :return: OperationStatus. The OperationStatus is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.OperationStatus :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -2419,10 +4107,10 @@ def get_test_profile(self, test_profile_id: str, **kwargs: Any) -> _models.TestP _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.TestProfile] = kwargs.pop("cls", None) + cls: ClsType[_models.OperationStatus] = kwargs.pop("cls", None) - _request = build_load_test_administration_get_test_profile_request( - test_profile_id=test_profile_id, + _request = build_load_test_administration_get_operation_status_request( + operation_id=operation_id, api_version=self._config.api_version, headers=_headers, params=_params, @@ -2450,136 +4138,18 @@ def get_test_profile(self, test_profile_id: str, **kwargs: Any) -> _models.TestP if _stream: deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.TestProfile, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - @api_version_validation( - method_added_on="2024-05-01-preview", - params_added_on={ - "2024-05-01-preview": [ - "api_version", - "maxpagesize", - "last_modified_start_time", - "last_modified_end_time", - "test_profile_ids", - "test_ids", - "accept", - ] - }, - ) - def list_test_profiles( - self, - *, - last_modified_start_time: Optional[datetime.datetime] = None, - last_modified_end_time: Optional[datetime.datetime] = None, - test_profile_ids: Optional[List[str]] = None, - test_ids: Optional[List[str]] = None, - **kwargs: Any - ) -> Iterable["_models.TestProfile"]: - """List test profiles. - - Get all test profiles for the given filters. - - :keyword last_modified_start_time: Start DateTime(RFC 3339 literal format) of the last updated - time range to filter test profiles. Default value is None. - :paramtype last_modified_start_time: ~datetime.datetime - :keyword last_modified_end_time: End DateTime(RFC 3339 literal format) of the last updated time - range to filter test profiles. Default value is None. - :paramtype last_modified_end_time: ~datetime.datetime - :keyword test_profile_ids: Comma separated list of IDs of the test profiles to filter. Default - value is None. - :paramtype test_profile_ids: list[str] - :keyword test_ids: Comma separated list IDs of the tests which should be associated with the - test profiles to fetch. Default value is None. 
- :paramtype test_ids: list[str] - :return: An iterator like instance of TestProfile - :rtype: ~azure.core.paging.ItemPaged[~azure.developer.loadtesting.models.TestProfile] - :raises ~azure.core.exceptions.HttpResponseError: - """ - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - maxpagesize = kwargs.pop("maxpagesize", None) - cls: ClsType[List[_models.TestProfile]] = kwargs.pop("cls", None) - - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - def prepare_request(next_link=None): - if not next_link: - - _request = build_load_test_administration_list_test_profiles_request( - maxpagesize=maxpagesize, - last_modified_start_time=last_modified_start_time, - last_modified_end_time=last_modified_end_time, - test_profile_ids=test_profile_ids, - test_ids=test_ids, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - else: - # make call to next link with the client's api-version - _parsed_next_link = urllib.parse.urlparse(next_link) - _next_request_params = case_insensitive_dict( - { - key: [urllib.parse.quote(v) for v in value] - for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() - } - ) - _next_request_params["api-version"] = self._config.api_version - _request = HttpRequest( - "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - return _request - - def extract_data(pipeline_response): - deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.TestProfile], deserialized["value"]) - if cls: - list_of_elem = cls(list_of_elem) # type: ignore - return deserialized.get("nextLink") or None, iter(list_of_elem) - - def get_next(next_link=None): - _request = prepare_request(next_link) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) + else: + deserialized = _deserialize(_models.OperationStatus, response.json()) - return pipeline_response + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore - return ItemPaged(get_next, extract_data) + return deserialized # type: ignore -class LoadTestRunClientOperationsMixin(LoadTestRunClientMixinABC): +class _LoadTestRunClientOperationsMixin( # pylint: disable=too-many-public-methods + ClientMixinABC[PipelineClient[HttpRequest, HttpResponse], LoadTestRunClientConfiguration] +): @overload def _begin_test_run( @@ -2715,7 +4285,7 @@ def create_or_update_app_components( Add an app component to a test run by providing the resource Id, name and type. 
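The paging plumbing used throughout these operations (prepare_request / extract_data / get_next feeding azure.core.paging.ItemPaged) follows the standard azure-core contract; a stripped-down sketch of that contract, with in-memory pages standing in for pipeline responses:

from azure.core.paging import ItemPaged

# Two fake "pages" keyed by continuation token, mimicking nextLink behaviour.
_PAGES = {
    None: {"value": ["a", "b"], "nextLink": "page2"},
    "page2": {"value": ["c"], "nextLink": None},
}

def get_next(continuation_token=None):
    # In the generated code this builds and sends the next HTTP request.
    return _PAGES[continuation_token]

def extract_data(page):
    # In the generated code this deserializes page["value"] into models and
    # returns (next continuation token, iterator of items).
    return page.get("nextLink") or None, iter(page["value"])

for item in ItemPaged(get_next, extract_data):
    print(item)  # a, b, c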
- :param test_run_id: Unique Id for the load test run, must contain only lower-case alphabetic, + :param test_run_id: Unique name for the load test run, must contain only lower-case alphabetic, numeric, underscore or hyphen characters. Required. :type test_run_id: str :param body: App Component model. Required. @@ -2736,7 +4306,7 @@ def create_or_update_app_components( Add an app component to a test run by providing the resource Id, name and type. - :param test_run_id: Unique Id for the load test run, must contain only lower-case alphabetic, + :param test_run_id: Unique name for the load test run, must contain only lower-case alphabetic, numeric, underscore or hyphen characters. Required. :type test_run_id: str :param body: App Component model. Required. @@ -2757,7 +4327,7 @@ def create_or_update_app_components( Add an app component to a test run by providing the resource Id, name and type. - :param test_run_id: Unique Id for the load test run, must contain only lower-case alphabetic, + :param test_run_id: Unique name for the load test run, must contain only lower-case alphabetic, numeric, underscore or hyphen characters. Required. :type test_run_id: str :param body: App Component model. Required. @@ -2778,7 +4348,7 @@ def create_or_update_app_components( Add an app component to a test run by providing the resource Id, name and type. - :param test_run_id: Unique Id for the load test run, must contain only lower-case alphabetic, + :param test_run_id: Unique name for the load test run, must contain only lower-case alphabetic, numeric, underscore or hyphen characters. Required. :type test_run_id: str :param body: App Component model. Is one of the following types: TestRunAppComponents, JSON, @@ -2861,7 +4431,7 @@ def create_or_update_server_metrics_config( Configure server metrics for a test run. - :param test_run_id: Unique Id for the load test run, must contain only lower-case alphabetic, + :param test_run_id: Unique name for the load test run, must contain only lower-case alphabetic, numeric, underscore or hyphen characters. Required. :type test_run_id: str :param body: Server metric configuration model. Required. @@ -2883,7 +4453,7 @@ def create_or_update_server_metrics_config( Configure server metrics for a test run. - :param test_run_id: Unique Id for the load test run, must contain only lower-case alphabetic, + :param test_run_id: Unique name for the load test run, must contain only lower-case alphabetic, numeric, underscore or hyphen characters. Required. :type test_run_id: str :param body: Server metric configuration model. Required. @@ -2905,7 +4475,7 @@ def create_or_update_server_metrics_config( Configure server metrics for a test run. - :param test_run_id: Unique Id for the load test run, must contain only lower-case alphabetic, + :param test_run_id: Unique name for the load test run, must contain only lower-case alphabetic, numeric, underscore or hyphen characters. Required. :type test_run_id: str :param body: Server metric configuration model. Required. @@ -2927,7 +4497,7 @@ def create_or_update_server_metrics_config( Configure server metrics for a test run. - :param test_run_id: Unique Id for the load test run, must contain only lower-case alphabetic, + :param test_run_id: Unique name for the load test run, must contain only lower-case alphabetic, numeric, underscore or hyphen characters. Required. :type test_run_id: str :param body: Server metric configuration model. 
Is one of the following types: @@ -3321,7 +4891,7 @@ def list_metric_dimension_values( time_interval: str, interval: Optional[Union[str, _models.TimeGrain]] = None, **kwargs: Any - ) -> Iterable[str]: + ) -> ItemPaged[str]: """List the dimension values for the given metric dimension name. List the dimension values for the given metric dimension name. @@ -3348,7 +4918,7 @@ def list_metric_dimension_values( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[str]] = kwargs.pop("cls", None) + cls: ClsType[list[str]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -3399,7 +4969,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[str], deserialized["value"]) + list_of_elem = _deserialize(list[str], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("nextLink") or None, iter(list_of_elem) @@ -3567,7 +5137,7 @@ def list_metrics( interval: Optional[Union[str, _models.TimeGrain]] = None, content_type: str = "application/json", **kwargs: Any - ) -> Iterable["_models.TimeSeriesElement"]: + ) -> ItemPaged["_models.TimeSeriesElement"]: """List the metric values for a load test run. List the metric values for a load test run. @@ -3610,7 +5180,7 @@ def list_metrics( interval: Optional[Union[str, _models.TimeGrain]] = None, content_type: str = "application/json", **kwargs: Any - ) -> Iterable["_models.TimeSeriesElement"]: + ) -> ItemPaged["_models.TimeSeriesElement"]: """List the metric values for a load test run. List the metric values for a load test run. @@ -3653,7 +5223,7 @@ def list_metrics( interval: Optional[Union[str, _models.TimeGrain]] = None, content_type: str = "application/json", **kwargs: Any - ) -> Iterable["_models.TimeSeriesElement"]: + ) -> ItemPaged["_models.TimeSeriesElement"]: """List the metric values for a load test run. List the metric values for a load test run. @@ -3695,7 +5265,7 @@ def list_metrics( aggregation: Optional[str] = None, interval: Optional[Union[str, _models.TimeGrain]] = None, **kwargs: Any - ) -> Iterable["_models.TimeSeriesElement"]: + ) -> ItemPaged["_models.TimeSeriesElement"]: """List the metric values for a load test run. List the metric values for a load test run. 
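The Iterable[...] to ItemPaged[...] return-type changes in this hunk do not alter runtime behaviour, but they do make the pager API (for example by_page()) visible in the type hints. A hedged sketch; the positional dimension-name argument and the metric_name / metric_namespace keywords are assumptions about the parts of the signature not visible in this excerpt:

from azure.developer.loadtesting import LoadTestRunClient
from azure.identity import DefaultAzureCredential

run_client = LoadTestRunClient(
    endpoint="<data-plane-endpoint>", credential=DefaultAzureCredential()
)

# list_metric_dimension_values now advertises ItemPaged[str] rather than Iterable[str].
pager = run_client.list_metric_dimension_values(
    "my-test-run-id",                      # test_run_id
    "<dimension-name>",                    # assumed positional parameter
    metric_name="<metric-name>",           # assumed keyword
    metric_namespace="<metric-namespace>", # assumed keyword
    time_interval="2026-01-15T00:00:00Z/2026-01-15T01:00:00Z",
)

for page in pager.by_page():  # page-level iteration is now visible in the type
    for value in page:
        print(value)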
@@ -3726,7 +5296,8 @@ def list_metrics( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[List[_models.TimeSeriesElement]] = kwargs.pop("cls", None) + content_type = content_type if body else None + cls: ClsType[list[_models.TimeSeriesElement]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -3735,7 +5306,7 @@ def list_metrics( 304: ResourceNotModifiedError, } error_map.update(kwargs.pop("error_map", {}) or {}) - content_type = content_type or "application/json" + content_type = content_type or "application/json" if body else None _content = None if isinstance(body, (IOBase, bytes)): _content = body @@ -3788,7 +5359,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.TimeSeriesElement], deserialized["value"]) + list_of_elem = _deserialize(list[_models.TimeSeriesElement], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("nextLink") or None, iter(list_of_elem) @@ -3811,6 +5382,19 @@ def get_next(next_link=None): return ItemPaged(get_next, extract_data) @distributed_trace + @api_version_validation( + params_added_on={"2024-07-01-preview": ["created_by_types"], "2025-03-01-preview": ["test_ids"]}, + api_versions_list=[ + "2022-11-01", + "2023-04-01-preview", + "2024-03-01-preview", + "2024-05-01-preview", + "2024-07-01-preview", + "2024-12-01-preview", + "2025-03-01-preview", + "2025-11-01-preview", + ], + ) def list_test_runs( self, *, @@ -3820,8 +5404,10 @@ def list_test_runs( execution_from: Optional[datetime.datetime] = None, execution_to: Optional[datetime.datetime] = None, status: Optional[str] = None, + created_by_types: Optional[list[str]] = None, + test_ids: Optional[list[str]] = None, **kwargs: Any - ) -> Iterable["_models.TestRun"]: + ) -> ItemPaged["_models.TestRun"]: """Get all test runs for the given filters. Get all test runs for the given filters. @@ -3843,6 +5429,12 @@ def list_test_runs( :paramtype execution_to: ~datetime.datetime :keyword status: Comma separated list of test run status. Default value is None. :paramtype status: str + :keyword created_by_types: Comma separated list of type of entities that have created the test + run. Default value is None. + :paramtype created_by_types: list[str] + :keyword test_ids: Comma-separated list of test IDs. If you are using testIds, do not send a + value for testId. Default value is None. 
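A hedged sketch of the two filters added to list_test_runs here (created_by_types from 2024-07-01-preview, test_ids from 2025-03-01-preview), assuming the mixin method is exposed unchanged on the public LoadTestRunClient; the filter values are placeholders.

from azure.developer.loadtesting import LoadTestRunClient
from azure.identity import DefaultAzureCredential

run_client = LoadTestRunClient(
    endpoint="<data-plane-endpoint>", credential=DefaultAzureCredential()
)

for test_run in run_client.list_test_runs(
    status="DONE,FAILED",
    created_by_types=["<created-by-type>"],        # placeholder entity type
    test_ids=["my-first-test", "my-second-test"],  # per the docstring, do not also send testId
):
    print(test_run)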
+ :paramtype test_ids: list[str] :return: An iterator like instance of TestRun :rtype: ~azure.core.paging.ItemPaged[~azure.developer.loadtesting.models.TestRun] :raises ~azure.core.exceptions.HttpResponseError: @@ -3851,7 +5443,7 @@ def list_test_runs( _params = kwargs.pop("params", {}) or {} maxpagesize = kwargs.pop("maxpagesize", None) - cls: ClsType[List[_models.TestRun]] = kwargs.pop("cls", None) + cls: ClsType[list[_models.TestRun]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -3872,6 +5464,8 @@ def prepare_request(next_link=None): execution_to=execution_to, status=status, maxpagesize=maxpagesize, + created_by_types=created_by_types, + test_ids=test_ids, api_version=self._config.api_version, headers=_headers, params=_params, @@ -3903,7 +5497,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.TestRun], deserialized["value"]) + list_of_elem = _deserialize(list[_models.TestRun], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("nextLink") or None, iter(list_of_elem) @@ -4015,6 +5609,13 @@ def _begin_test_profile_run( @api_version_validation( method_added_on="2024-05-01-preview", params_added_on={"2024-05-01-preview": ["api_version", "test_profile_run_id", "content_type", "accept"]}, + api_versions_list=[ + "2024-05-01-preview", + "2024-07-01-preview", + "2024-12-01-preview", + "2025-03-01-preview", + "2025-11-01-preview", + ], ) def _begin_test_profile_run( self, test_profile_run_id: str, body: Union[_models.TestProfileRun, JSON, IO[bytes]], **kwargs: Any @@ -4096,7 +5697,14 @@ def _begin_test_profile_run( @distributed_trace @api_version_validation( method_added_on="2024-05-01-preview", - params_added_on={"2024-05-01-preview": ["api_version", "test_profile_run_id", "accept"]}, + params_added_on={"2024-05-01-preview": ["api_version", "test_profile_run_id"]}, + api_versions_list=[ + "2024-05-01-preview", + "2024-07-01-preview", + "2024-12-01-preview", + "2025-03-01-preview", + "2025-11-01-preview", + ], ) def delete_test_profile_run( # pylint: disable=inconsistent-return-statements self, test_profile_run_id: str, **kwargs: Any @@ -4154,6 +5762,13 @@ def delete_test_profile_run( # pylint: disable=inconsistent-return-statements @api_version_validation( method_added_on="2024-05-01-preview", params_added_on={"2024-05-01-preview": ["api_version", "test_profile_run_id", "accept"]}, + api_versions_list=[ + "2024-05-01-preview", + "2024-07-01-preview", + "2024-12-01-preview", + "2025-03-01-preview", + "2025-11-01-preview", + ], ) def get_test_profile_run(self, test_profile_run_id: str, **kwargs: Any) -> _models.TestProfileRun: """Get test profile run details. 
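For completeness, a hedged sketch of the test-profile-run calls whose api_versions_list entries are being extended in this hunk, again assuming the mixin methods are surfaced unchanged on the public LoadTestRunClient:

from azure.developer.loadtesting import LoadTestRunClient
from azure.identity import DefaultAzureCredential

run_client = LoadTestRunClient(
    endpoint="<data-plane-endpoint>", credential=DefaultAzureCredential()
)

profile_run = run_client.get_test_profile_run("my-test-profile-run-id")
print(profile_run)

# stop returns the updated TestProfileRun; delete returns None on HTTP 204.
stopped = run_client.stop_test_profile_run("my-test-profile-run-id")
print(stopped)
run_client.delete_test_profile_run("my-test-profile-run-id")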
@@ -4236,6 +5851,13 @@ def get_test_profile_run(self, test_profile_run_id: str, **kwargs: Any) -> _mode "accept", ] }, + api_versions_list=[ + "2024-05-01-preview", + "2024-07-01-preview", + "2024-12-01-preview", + "2025-03-01-preview", + "2025-11-01-preview", + ], ) def list_test_profile_runs( self, @@ -4246,11 +5868,11 @@ def list_test_profile_runs( max_end_date_time: Optional[datetime.datetime] = None, created_date_start_time: Optional[datetime.datetime] = None, created_date_end_time: Optional[datetime.datetime] = None, - test_profile_run_ids: Optional[List[str]] = None, - test_profile_ids: Optional[List[str]] = None, - statuses: Optional[List[str]] = None, + test_profile_run_ids: Optional[list[str]] = None, + test_profile_ids: Optional[list[str]] = None, + statuses: Optional[list[str]] = None, **kwargs: Any - ) -> Iterable["_models.TestProfileRun"]: + ) -> ItemPaged["_models.TestProfileRun"]: """List test profile runs. Get all test profile runs for the given filters. @@ -4290,7 +5912,7 @@ def list_test_profile_runs( _params = kwargs.pop("params", {}) or {} maxpagesize = kwargs.pop("maxpagesize", None) - cls: ClsType[List[_models.TestProfileRun]] = kwargs.pop("cls", None) + cls: ClsType[list[_models.TestProfileRun]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -4345,7 +5967,7 @@ def prepare_request(next_link=None): def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.TestProfileRun], deserialized["value"]) + list_of_elem = _deserialize(list[_models.TestProfileRun], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("nextLink") or None, iter(list_of_elem) @@ -4371,6 +5993,13 @@ def get_next(next_link=None): @api_version_validation( method_added_on="2024-05-01-preview", params_added_on={"2024-05-01-preview": ["api_version", "test_profile_run_id", "accept"]}, + api_versions_list=[ + "2024-05-01-preview", + "2024-07-01-preview", + "2024-12-01-preview", + "2025-03-01-preview", + "2025-11-01-preview", + ], ) def stop_test_profile_run(self, test_profile_run_id: str, **kwargs: Any) -> _models.TestProfileRun: """Stop test profile run. @@ -4433,3 +6062,334 @@ def stop_test_profile_run(self, test_profile_run_id: str, **kwargs: Any) -> _mod return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore + + @distributed_trace + @api_version_validation( + method_added_on="2025-03-01-preview", + params_added_on={"2025-03-01-preview": ["api_version", "test_run_id", "accept"]}, + api_versions_list=["2025-03-01-preview", "2025-11-01-preview"], + ) + def get_latest_test_run_insights(self, test_run_id: str, **kwargs: Any) -> _models.TestRunInsights: + """Get the latest insights for the test run. + + Get the latest insights for the test run. + + :param test_run_id: Unique name for the load test run, must contain only lower-case alphabetic, + numeric, underscore or hyphen characters. Required. + :type test_run_id: str + :return: TestRunInsights. 
The TestRunInsights is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.TestRunInsights + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.TestRunInsights] = kwargs.pop("cls", None) + + _request = build_load_test_run_get_latest_test_run_insights_request( + test_run_id=test_run_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.TestRunInsights, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def patch_latest_test_run_insights( + self, + test_run_id: str, + body: _models.TestRunInsights, + *, + content_type: str = "application/merge-patch+json", + **kwargs: Any + ) -> _models.TestRunInsights: + """Update the latest insights for the test run. + + Update the latest insights for the test run. + + :param test_run_id: Unique name for the load test run, must contain only lower-case alphabetic, + numeric, underscore or hyphen characters. Required. + :type test_run_id: str + :param body: Test run insights model. Required. + :type body: ~azure.developer.loadtesting.models.TestRunInsights + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/merge-patch+json". + :paramtype content_type: str + :return: TestRunInsights. The TestRunInsights is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.TestRunInsights + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def patch_latest_test_run_insights( + self, test_run_id: str, body: JSON, *, content_type: str = "application/merge-patch+json", **kwargs: Any + ) -> _models.TestRunInsights: + """Update the latest insights for the test run. + + Update the latest insights for the test run. + + :param test_run_id: Unique name for the load test run, must contain only lower-case alphabetic, + numeric, underscore or hyphen characters. Required. + :type test_run_id: str + :param body: Test run insights model. Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/merge-patch+json". + :paramtype content_type: str + :return: TestRunInsights. 
The TestRunInsights is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.TestRunInsights + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def patch_latest_test_run_insights( + self, test_run_id: str, body: IO[bytes], *, content_type: str = "application/merge-patch+json", **kwargs: Any + ) -> _models.TestRunInsights: + """Update the latest insights for the test run. + + Update the latest insights for the test run. + + :param test_run_id: Unique name for the load test run, must contain only lower-case alphabetic, + numeric, underscore or hyphen characters. Required. + :type test_run_id: str + :param body: Test run insights model. Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/merge-patch+json". + :paramtype content_type: str + :return: TestRunInsights. The TestRunInsights is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.TestRunInsights + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + @api_version_validation( + method_added_on="2025-11-01-preview", + params_added_on={"2025-11-01-preview": ["api_version", "content_type", "test_run_id", "accept"]}, + api_versions_list=["2025-11-01-preview"], + ) + def patch_latest_test_run_insights( + self, test_run_id: str, body: Union[_models.TestRunInsights, JSON, IO[bytes]], **kwargs: Any + ) -> _models.TestRunInsights: + """Update the latest insights for the test run. + + Update the latest insights for the test run. + + :param test_run_id: Unique name for the load test run, must contain only lower-case alphabetic, + numeric, underscore or hyphen characters. Required. + :type test_run_id: str + :param body: Test run insights model. Is one of the following types: TestRunInsights, JSON, + IO[bytes] Required. + :type body: ~azure.developer.loadtesting.models.TestRunInsights or JSON or IO[bytes] + :return: TestRunInsights. 
The TestRunInsights is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.TestRunInsights + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("content-type", None)) + cls: ClsType[_models.TestRunInsights] = kwargs.pop("cls", None) + + content_type = content_type or "application/merge-patch+json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_load_test_run_patch_latest_test_run_insights_request( + test_run_id=test_run_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.TestRunInsights, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @api_version_validation( + method_added_on="2025-03-01-preview", + params_added_on={"2025-03-01-preview": ["api_version", "test_run_id", "accept"]}, + api_versions_list=["2025-03-01-preview", "2025-11-01-preview"], + ) + def _generate_test_run_insights_initial(self, test_run_id: str, **kwargs: Any) -> Iterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + _request = build_load_test_run_generate_test_run_insights_request( + test_run_id=test_run_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + try: + response.read() # Load the body in memory and close the socket + except 
(StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["Operation-Location"] = self._deserialize("str", response.headers.get("Operation-Location")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + @api_version_validation( + method_added_on="2025-03-01-preview", + params_added_on={"2025-03-01-preview": ["api_version", "test_run_id", "accept"]}, + api_versions_list=["2025-03-01-preview", "2025-11-01-preview"], + ) + def begin_generate_test_run_insights(self, test_run_id: str, **kwargs: Any) -> LROPoller[None]: + """Generate insights for the test run. + + Generate insights for the test run. + + :param test_run_id: Unique name for the load test run, must contain only lower-case alphabetic, + numeric, underscore or hyphen characters. Required. + :type test_run_id: str + :return: An instance of LROPoller that returns None + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._generate_test_run_insights_initial( + test_run_id=test_run_id, cls=lambda x, y, z: x, headers=_headers, params=_params, **kwargs + ) + raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore diff --git a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_operations/_patch.py b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_operations/_patch.py index 026ffb5086c7..41147c8d691b 100644 --- a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_operations/_patch.py +++ b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_operations/_patch.py @@ -16,8 +16,8 @@ from azure.core.tracing.decorator import distributed_trace from ._operations import JSON -from ._operations import LoadTestAdministrationClientOperationsMixin as GeneratedAdministrationClientOperations -from ._operations import LoadTestRunClientOperationsMixin as GeneratedRunClientOperations +from ._operations import 
_LoadTestAdministrationClientOperationsMixin as GeneratedAdministrationClientOperations +from ._operations import _LoadTestRunClientOperationsMixin as GeneratedRunClientOperations from .. import models as _models diff --git a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_utils/__init__.py b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_utils/__init__.py new file mode 100644 index 000000000000..8026245c2abc --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_utils/__init__.py @@ -0,0 +1,6 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- diff --git a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_utils/model_base.py b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_utils/model_base.py new file mode 100644 index 000000000000..ab83e61092d7 --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_utils/model_base.py @@ -0,0 +1,1343 @@ +# pylint: disable=line-too-long,useless-suppression,too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +# pylint: disable=protected-access, broad-except + +import copy +import calendar +import decimal +import functools +import sys +import logging +import base64 +import re +import typing +import enum +import email.utils +from datetime import datetime, date, time, timedelta, timezone +from json import JSONEncoder +import xml.etree.ElementTree as ET +from collections.abc import MutableMapping +from typing_extensions import Self +import isodate +from azure.core.exceptions import DeserializationError +from azure.core import CaseInsensitiveEnumMeta +from azure.core.pipeline import PipelineResponse +from azure.core.serialization import _Null +from azure.core.rest import HttpResponse + +_LOGGER = logging.getLogger(__name__) + +__all__ = ["SdkJSONEncoder", "Model", "rest_field", "rest_discriminator"] + +TZ_UTC = timezone.utc +_T = typing.TypeVar("_T") + + +def _timedelta_as_isostr(td: timedelta) -> str: + """Converts a datetime.timedelta object into an ISO 8601 formatted string, e.g. 
'P4DT12H30M05S' + + Function adapted from the Tin Can Python project: https://github.com/RusticiSoftware/TinCanPython + + :param timedelta td: The timedelta to convert + :rtype: str + :return: ISO8601 version of this timedelta + """ + + # Split seconds to larger units + seconds = td.total_seconds() + minutes, seconds = divmod(seconds, 60) + hours, minutes = divmod(minutes, 60) + days, hours = divmod(hours, 24) + + days, hours, minutes = list(map(int, (days, hours, minutes))) + seconds = round(seconds, 6) + + # Build date + date_str = "" + if days: + date_str = "%sD" % days + + if hours or minutes or seconds: + # Build time + time_str = "T" + + # Hours + bigger_exists = date_str or hours + if bigger_exists: + time_str += "{:02}H".format(hours) + + # Minutes + bigger_exists = bigger_exists or minutes + if bigger_exists: + time_str += "{:02}M".format(minutes) + + # Seconds + try: + if seconds.is_integer(): + seconds_string = "{:02}".format(int(seconds)) + else: + # 9 chars long w/ leading 0, 6 digits after decimal + seconds_string = "%09.6f" % seconds + # Remove trailing zeros + seconds_string = seconds_string.rstrip("0") + except AttributeError: # int.is_integer() raises + seconds_string = "{:02}".format(seconds) + + time_str += "{}S".format(seconds_string) + else: + time_str = "" + + return "P" + date_str + time_str + + +def _serialize_bytes(o, format: typing.Optional[str] = None) -> str: + encoded = base64.b64encode(o).decode() + if format == "base64url": + return encoded.strip("=").replace("+", "-").replace("/", "_") + return encoded + + +def _serialize_datetime(o, format: typing.Optional[str] = None): + if hasattr(o, "year") and hasattr(o, "hour"): + if format == "rfc7231": + return email.utils.format_datetime(o, usegmt=True) + if format == "unix-timestamp": + return int(calendar.timegm(o.utctimetuple())) + + # astimezone() fails for naive times in Python 2.7, so make make sure o is aware (tzinfo is set) + if not o.tzinfo: + iso_formatted = o.replace(tzinfo=TZ_UTC).isoformat() + else: + iso_formatted = o.astimezone(TZ_UTC).isoformat() + # Replace the trailing "+00:00" UTC offset with "Z" (RFC 3339: https://www.ietf.org/rfc/rfc3339.txt) + return iso_formatted.replace("+00:00", "Z") + # Next try datetime.date or datetime.time + return o.isoformat() + + +def _is_readonly(p): + try: + return p._visibility == ["read"] + except AttributeError: + return False + + +class SdkJSONEncoder(JSONEncoder): + """A JSON encoder that's capable of serializing datetime objects and bytes.""" + + def __init__(self, *args, exclude_readonly: bool = False, format: typing.Optional[str] = None, **kwargs): + super().__init__(*args, **kwargs) + self.exclude_readonly = exclude_readonly + self.format = format + + def default(self, o): # pylint: disable=too-many-return-statements + if _is_model(o): + if self.exclude_readonly: + readonly_props = [p._rest_name for p in o._attr_to_rest_field.values() if _is_readonly(p)] + return {k: v for k, v in o.items() if k not in readonly_props} + return dict(o.items()) + try: + return super(SdkJSONEncoder, self).default(o) + except TypeError: + if isinstance(o, _Null): + return None + if isinstance(o, decimal.Decimal): + return float(o) + if isinstance(o, (bytes, bytearray)): + return _serialize_bytes(o, self.format) + try: + # First try datetime.datetime + return _serialize_datetime(o, self.format) + except AttributeError: + pass + # Last, try datetime.timedelta + try: + return _timedelta_as_isostr(o) + except AttributeError: + # This will be raised when it hits value.total_seconds 
in the method above + pass + return super(SdkJSONEncoder, self).default(o) + + +_VALID_DATE = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}" + r"\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?") +_VALID_RFC7231 = re.compile( + r"(Mon|Tue|Wed|Thu|Fri|Sat|Sun),\s\d{2}\s" + r"(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s\d{4}\s\d{2}:\d{2}:\d{2}\sGMT" +) + +_ARRAY_ENCODE_MAPPING = { + "pipeDelimited": "|", + "spaceDelimited": " ", + "commaDelimited": ",", + "newlineDelimited": "\n", +} + + +def _deserialize_array_encoded(delimit: str, attr): + if isinstance(attr, str): + if attr == "": + return [] + return attr.split(delimit) + return attr + + +def _deserialize_datetime(attr: typing.Union[str, datetime]) -> datetime: + """Deserialize ISO-8601 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: ~datetime.datetime + :returns: The datetime object from that input + """ + if isinstance(attr, datetime): + # i'm already deserialized + return attr + attr = attr.upper() + match = _VALID_DATE.match(attr) + if not match: + raise ValueError("Invalid datetime string: " + attr) + + check_decimal = attr.split(".") + if len(check_decimal) > 1: + decimal_str = "" + for digit in check_decimal[1]: + if digit.isdigit(): + decimal_str += digit + else: + break + if len(decimal_str) > 6: + attr = attr.replace(decimal_str, decimal_str[0:6]) + + date_obj = isodate.parse_datetime(attr) + test_utc = date_obj.utctimetuple() + if test_utc.tm_year > 9999 or test_utc.tm_year < 1: + raise OverflowError("Hit max or min date") + return date_obj + + +def _deserialize_datetime_rfc7231(attr: typing.Union[str, datetime]) -> datetime: + """Deserialize RFC7231 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: ~datetime.datetime + :returns: The datetime object from that input + """ + if isinstance(attr, datetime): + # i'm already deserialized + return attr + match = _VALID_RFC7231.match(attr) + if not match: + raise ValueError("Invalid datetime string: " + attr) + + return email.utils.parsedate_to_datetime(attr) + + +def _deserialize_datetime_unix_timestamp(attr: typing.Union[float, datetime]) -> datetime: + """Deserialize unix timestamp into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: ~datetime.datetime + :returns: The datetime object from that input + """ + if isinstance(attr, datetime): + # i'm already deserialized + return attr + return datetime.fromtimestamp(attr, TZ_UTC) + + +def _deserialize_date(attr: typing.Union[str, date]) -> date: + """Deserialize ISO-8601 formatted string into Date object. + :param str attr: response string to be deserialized. + :rtype: date + :returns: The date object from that input + """ + # This must NOT use defaultmonth/defaultday. Using None ensure this raises an exception. + if isinstance(attr, date): + return attr + return isodate.parse_date(attr, defaultmonth=None, defaultday=None) # type: ignore + + +def _deserialize_time(attr: typing.Union[str, time]) -> time: + """Deserialize ISO-8601 formatted string into time object. + + :param str attr: response string to be deserialized. 
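The deserialization helpers above are internal, but their behaviour is easy to sanity-check in isolation; a sketch assuming the module path azure.developer.loadtesting._utils.model_base added by this patch (private names, so subject to change on regeneration):

from azure.developer.loadtesting._utils import model_base as mb

# RFC 3339, RFC 7231 and unix-timestamp datetimes all normalise to aware datetimes.
print(mb._deserialize_datetime("2026-01-15T09:07:10.1234567Z"))  # fraction trimmed to 6 digits
print(mb._deserialize_datetime_rfc7231("Thu, 15 Jan 2026 09:07:10 GMT"))
print(mb._deserialize_datetime_unix_timestamp(1768468030))

# base64 and base64url payloads both round-trip to bytes (padding restored).
print(mb._deserialize_bytes("aGVsbG8="))        # b'hello'
print(mb._deserialize_bytes_base64("aGVsbG8"))  # b'hello'

# Durations and decimals keep precise types.
print(mb._deserialize_duration("P1DT2H"))       # datetime.timedelta(days=1, seconds=7200)
print(mb._deserialize_decimal("1.05"))          # Decimal('1.05')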
+ :rtype: datetime.time + :returns: The time object from that input + """ + if isinstance(attr, time): + return attr + return isodate.parse_time(attr) + + +def _deserialize_bytes(attr): + if isinstance(attr, (bytes, bytearray)): + return attr + return bytes(base64.b64decode(attr)) + + +def _deserialize_bytes_base64(attr): + if isinstance(attr, (bytes, bytearray)): + return attr + padding = "=" * (3 - (len(attr) + 3) % 4) # type: ignore + attr = attr + padding # type: ignore + encoded = attr.replace("-", "+").replace("_", "/") + return bytes(base64.b64decode(encoded)) + + +def _deserialize_duration(attr): + if isinstance(attr, timedelta): + return attr + return isodate.parse_duration(attr) + + +def _deserialize_decimal(attr): + if isinstance(attr, decimal.Decimal): + return attr + return decimal.Decimal(str(attr)) + + +def _deserialize_int_as_str(attr): + if isinstance(attr, int): + return attr + return int(attr) + + +_DESERIALIZE_MAPPING = { + datetime: _deserialize_datetime, + date: _deserialize_date, + time: _deserialize_time, + bytes: _deserialize_bytes, + bytearray: _deserialize_bytes, + timedelta: _deserialize_duration, + typing.Any: lambda x: x, + decimal.Decimal: _deserialize_decimal, +} + +_DESERIALIZE_MAPPING_WITHFORMAT = { + "rfc3339": _deserialize_datetime, + "rfc7231": _deserialize_datetime_rfc7231, + "unix-timestamp": _deserialize_datetime_unix_timestamp, + "base64": _deserialize_bytes, + "base64url": _deserialize_bytes_base64, +} + + +def get_deserializer(annotation: typing.Any, rf: typing.Optional["_RestField"] = None): + if annotation is int and rf and rf._format == "str": + return _deserialize_int_as_str + if annotation is str and rf and rf._format in _ARRAY_ENCODE_MAPPING: + return functools.partial(_deserialize_array_encoded, _ARRAY_ENCODE_MAPPING[rf._format]) + if rf and rf._format: + return _DESERIALIZE_MAPPING_WITHFORMAT.get(rf._format) + return _DESERIALIZE_MAPPING.get(annotation) # pyright: ignore + + +def _get_type_alias_type(module_name: str, alias_name: str): + types = { + k: v + for k, v in sys.modules[module_name].__dict__.items() + if isinstance(v, typing._GenericAlias) # type: ignore + } + if alias_name not in types: + return alias_name + return types[alias_name] + + +def _get_model(module_name: str, model_name: str): + models = {k: v for k, v in sys.modules[module_name].__dict__.items() if isinstance(v, type)} + module_end = module_name.rsplit(".", 1)[0] + models.update({k: v for k, v in sys.modules[module_end].__dict__.items() if isinstance(v, type)}) + if isinstance(model_name, str): + model_name = model_name.split(".")[-1] + if model_name not in models: + return model_name + return models[model_name] + + +_UNSET = object() + + +class _MyMutableMapping(MutableMapping[str, typing.Any]): + def __init__(self, data: dict[str, typing.Any]) -> None: + self._data = data + + def __contains__(self, key: typing.Any) -> bool: + return key in self._data + + def __getitem__(self, key: str) -> typing.Any: + # If this key has been deserialized (for mutable types), we need to handle serialization + if hasattr(self, "_attr_to_rest_field"): + cache_attr = f"_deserialized_{key}" + if hasattr(self, cache_attr): + rf = _get_rest_field(getattr(self, "_attr_to_rest_field"), key) + if rf: + value = self._data.get(key) + if isinstance(value, (dict, list, set)): + # For mutable types, serialize and return + # But also update _data with serialized form and clear flag + # so mutations via this returned value affect _data + serialized = _serialize(value, rf._format) + # If serialized 
form is same type (no transformation needed), + # return _data directly so mutations work + if isinstance(serialized, type(value)) and serialized == value: + return self._data.get(key) + # Otherwise return serialized copy and clear flag + try: + object.__delattr__(self, cache_attr) + except AttributeError: + pass + # Store serialized form back + self._data[key] = serialized + return serialized + return self._data.__getitem__(key) + + def __setitem__(self, key: str, value: typing.Any) -> None: + # Clear any cached deserialized value when setting through dictionary access + cache_attr = f"_deserialized_{key}" + try: + object.__delattr__(self, cache_attr) + except AttributeError: + pass + self._data.__setitem__(key, value) + + def __delitem__(self, key: str) -> None: + self._data.__delitem__(key) + + def __iter__(self) -> typing.Iterator[typing.Any]: + return self._data.__iter__() + + def __len__(self) -> int: + return self._data.__len__() + + def __ne__(self, other: typing.Any) -> bool: + return not self.__eq__(other) + + def keys(self) -> typing.KeysView[str]: + """ + :returns: a set-like object providing a view on D's keys + :rtype: ~typing.KeysView + """ + return self._data.keys() + + def values(self) -> typing.ValuesView[typing.Any]: + """ + :returns: an object providing a view on D's values + :rtype: ~typing.ValuesView + """ + return self._data.values() + + def items(self) -> typing.ItemsView[str, typing.Any]: + """ + :returns: set-like object providing a view on D's items + :rtype: ~typing.ItemsView + """ + return self._data.items() + + def get(self, key: str, default: typing.Any = None) -> typing.Any: + """ + Get the value for key if key is in the dictionary, else default. + :param str key: The key to look up. + :param any default: The value to return if key is not in the dictionary. Defaults to None + :returns: D[k] if k in D, else d. + :rtype: any + """ + try: + return self[key] + except KeyError: + return default + + @typing.overload + def pop(self, key: str) -> typing.Any: ... # pylint: disable=arguments-differ + + @typing.overload + def pop(self, key: str, default: _T) -> _T: ... # pylint: disable=signature-differs + + @typing.overload + def pop(self, key: str, default: typing.Any) -> typing.Any: ... # pylint: disable=signature-differs + + def pop(self, key: str, default: typing.Any = _UNSET) -> typing.Any: + """ + Removes specified key and return the corresponding value. + :param str key: The key to pop. + :param any default: The value to return if key is not in the dictionary + :returns: The value corresponding to the key. + :rtype: any + :raises KeyError: If key is not found and default is not given. + """ + if default is _UNSET: + return self._data.pop(key) + return self._data.pop(key, default) + + def popitem(self) -> tuple[str, typing.Any]: + """ + Removes and returns some (key, value) pair + :returns: The (key, value) pair. + :rtype: tuple + :raises KeyError: if D is empty. + """ + return self._data.popitem() + + def clear(self) -> None: + """ + Remove all items from D. + """ + self._data.clear() + + def update(self, *args: typing.Any, **kwargs: typing.Any) -> None: # pylint: disable=arguments-differ + """ + Updates D from mapping/iterable E and F. + :param any args: Either a mapping object or an iterable of key-value pairs. + """ + self._data.update(*args, **kwargs) + + @typing.overload + def setdefault(self, key: str, default: None = None) -> None: ... + + @typing.overload + def setdefault(self, key: str, default: typing.Any) -> typing.Any: ... 
# pylint: disable=signature-differs + + def setdefault(self, key: str, default: typing.Any = _UNSET) -> typing.Any: + """ + Same as calling D.get(k, d), and setting D[k]=d if k not found + :param str key: The key to look up. + :param any default: The value to set if key is not in the dictionary + :returns: D[k] if k in D, else d. + :rtype: any + """ + if default is _UNSET: + return self._data.setdefault(key) + return self._data.setdefault(key, default) + + def __eq__(self, other: typing.Any) -> bool: + try: + other_model = self.__class__(other) + except Exception: + return False + return self._data == other_model._data + + def __repr__(self) -> str: + return str(self._data) + + +def _is_model(obj: typing.Any) -> bool: + return getattr(obj, "_is_model", False) + + +def _serialize(o, format: typing.Optional[str] = None): # pylint: disable=too-many-return-statements + if isinstance(o, list): + if format in _ARRAY_ENCODE_MAPPING and all(isinstance(x, str) for x in o): + return _ARRAY_ENCODE_MAPPING[format].join(o) + return [_serialize(x, format) for x in o] + if isinstance(o, dict): + return {k: _serialize(v, format) for k, v in o.items()} + if isinstance(o, set): + return {_serialize(x, format) for x in o} + if isinstance(o, tuple): + return tuple(_serialize(x, format) for x in o) + if isinstance(o, (bytes, bytearray)): + return _serialize_bytes(o, format) + if isinstance(o, decimal.Decimal): + return float(o) + if isinstance(o, enum.Enum): + return o.value + if isinstance(o, int): + if format == "str": + return str(o) + return o + try: + # First try datetime.datetime + return _serialize_datetime(o, format) + except AttributeError: + pass + # Last, try datetime.timedelta + try: + return _timedelta_as_isostr(o) + except AttributeError: + # This will be raised when it hits value.total_seconds in the method above + pass + return o + + +def _get_rest_field(attr_to_rest_field: dict[str, "_RestField"], rest_name: str) -> typing.Optional["_RestField"]: + try: + return next(rf for rf in attr_to_rest_field.values() if rf._rest_name == rest_name) + except StopIteration: + return None + + +def _create_value(rf: typing.Optional["_RestField"], value: typing.Any) -> typing.Any: + if not rf: + return _serialize(value, None) + if rf._is_multipart_file_input: + return value + if rf._is_model: + return _deserialize(rf._type, value) + if isinstance(value, ET.Element): + value = _deserialize(rf._type, value) + return _serialize(value, rf._format) + + +class Model(_MyMutableMapping): + _is_model = True + # label whether current class's _attr_to_rest_field has been calculated + # could not see _attr_to_rest_field directly because subclass inherits it from parent class + _calculated: set[str] = set() + + def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None: + class_name = self.__class__.__name__ + if len(args) > 1: + raise TypeError(f"{class_name}.__init__() takes 2 positional arguments but {len(args) + 1} were given") + dict_to_pass = { + rest_field._rest_name: rest_field._default + for rest_field in self._attr_to_rest_field.values() + if rest_field._default is not _UNSET + } + if args: # pylint: disable=too-many-nested-blocks + if isinstance(args[0], ET.Element): + existed_attr_keys = [] + model_meta = getattr(self, "_xml", {}) + + for rf in self._attr_to_rest_field.values(): + prop_meta = getattr(rf, "_xml", {}) + xml_name = prop_meta.get("name", rf._rest_name) + xml_ns = prop_meta.get("ns", model_meta.get("ns", None)) + if xml_ns: + xml_name = "{" + xml_ns + "}" + xml_name + + # attribute + if 
prop_meta.get("attribute", False) and args[0].get(xml_name) is not None: + existed_attr_keys.append(xml_name) + dict_to_pass[rf._rest_name] = _deserialize(rf._type, args[0].get(xml_name)) + continue + + # unwrapped element is array + if prop_meta.get("unwrapped", False): + # unwrapped array could either use prop items meta/prop meta + if prop_meta.get("itemsName"): + xml_name = prop_meta.get("itemsName") + xml_ns = prop_meta.get("itemNs") + if xml_ns: + xml_name = "{" + xml_ns + "}" + xml_name + items = args[0].findall(xml_name) # pyright: ignore + if len(items) > 0: + existed_attr_keys.append(xml_name) + dict_to_pass[rf._rest_name] = _deserialize(rf._type, items) + continue + + # text element is primitive type + if prop_meta.get("text", False): + if args[0].text is not None: + dict_to_pass[rf._rest_name] = _deserialize(rf._type, args[0].text) + continue + + # wrapped element could be normal property or array, it should only have one element + item = args[0].find(xml_name) + if item is not None: + existed_attr_keys.append(xml_name) + dict_to_pass[rf._rest_name] = _deserialize(rf._type, item) + + # rest thing is additional properties + for e in args[0]: + if e.tag not in existed_attr_keys: + dict_to_pass[e.tag] = _convert_element(e) + else: + dict_to_pass.update( + {k: _create_value(_get_rest_field(self._attr_to_rest_field, k), v) for k, v in args[0].items()} + ) + else: + non_attr_kwargs = [k for k in kwargs if k not in self._attr_to_rest_field] + if non_attr_kwargs: + # actual type errors only throw the first wrong keyword arg they see, so following that. + raise TypeError(f"{class_name}.__init__() got an unexpected keyword argument '{non_attr_kwargs[0]}'") + dict_to_pass.update( + { + self._attr_to_rest_field[k]._rest_name: _create_value(self._attr_to_rest_field[k], v) + for k, v in kwargs.items() + if v is not None + } + ) + super().__init__(dict_to_pass) + + def copy(self) -> "Model": + return Model(self.__dict__) + + def __new__(cls, *args: typing.Any, **kwargs: typing.Any) -> Self: + if f"{cls.__module__}.{cls.__qualname__}" not in cls._calculated: + # we know the last nine classes in mro are going to be 'Model', '_MyMutableMapping', 'MutableMapping', + # 'Mapping', 'Collection', 'Sized', 'Iterable', 'Container' and 'object' + mros = cls.__mro__[:-9][::-1] # ignore parents, and reverse the mro order + attr_to_rest_field: dict[str, _RestField] = { # map attribute name to rest_field property + k: v for mro_class in mros for k, v in mro_class.__dict__.items() if k[0] != "_" and hasattr(v, "_type") + } + annotations = { + k: v + for mro_class in mros + if hasattr(mro_class, "__annotations__") + for k, v in mro_class.__annotations__.items() + } + for attr, rf in attr_to_rest_field.items(): + rf._module = cls.__module__ + if not rf._type: + rf._type = rf._get_deserialize_callable_from_annotation(annotations.get(attr, None)) + if not rf._rest_name_input: + rf._rest_name_input = attr + cls._attr_to_rest_field: dict[str, _RestField] = dict(attr_to_rest_field.items()) + cls._backcompat_attr_to_rest_field: dict[str, _RestField] = { + Model._get_backcompat_attribute_name(cls._attr_to_rest_field, attr): rf + for attr, rf in cls._attr_to_rest_field.items() + } + cls._calculated.add(f"{cls.__module__}.{cls.__qualname__}") + + return super().__new__(cls) + + def __init_subclass__(cls, discriminator: typing.Optional[str] = None) -> None: + for base in cls.__bases__: + if hasattr(base, "__mapping__"): + base.__mapping__[discriminator or cls.__name__] = cls # type: ignore + + @classmethod + def 
_get_backcompat_attribute_name(cls, attr_to_rest_field: dict[str, "_RestField"], attr_name: str) -> str: + rest_field_obj = attr_to_rest_field.get(attr_name) # pylint: disable=protected-access + if rest_field_obj is None: + return attr_name + original_tsp_name = getattr(rest_field_obj, "_original_tsp_name", None) # pylint: disable=protected-access + if original_tsp_name: + return original_tsp_name + return attr_name + + @classmethod + def _get_discriminator(cls, exist_discriminators) -> typing.Optional["_RestField"]: + for v in cls.__dict__.values(): + if isinstance(v, _RestField) and v._is_discriminator and v._rest_name not in exist_discriminators: + return v + return None + + @classmethod + def _deserialize(cls, data, exist_discriminators): + if not hasattr(cls, "__mapping__"): + return cls(data) + discriminator = cls._get_discriminator(exist_discriminators) + if discriminator is None: + return cls(data) + exist_discriminators.append(discriminator._rest_name) + if isinstance(data, ET.Element): + model_meta = getattr(cls, "_xml", {}) + prop_meta = getattr(discriminator, "_xml", {}) + xml_name = prop_meta.get("name", discriminator._rest_name) + xml_ns = prop_meta.get("ns", model_meta.get("ns", None)) + if xml_ns: + xml_name = "{" + xml_ns + "}" + xml_name + + if data.get(xml_name) is not None: + discriminator_value = data.get(xml_name) + else: + discriminator_value = data.find(xml_name).text # pyright: ignore + else: + discriminator_value = data.get(discriminator._rest_name) + mapped_cls = cls.__mapping__.get(discriminator_value, cls) # pyright: ignore # pylint: disable=no-member + return mapped_cls._deserialize(data, exist_discriminators) + + def as_dict(self, *, exclude_readonly: bool = False) -> dict[str, typing.Any]: + """Return a dict that can be turned into json using json.dump. + + :keyword bool exclude_readonly: Whether to remove the readonly properties. 
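A hedged sketch of the as_dict / exclude_readonly contract documented above, using a toy Model subclass; the rest_field keyword names (name, visibility) are assumptions, since that helper is defined later in this file, outside the excerpt:

import json
from typing import Optional
from azure.developer.loadtesting._utils.model_base import Model, SdkJSONEncoder, rest_field

class Widget(Model):
    widget_id: Optional[str] = rest_field(name="widgetId", visibility=["read"])  # read-only
    display_name: Optional[str] = rest_field(name="displayName")

w = Widget({"widgetId": "w-1", "displayName": "demo"})

print(w.as_dict())                        # {'widgetId': 'w-1', 'displayName': 'demo'}
print(w.as_dict(exclude_readonly=True))   # {'displayName': 'demo'}

# SdkJSONEncoder applies the same read-only filtering when dumping straight to JSON.
print(json.dumps(w, cls=SdkJSONEncoder, exclude_readonly=True))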
+ :returns: A dict JSON compatible object + :rtype: dict + """ + + result = {} + readonly_props = [] + if exclude_readonly: + readonly_props = [p._rest_name for p in self._attr_to_rest_field.values() if _is_readonly(p)] + for k, v in self.items(): + if exclude_readonly and k in readonly_props: # pyright: ignore + continue + is_multipart_file_input = False + try: + is_multipart_file_input = next( + rf for rf in self._attr_to_rest_field.values() if rf._rest_name == k + )._is_multipart_file_input + except StopIteration: + pass + result[k] = v if is_multipart_file_input else Model._as_dict_value(v, exclude_readonly=exclude_readonly) + return result + + @staticmethod + def _as_dict_value(v: typing.Any, exclude_readonly: bool = False) -> typing.Any: + if v is None or isinstance(v, _Null): + return None + if isinstance(v, (list, tuple, set)): + return type(v)(Model._as_dict_value(x, exclude_readonly=exclude_readonly) for x in v) + if isinstance(v, dict): + return {dk: Model._as_dict_value(dv, exclude_readonly=exclude_readonly) for dk, dv in v.items()} + return v.as_dict(exclude_readonly=exclude_readonly) if hasattr(v, "as_dict") else v + + +def _deserialize_model(model_deserializer: typing.Optional[typing.Callable], obj): + if _is_model(obj): + return obj + return _deserialize(model_deserializer, obj) + + +def _deserialize_with_optional(if_obj_deserializer: typing.Optional[typing.Callable], obj): + if obj is None: + return obj + return _deserialize_with_callable(if_obj_deserializer, obj) + + +def _deserialize_with_union(deserializers, obj): + for deserializer in deserializers: + try: + return _deserialize(deserializer, obj) + except DeserializationError: + pass + raise DeserializationError() + + +def _deserialize_dict( + value_deserializer: typing.Optional[typing.Callable], + module: typing.Optional[str], + obj: dict[typing.Any, typing.Any], +): + if obj is None: + return obj + if isinstance(obj, ET.Element): + obj = {child.tag: child for child in obj} + return {k: _deserialize(value_deserializer, v, module) for k, v in obj.items()} + + +def _deserialize_multiple_sequence( + entry_deserializers: list[typing.Optional[typing.Callable]], + module: typing.Optional[str], + obj, +): + if obj is None: + return obj + return type(obj)(_deserialize(deserializer, entry, module) for entry, deserializer in zip(obj, entry_deserializers)) + + +def _deserialize_sequence( + deserializer: typing.Optional[typing.Callable], + module: typing.Optional[str], + obj, +): + if obj is None: + return obj + if isinstance(obj, ET.Element): + obj = list(obj) + try: + if ( + isinstance(obj, str) + and isinstance(deserializer, functools.partial) + and isinstance(deserializer.args[0], functools.partial) + and deserializer.args[0].func == _deserialize_array_encoded # pylint: disable=comparison-with-callable + ): + # encoded string may be deserialized to sequence + return deserializer(obj) + except: # pylint: disable=bare-except + pass + return type(obj)(_deserialize(deserializer, entry, module) for entry in obj) + + +def _sorted_annotations(types: list[typing.Any]) -> list[typing.Any]: + return sorted( + types, + key=lambda x: hasattr(x, "__name__") and x.__name__.lower() in ("str", "float", "int", "bool"), + ) + + +def _get_deserialize_callable_from_annotation( # pylint: disable=too-many-return-statements, too-many-statements, too-many-branches + annotation: typing.Any, + module: typing.Optional[str], + rf: typing.Optional["_RestField"] = None, +) -> typing.Optional[typing.Callable[[typing.Any], typing.Any]]: + if not annotation: 
+ return None + + # is it a type alias? + if isinstance(annotation, str): + if module is not None: + annotation = _get_type_alias_type(module, annotation) + + # is it a forward ref / in quotes? + if isinstance(annotation, (str, typing.ForwardRef)): + try: + model_name = annotation.__forward_arg__ # type: ignore + except AttributeError: + model_name = annotation + if module is not None: + annotation = _get_model(module, model_name) # type: ignore + + try: + if module and _is_model(annotation): + if rf: + rf._is_model = True + + return functools.partial(_deserialize_model, annotation) # pyright: ignore + except Exception: + pass + + # is it a literal? + try: + if annotation.__origin__ is typing.Literal: # pyright: ignore + return None + except AttributeError: + pass + + # is it optional? + try: + if any(a for a in annotation.__args__ if a == type(None)): # pyright: ignore + if len(annotation.__args__) <= 2: # pyright: ignore + if_obj_deserializer = _get_deserialize_callable_from_annotation( + next(a for a in annotation.__args__ if a != type(None)), module, rf # pyright: ignore + ) + + return functools.partial(_deserialize_with_optional, if_obj_deserializer) + # the type is Optional[Union[...]], we need to remove the None type from the Union + annotation_copy = copy.copy(annotation) + annotation_copy.__args__ = [a for a in annotation_copy.__args__ if a != type(None)] # pyright: ignore + return _get_deserialize_callable_from_annotation(annotation_copy, module, rf) + except AttributeError: + pass + + # is it union? + if getattr(annotation, "__origin__", None) is typing.Union: + # initial ordering is we make `string` the last deserialization option, because it is often them most generic + deserializers = [ + _get_deserialize_callable_from_annotation(arg, module, rf) + for arg in _sorted_annotations(annotation.__args__) # pyright: ignore + ] + + return functools.partial(_deserialize_with_union, deserializers) + + try: + annotation_name = ( + annotation.__name__ if hasattr(annotation, "__name__") else annotation._name # pyright: ignore + ) + if annotation_name.lower() == "dict": + value_deserializer = _get_deserialize_callable_from_annotation( + annotation.__args__[1], module, rf # pyright: ignore + ) + + return functools.partial( + _deserialize_dict, + value_deserializer, + module, + ) + except (AttributeError, IndexError): + pass + try: + annotation_name = ( + annotation.__name__ if hasattr(annotation, "__name__") else annotation._name # pyright: ignore + ) + if annotation_name.lower() in ["list", "set", "tuple", "sequence"]: + if len(annotation.__args__) > 1: # pyright: ignore + entry_deserializers = [ + _get_deserialize_callable_from_annotation(dt, module, rf) + for dt in annotation.__args__ # pyright: ignore + ] + return functools.partial(_deserialize_multiple_sequence, entry_deserializers, module) + deserializer = _get_deserialize_callable_from_annotation( + annotation.__args__[0], module, rf # pyright: ignore + ) + + return functools.partial(_deserialize_sequence, deserializer, module) + except (TypeError, IndexError, AttributeError, SyntaxError): + pass + + def _deserialize_default( + deserializer, + obj, + ): + if obj is None: + return obj + try: + return _deserialize_with_callable(deserializer, obj) + except Exception: + pass + return obj + + if get_deserializer(annotation, rf): + return functools.partial(_deserialize_default, get_deserializer(annotation, rf)) + + return functools.partial(_deserialize_default, annotation) + + +def _deserialize_with_callable( + deserializer: 
typing.Optional[typing.Callable[[typing.Any], typing.Any]], + value: typing.Any, +): # pylint: disable=too-many-return-statements + try: + if value is None or isinstance(value, _Null): + return None + if isinstance(value, ET.Element): + if deserializer is str: + return value.text or "" + if deserializer is int: + return int(value.text) if value.text else None + if deserializer is float: + return float(value.text) if value.text else None + if deserializer is bool: + return value.text == "true" if value.text else None + if deserializer is None: + return value + if deserializer in [int, float, bool]: + return deserializer(value) + if isinstance(deserializer, CaseInsensitiveEnumMeta): + try: + return deserializer(value) + except ValueError: + # for unknown value, return raw value + return value + if isinstance(deserializer, type) and issubclass(deserializer, Model): + return deserializer._deserialize(value, []) + return typing.cast(typing.Callable[[typing.Any], typing.Any], deserializer)(value) + except Exception as e: + raise DeserializationError() from e + + +def _deserialize( + deserializer: typing.Any, + value: typing.Any, + module: typing.Optional[str] = None, + rf: typing.Optional["_RestField"] = None, + format: typing.Optional[str] = None, +) -> typing.Any: + if isinstance(value, PipelineResponse): + value = value.http_response.json() + if rf is None and format: + rf = _RestField(format=format) + if not isinstance(deserializer, functools.partial): + deserializer = _get_deserialize_callable_from_annotation(deserializer, module, rf) + return _deserialize_with_callable(deserializer, value) + + +def _failsafe_deserialize( + deserializer: typing.Any, + response: HttpResponse, + module: typing.Optional[str] = None, + rf: typing.Optional["_RestField"] = None, + format: typing.Optional[str] = None, +) -> typing.Any: + try: + return _deserialize(deserializer, response.json(), module, rf, format) + except DeserializationError: + _LOGGER.warning( + "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True + ) + return None + + +def _failsafe_deserialize_xml( + deserializer: typing.Any, + response: HttpResponse, +) -> typing.Any: + try: + return _deserialize_xml(deserializer, response.text()) + except DeserializationError: + _LOGGER.warning( + "Ran into a deserialization error. 
Ignoring since this is failsafe deserialization", exc_info=True + ) + return None + + +# pylint: disable=too-many-instance-attributes +class _RestField: + def __init__( + self, + *, + name: typing.Optional[str] = None, + type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin + is_discriminator: bool = False, + visibility: typing.Optional[list[str]] = None, + default: typing.Any = _UNSET, + format: typing.Optional[str] = None, + is_multipart_file_input: bool = False, + xml: typing.Optional[dict[str, typing.Any]] = None, + original_tsp_name: typing.Optional[str] = None, + ): + self._type = type + self._rest_name_input = name + self._module: typing.Optional[str] = None + self._is_discriminator = is_discriminator + self._visibility = visibility + self._is_model = False + self._default = default + self._format = format + self._is_multipart_file_input = is_multipart_file_input + self._xml = xml if xml is not None else {} + self._original_tsp_name = original_tsp_name + + @property + def _class_type(self) -> typing.Any: + result = getattr(self._type, "args", [None])[0] + # type may be wrapped by nested functools.partial so we need to check for that + if isinstance(result, functools.partial): + return getattr(result, "args", [None])[0] + return result + + @property + def _rest_name(self) -> str: + if self._rest_name_input is None: + raise ValueError("Rest name was never set") + return self._rest_name_input + + def __get__(self, obj: Model, type=None): # pylint: disable=redefined-builtin + # by this point, type and rest_name will have a value bc we default + # them in __new__ of the Model class + # Use _data.get() directly to avoid triggering __getitem__ which clears the cache + item = obj._data.get(self._rest_name) + if item is None: + return item + if self._is_model: + return item + + # For mutable types, we want mutations to directly affect _data + # Check if we've already deserialized this value + cache_attr = f"_deserialized_{self._rest_name}" + if hasattr(obj, cache_attr): + # Return the value from _data directly (it's been deserialized in place) + return obj._data.get(self._rest_name) + + deserialized = _deserialize(self._type, _serialize(item, self._format), rf=self) + + # For mutable types, store the deserialized value back in _data + # so mutations directly affect _data + if isinstance(deserialized, (dict, list, set)): + obj._data[self._rest_name] = deserialized + object.__setattr__(obj, cache_attr, True) # Mark as deserialized + return deserialized + + return deserialized + + def __set__(self, obj: Model, value) -> None: + # Clear the cached deserialized object when setting a new value + cache_attr = f"_deserialized_{self._rest_name}" + if hasattr(obj, cache_attr): + object.__delattr__(obj, cache_attr) + + if value is None: + # we want to wipe out entries if users set attr to None + try: + obj.__delitem__(self._rest_name) + except KeyError: + pass + return + if self._is_model: + if not _is_model(value): + value = _deserialize(self._type, value) + obj.__setitem__(self._rest_name, value) + return + obj.__setitem__(self._rest_name, _serialize(value, self._format)) + + def _get_deserialize_callable_from_annotation( + self, annotation: typing.Any + ) -> typing.Optional[typing.Callable[[typing.Any], typing.Any]]: + return _get_deserialize_callable_from_annotation(annotation, self._module, self) + + +def rest_field( + *, + name: typing.Optional[str] = None, + type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin + visibility: 
typing.Optional[list[str]] = None, + default: typing.Any = _UNSET, + format: typing.Optional[str] = None, + is_multipart_file_input: bool = False, + xml: typing.Optional[dict[str, typing.Any]] = None, + original_tsp_name: typing.Optional[str] = None, +) -> typing.Any: + return _RestField( + name=name, + type=type, + visibility=visibility, + default=default, + format=format, + is_multipart_file_input=is_multipart_file_input, + xml=xml, + original_tsp_name=original_tsp_name, + ) + + +def rest_discriminator( + *, + name: typing.Optional[str] = None, + type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin + visibility: typing.Optional[list[str]] = None, + xml: typing.Optional[dict[str, typing.Any]] = None, +) -> typing.Any: + return _RestField(name=name, type=type, is_discriminator=True, visibility=visibility, xml=xml) + + +def serialize_xml(model: Model, exclude_readonly: bool = False) -> str: + """Serialize a model to XML. + + :param Model model: The model to serialize. + :param bool exclude_readonly: Whether to exclude readonly properties. + :returns: The XML representation of the model. + :rtype: str + """ + return ET.tostring(_get_element(model, exclude_readonly), encoding="unicode") # type: ignore + + +def _get_element( + o: typing.Any, + exclude_readonly: bool = False, + parent_meta: typing.Optional[dict[str, typing.Any]] = None, + wrapped_element: typing.Optional[ET.Element] = None, +) -> typing.Union[ET.Element, list[ET.Element]]: + if _is_model(o): + model_meta = getattr(o, "_xml", {}) + + # if prop is a model, then use the prop element directly, else generate a wrapper of model + if wrapped_element is None: + wrapped_element = _create_xml_element( + model_meta.get("name", o.__class__.__name__), + model_meta.get("prefix"), + model_meta.get("ns"), + ) + + readonly_props = [] + if exclude_readonly: + readonly_props = [p._rest_name for p in o._attr_to_rest_field.values() if _is_readonly(p)] + + for k, v in o.items(): + # do not serialize readonly properties + if exclude_readonly and k in readonly_props: + continue + + prop_rest_field = _get_rest_field(o._attr_to_rest_field, k) + if prop_rest_field: + prop_meta = getattr(prop_rest_field, "_xml").copy() + # use the wire name as xml name if no specific name is set + if prop_meta.get("name") is None: + prop_meta["name"] = k + else: + # additional properties will not have rest field, use the wire name as xml name + prop_meta = {"name": k} + + # if no ns for prop, use model's + if prop_meta.get("ns") is None and model_meta.get("ns"): + prop_meta["ns"] = model_meta.get("ns") + prop_meta["prefix"] = model_meta.get("prefix") + + if prop_meta.get("unwrapped", False): + # unwrapped could only set on array + wrapped_element.extend(_get_element(v, exclude_readonly, prop_meta)) + elif prop_meta.get("text", False): + # text could only set on primitive type + wrapped_element.text = _get_primitive_type_value(v) + elif prop_meta.get("attribute", False): + xml_name = prop_meta.get("name", k) + if prop_meta.get("ns"): + ET.register_namespace(prop_meta.get("prefix"), prop_meta.get("ns")) # pyright: ignore + xml_name = "{" + prop_meta.get("ns") + "}" + xml_name # pyright: ignore + # attribute should be primitive type + wrapped_element.set(xml_name, _get_primitive_type_value(v)) + else: + # other wrapped prop element + wrapped_element.append(_get_wrapped_element(v, exclude_readonly, prop_meta)) + return wrapped_element + if isinstance(o, list): + return [_get_element(x, exclude_readonly, parent_meta) for x in o] # type: ignore + 
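+ # dict case: each key/value pair becomes a wrapped child element, inheriting the
+ # parent's namespace and prefix (see the "primitive case" comment below)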
if isinstance(o, dict): + result = [] + for k, v in o.items(): + result.append( + _get_wrapped_element( + v, + exclude_readonly, + { + "name": k, + "ns": parent_meta.get("ns") if parent_meta else None, + "prefix": parent_meta.get("prefix") if parent_meta else None, + }, + ) + ) + return result + + # primitive case need to create element based on parent_meta + if parent_meta: + return _get_wrapped_element( + o, + exclude_readonly, + { + "name": parent_meta.get("itemsName", parent_meta.get("name")), + "prefix": parent_meta.get("itemsPrefix", parent_meta.get("prefix")), + "ns": parent_meta.get("itemsNs", parent_meta.get("ns")), + }, + ) + + raise ValueError("Could not serialize value into xml: " + o) + + +def _get_wrapped_element( + v: typing.Any, + exclude_readonly: bool, + meta: typing.Optional[dict[str, typing.Any]], +) -> ET.Element: + wrapped_element = _create_xml_element( + meta.get("name") if meta else None, meta.get("prefix") if meta else None, meta.get("ns") if meta else None + ) + if isinstance(v, (dict, list)): + wrapped_element.extend(_get_element(v, exclude_readonly, meta)) + elif _is_model(v): + _get_element(v, exclude_readonly, meta, wrapped_element) + else: + wrapped_element.text = _get_primitive_type_value(v) + return wrapped_element + + +def _get_primitive_type_value(v) -> str: + if v is True: + return "true" + if v is False: + return "false" + if isinstance(v, _Null): + return "" + return str(v) + + +def _create_xml_element(tag, prefix=None, ns=None): + if prefix and ns: + ET.register_namespace(prefix, ns) + if ns: + return ET.Element("{" + ns + "}" + tag) + return ET.Element(tag) + + +def _deserialize_xml( + deserializer: typing.Any, + value: str, +) -> typing.Any: + element = ET.fromstring(value) # nosec + return _deserialize(deserializer, element) + + +def _convert_element(e: ET.Element): + # dict case + if len(e.attrib) > 0 or len({child.tag for child in e}) > 1: + dict_result: dict[str, typing.Any] = {} + for child in e: + if dict_result.get(child.tag) is not None: + if isinstance(dict_result[child.tag], list): + dict_result[child.tag].append(_convert_element(child)) + else: + dict_result[child.tag] = [dict_result[child.tag], _convert_element(child)] + else: + dict_result[child.tag] = _convert_element(child) + dict_result.update(e.attrib) + return dict_result + # array case + if len(e) > 0: + array_result: list[typing.Any] = [] + for child in e: + array_result.append(_convert_element(child)) + return array_result + # primitive case + return e.text diff --git a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_utils/serialization.py b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_utils/serialization.py new file mode 100644 index 000000000000..81ec1de5922b --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_utils/serialization.py @@ -0,0 +1,2041 @@ +# pylint: disable=line-too-long,useless-suppression,too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +# pyright: reportUnnecessaryTypeIgnoreComment=false + +from base64 import b64decode, b64encode +import calendar +import datetime +import decimal +import email +from enum import Enum +import json +import logging +import re +import sys +import codecs +from typing import ( + Any, + cast, + Optional, + Union, + AnyStr, + IO, + Mapping, + Callable, + MutableMapping, +) + +try: + from urllib import quote # type: ignore +except ImportError: + from urllib.parse import quote +import xml.etree.ElementTree as ET + +import isodate # type: ignore +from typing_extensions import Self + +from azure.core.exceptions import DeserializationError, SerializationError +from azure.core.serialization import NULL as CoreNull + +_BOM = codecs.BOM_UTF8.decode(encoding="utf-8") + +JSON = MutableMapping[str, Any] + + +class RawDeserializer: + + # Accept "text" because we're open minded people... + JSON_REGEXP = re.compile(r"^(application|text)/([a-z+.]+\+)?json$") + + # Name used in context + CONTEXT_NAME = "deserialized_data" + + @classmethod + def deserialize_from_text(cls, data: Optional[Union[AnyStr, IO]], content_type: Optional[str] = None) -> Any: + """Decode data according to content-type. + + Accept a stream of data as well, but will be load at once in memory for now. + + If no content-type, will return the string version (not bytes, not stream) + + :param data: Input, could be bytes or stream (will be decoded with UTF8) or text + :type data: str or bytes or IO + :param str content_type: The content type. + :return: The deserialized data. + :rtype: object + """ + if hasattr(data, "read"): + # Assume a stream + data = cast(IO, data).read() + + if isinstance(data, bytes): + data_as_str = data.decode(encoding="utf-8-sig") + else: + # Explain to mypy the correct type. + data_as_str = cast(str, data) + + # Remove Byte Order Mark if present in string + data_as_str = data_as_str.lstrip(_BOM) + + if content_type is None: + return data + + if cls.JSON_REGEXP.match(content_type): + try: + return json.loads(data_as_str) + except ValueError as err: + raise DeserializationError("JSON is invalid: {}".format(err), err) from err + elif "xml" in (content_type or []): + try: + + try: + if isinstance(data, unicode): # type: ignore + # If I'm Python 2.7 and unicode XML will scream if I try a "fromstring" on unicode string + data_as_str = data_as_str.encode(encoding="utf-8") # type: ignore + except NameError: + pass + + return ET.fromstring(data_as_str) # nosec + except ET.ParseError as err: + # It might be because the server has an issue, and returned JSON with + # content-type XML.... + # So let's try a JSON load, and if it's still broken + # let's flow the initial exception + def _json_attemp(data): + try: + return True, json.loads(data) + except ValueError: + return False, None # Don't care about this one + + success, json_result = _json_attemp(data) + if success: + return json_result + # If i'm here, it's not JSON, it's not XML, let's scream + # and raise the last context in this block (the XML exception) + # The function hack is because Py2.7 messes up with exception + # context otherwise. 
+ _LOGGER.critical("Wasn't XML not JSON, failing") + raise DeserializationError("XML is invalid") from err + elif content_type.startswith("text/"): + return data_as_str + raise DeserializationError("Cannot deserialize content-type: {}".format(content_type)) + + @classmethod + def deserialize_from_http_generics(cls, body_bytes: Optional[Union[AnyStr, IO]], headers: Mapping) -> Any: + """Deserialize from HTTP response. + + Use bytes and headers to NOT use any requests/aiohttp or whatever + specific implementation. + Headers will tested for "content-type" + + :param bytes body_bytes: The body of the response. + :param dict headers: The headers of the response. + :returns: The deserialized data. + :rtype: object + """ + # Try to use content-type from headers if available + content_type = None + if "content-type" in headers: + content_type = headers["content-type"].split(";")[0].strip().lower() + # Ouch, this server did not declare what it sent... + # Let's guess it's JSON... + # Also, since Autorest was considering that an empty body was a valid JSON, + # need that test as well.... + else: + content_type = "application/json" + + if body_bytes: + return cls.deserialize_from_text(body_bytes, content_type) + return None + + +_LOGGER = logging.getLogger(__name__) + +try: + _long_type = long # type: ignore +except NameError: + _long_type = int + +TZ_UTC = datetime.timezone.utc + +_FLATTEN = re.compile(r"(? None: + self.additional_properties: Optional[dict[str, Any]] = {} + for k in kwargs: # pylint: disable=consider-using-dict-items + if k not in self._attribute_map: + _LOGGER.warning("%s is not a known attribute of class %s and will be ignored", k, self.__class__) + elif k in self._validation and self._validation[k].get("readonly", False): + _LOGGER.warning("Readonly attribute %s will be ignored in class %s", k, self.__class__) + else: + setattr(self, k, kwargs[k]) + + def __eq__(self, other: Any) -> bool: + """Compare objects by comparing all attributes. + + :param object other: The object to compare + :returns: True if objects are equal + :rtype: bool + """ + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + return False + + def __ne__(self, other: Any) -> bool: + """Compare objects by comparing all attributes. + + :param object other: The object to compare + :returns: True if objects are not equal + :rtype: bool + """ + return not self.__eq__(other) + + def __str__(self) -> str: + return str(self.__dict__) + + @classmethod + def enable_additional_properties_sending(cls) -> None: + cls._attribute_map["additional_properties"] = {"key": "", "type": "{object}"} + + @classmethod + def is_xml_model(cls) -> bool: + try: + cls._xml_map # type: ignore + except AttributeError: + return False + return True + + @classmethod + def _create_xml_node(cls): + """Create XML node. + + :returns: The XML node + :rtype: xml.etree.ElementTree.Element + """ + try: + xml_map = cls._xml_map # type: ignore + except AttributeError: + xml_map = {} + + return _create_xml_node(xml_map.get("name", cls.__name__), xml_map.get("prefix", None), xml_map.get("ns", None)) + + def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> JSON: + """Return the JSON that would be sent to server from this model. + + This is an alias to `as_dict(full_restapi_key_transformer, keep_readonly=False)`. + + If you want XML serialization, you can pass the kwargs is_xml=True. 
+ + :param bool keep_readonly: If you want to serialize the readonly attributes + :returns: A dict JSON compatible object + :rtype: dict + """ + serializer = Serializer(self._infer_class_models()) + return serializer._serialize( # type: ignore # pylint: disable=protected-access + self, keep_readonly=keep_readonly, **kwargs + ) + + def as_dict( + self, + keep_readonly: bool = True, + key_transformer: Callable[[str, dict[str, Any], Any], Any] = attribute_transformer, + **kwargs: Any + ) -> JSON: + """Return a dict that can be serialized using json.dump. + + Advanced usage might optionally use a callback as parameter: + + .. code::python + + def my_key_transformer(key, attr_desc, value): + return key + + Key is the attribute name used in Python. Attr_desc + is a dict of metadata. Currently contains 'type' with the + msrest type and 'key' with the RestAPI encoded key. + Value is the current value in this object. + + The string returned will be used to serialize the key. + If the return type is a list, this is considered hierarchical + result dict. + + See the three examples in this file: + + - attribute_transformer + - full_restapi_key_transformer + - last_restapi_key_transformer + + If you want XML serialization, you can pass the kwargs is_xml=True. + + :param bool keep_readonly: If you want to serialize the readonly attributes + :param function key_transformer: A key transformer function. + :returns: A dict JSON compatible object + :rtype: dict + """ + serializer = Serializer(self._infer_class_models()) + return serializer._serialize( # type: ignore # pylint: disable=protected-access + self, key_transformer=key_transformer, keep_readonly=keep_readonly, **kwargs + ) + + @classmethod + def _infer_class_models(cls): + try: + str_models = cls.__module__.rsplit(".", 1)[0] + models = sys.modules[str_models] + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + if cls.__name__ not in client_models: + raise ValueError("Not Autorest generated code") + except Exception: # pylint: disable=broad-exception-caught + # Assume it's not Autorest generated (tests?). Add ourselves as dependencies. + client_models = {cls.__name__: cls} + return client_models + + @classmethod + def deserialize(cls, data: Any, content_type: Optional[str] = None) -> Self: + """Parse a str using the RestAPI syntax and return a model. + + :param str data: A str using RestAPI structure. JSON by default. + :param str content_type: JSON by default, set application/xml if XML. + :returns: An instance of this model + :raises DeserializationError: if something went wrong + :rtype: Self + """ + deserializer = Deserializer(cls._infer_class_models()) + return deserializer(cls.__name__, data, content_type=content_type) # type: ignore + + @classmethod + def from_dict( + cls, + data: Any, + key_extractors: Optional[Callable[[str, dict[str, Any], Any], Any]] = None, + content_type: Optional[str] = None, + ) -> Self: + """Parse a dict using given key extractor return a model. + + By default consider key + extractors (rest_key_case_insensitive_extractor, attribute_key_case_insensitive_extractor + and last_rest_key_case_insensitive_extractor) + + :param dict data: A dict using RestAPI structure + :param function key_extractors: A key extractor function. + :param str content_type: JSON by default, set application/xml if XML. 
+ :returns: An instance of this model + :raises DeserializationError: if something went wrong + :rtype: Self + """ + deserializer = Deserializer(cls._infer_class_models()) + deserializer.key_extractors = ( # type: ignore + [ # type: ignore + attribute_key_case_insensitive_extractor, + rest_key_case_insensitive_extractor, + last_rest_key_case_insensitive_extractor, + ] + if key_extractors is None + else key_extractors + ) + return deserializer(cls.__name__, data, content_type=content_type) # type: ignore + + @classmethod + def _flatten_subtype(cls, key, objects): + if "_subtype_map" not in cls.__dict__: + return {} + result = dict(cls._subtype_map[key]) + for valuetype in cls._subtype_map[key].values(): + result |= objects[valuetype]._flatten_subtype(key, objects) # pylint: disable=protected-access + return result + + @classmethod + def _classify(cls, response, objects): + """Check the class _subtype_map for any child classes. + We want to ignore any inherited _subtype_maps. + + :param dict response: The initial data + :param dict objects: The class objects + :returns: The class to be used + :rtype: class + """ + for subtype_key in cls.__dict__.get("_subtype_map", {}).keys(): + subtype_value = None + + if not isinstance(response, ET.Element): + rest_api_response_key = cls._get_rest_key_parts(subtype_key)[-1] + subtype_value = response.get(rest_api_response_key, None) or response.get(subtype_key, None) + else: + subtype_value = xml_key_extractor(subtype_key, cls._attribute_map[subtype_key], response) + if subtype_value: + # Try to match base class. Can be class name only + # (bug to fix in Autorest to support x-ms-discriminator-name) + if cls.__name__ == subtype_value: + return cls + flatten_mapping_type = cls._flatten_subtype(subtype_key, objects) + try: + return objects[flatten_mapping_type[subtype_value]] # type: ignore + except KeyError: + _LOGGER.warning( + "Subtype value %s has no mapping, use base class %s.", + subtype_value, + cls.__name__, + ) + break + else: + _LOGGER.warning("Discriminator %s is absent or null, use base class %s.", subtype_key, cls.__name__) + break + return cls + + @classmethod + def _get_rest_key_parts(cls, attr_key): + """Get the RestAPI key of this attr, split it and decode part + :param str attr_key: Attribute key must be in attribute_map. + :returns: A list of RestAPI part + :rtype: list + """ + rest_split_key = _FLATTEN.split(cls._attribute_map[attr_key]["key"]) + return [_decode_attribute_map_key(key_part) for key_part in rest_split_key] + + +def _decode_attribute_map_key(key): + """This decode a key in an _attribute_map to the actual key we want to look at + inside the received data. 
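+ For example, an escaped key part such as ``"user\\.name"`` decodes to the actual
+ wire key ``"user.name"``.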
+ + :param str key: A key string from the generated code + :returns: The decoded key + :rtype: str + """ + return key.replace("\\.", ".") + + +class Serializer: # pylint: disable=too-many-public-methods + """Request object model serializer.""" + + basic_types = {str: "str", int: "int", bool: "bool", float: "float"} + + _xml_basic_types_serializers = {"bool": lambda x: str(x).lower()} + days = {0: "Mon", 1: "Tue", 2: "Wed", 3: "Thu", 4: "Fri", 5: "Sat", 6: "Sun"} + months = { + 1: "Jan", + 2: "Feb", + 3: "Mar", + 4: "Apr", + 5: "May", + 6: "Jun", + 7: "Jul", + 8: "Aug", + 9: "Sep", + 10: "Oct", + 11: "Nov", + 12: "Dec", + } + validation = { + "min_length": lambda x, y: len(x) < y, + "max_length": lambda x, y: len(x) > y, + "minimum": lambda x, y: x < y, + "maximum": lambda x, y: x > y, + "minimum_ex": lambda x, y: x <= y, + "maximum_ex": lambda x, y: x >= y, + "min_items": lambda x, y: len(x) < y, + "max_items": lambda x, y: len(x) > y, + "pattern": lambda x, y: not re.match(y, x, re.UNICODE), + "unique": lambda x, y: len(x) != len(set(x)), + "multiple": lambda x, y: x % y != 0, + } + + def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None: + self.serialize_type = { + "iso-8601": Serializer.serialize_iso, + "rfc-1123": Serializer.serialize_rfc, + "unix-time": Serializer.serialize_unix, + "duration": Serializer.serialize_duration, + "date": Serializer.serialize_date, + "time": Serializer.serialize_time, + "decimal": Serializer.serialize_decimal, + "long": Serializer.serialize_long, + "bytearray": Serializer.serialize_bytearray, + "base64": Serializer.serialize_base64, + "object": self.serialize_object, + "[]": self.serialize_iter, + "{}": self.serialize_dict, + } + self.dependencies: dict[str, type] = dict(classes) if classes else {} + self.key_transformer = full_restapi_key_transformer + self.client_side_validation = True + + def _serialize( # pylint: disable=too-many-nested-blocks, too-many-branches, too-many-statements, too-many-locals + self, target_obj, data_type=None, **kwargs + ): + """Serialize data into a string according to type. + + :param object target_obj: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str, dict + :raises SerializationError: if serialization fails. + :returns: The serialized data. 
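+ 
+ For example (illustrative, with ``MyModel`` standing in for a registered model class):
+ 
+ .. code::python
+ 
+ serializer = Serializer({"MyModel": MyModel})
+ json_dict = serializer._serialize(my_model)                  # JSON-compatible dict
+ xml_node = serializer._serialize(my_xml_model, is_xml=True)  # ElementTree element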
+ """ + key_transformer = kwargs.get("key_transformer", self.key_transformer) + keep_readonly = kwargs.get("keep_readonly", False) + if target_obj is None: + return None + + attr_name = None + class_name = target_obj.__class__.__name__ + + if data_type: + return self.serialize_data(target_obj, data_type, **kwargs) + + if not hasattr(target_obj, "_attribute_map"): + data_type = type(target_obj).__name__ + if data_type in self.basic_types.values(): + return self.serialize_data(target_obj, data_type, **kwargs) + + # Force "is_xml" kwargs if we detect a XML model + try: + is_xml_model_serialization = kwargs["is_xml"] + except KeyError: + is_xml_model_serialization = kwargs.setdefault("is_xml", target_obj.is_xml_model()) + + serialized = {} + if is_xml_model_serialization: + serialized = target_obj._create_xml_node() # pylint: disable=protected-access + try: + attributes = target_obj._attribute_map # pylint: disable=protected-access + for attr, attr_desc in attributes.items(): + attr_name = attr + if not keep_readonly and target_obj._validation.get( # pylint: disable=protected-access + attr_name, {} + ).get("readonly", False): + continue + + if attr_name == "additional_properties" and attr_desc["key"] == "": + if target_obj.additional_properties is not None: + serialized |= target_obj.additional_properties + continue + try: + + orig_attr = getattr(target_obj, attr) + if is_xml_model_serialization: + pass # Don't provide "transformer" for XML for now. Keep "orig_attr" + else: # JSON + keys, orig_attr = key_transformer(attr, attr_desc.copy(), orig_attr) + keys = keys if isinstance(keys, list) else [keys] + + kwargs["serialization_ctxt"] = attr_desc + new_attr = self.serialize_data(orig_attr, attr_desc["type"], **kwargs) + + if is_xml_model_serialization: + xml_desc = attr_desc.get("xml", {}) + xml_name = xml_desc.get("name", attr_desc["key"]) + xml_prefix = xml_desc.get("prefix", None) + xml_ns = xml_desc.get("ns", None) + if xml_desc.get("attr", False): + if xml_ns: + ET.register_namespace(xml_prefix, xml_ns) + xml_name = "{{{}}}{}".format(xml_ns, xml_name) + serialized.set(xml_name, new_attr) # type: ignore + continue + if xml_desc.get("text", False): + serialized.text = new_attr # type: ignore + continue + if isinstance(new_attr, list): + serialized.extend(new_attr) # type: ignore + elif isinstance(new_attr, ET.Element): + # If the down XML has no XML/Name, + # we MUST replace the tag with the local tag. But keeping the namespaces. 
+ if "name" not in getattr(orig_attr, "_xml_map", {}): + splitted_tag = new_attr.tag.split("}") + if len(splitted_tag) == 2: # Namespace + new_attr.tag = "}".join([splitted_tag[0], xml_name]) + else: + new_attr.tag = xml_name + serialized.append(new_attr) # type: ignore + else: # That's a basic type + # Integrate namespace if necessary + local_node = _create_xml_node(xml_name, xml_prefix, xml_ns) + local_node.text = str(new_attr) + serialized.append(local_node) # type: ignore + else: # JSON + for k in reversed(keys): # type: ignore + new_attr = {k: new_attr} + + _new_attr = new_attr + _serialized = serialized + for k in keys: # type: ignore + if k not in _serialized: + _serialized.update(_new_attr) # type: ignore + _new_attr = _new_attr[k] # type: ignore + _serialized = _serialized[k] + except ValueError as err: + if isinstance(err, SerializationError): + raise + + except (AttributeError, KeyError, TypeError) as err: + msg = "Attribute {} in object {} cannot be serialized.\n{}".format(attr_name, class_name, str(target_obj)) + raise SerializationError(msg) from err + return serialized + + def body(self, data, data_type, **kwargs): + """Serialize data intended for a request body. + + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: dict + :raises SerializationError: if serialization fails. + :raises ValueError: if data is None + :returns: The serialized request body + """ + + # Just in case this is a dict + internal_data_type_str = data_type.strip("[]{}") + internal_data_type = self.dependencies.get(internal_data_type_str, None) + try: + is_xml_model_serialization = kwargs["is_xml"] + except KeyError: + if internal_data_type and issubclass(internal_data_type, Model): + is_xml_model_serialization = kwargs.setdefault("is_xml", internal_data_type.is_xml_model()) + else: + is_xml_model_serialization = False + if internal_data_type and not isinstance(internal_data_type, Enum): + try: + deserializer = Deserializer(self.dependencies) + # Since it's on serialization, it's almost sure that format is not JSON REST + # We're not able to deal with additional properties for now. + deserializer.additional_properties_detection = False + if is_xml_model_serialization: + deserializer.key_extractors = [ # type: ignore + attribute_key_case_insensitive_extractor, + ] + else: + deserializer.key_extractors = [ + rest_key_case_insensitive_extractor, + attribute_key_case_insensitive_extractor, + last_rest_key_case_insensitive_extractor, + ] + data = deserializer._deserialize(data_type, data) # pylint: disable=protected-access + except DeserializationError as err: + raise SerializationError("Unable to build a model: " + str(err)) from err + + return self._serialize(data, data_type, **kwargs) + + def url(self, name, data, data_type, **kwargs): + """Serialize data intended for a URL path. + + :param str name: The name of the URL path parameter. + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str + :returns: The serialized URL path + :raises TypeError: if serialization fails. 
+ :raises ValueError: if data is None + """ + try: + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + + if kwargs.get("skip_quote") is True: + output = str(output) + output = output.replace("{", quote("{")).replace("}", quote("}")) + else: + output = quote(str(output), safe="") + except SerializationError as exc: + raise TypeError("{} must be type {}.".format(name, data_type)) from exc + return output + + def query(self, name, data, data_type, **kwargs): + """Serialize data intended for a URL query. + + :param str name: The name of the query parameter. + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str, list + :raises TypeError: if serialization fails. + :raises ValueError: if data is None + :returns: The serialized query parameter + """ + try: + # Treat the list aside, since we don't want to encode the div separator + if data_type.startswith("["): + internal_data_type = data_type[1:-1] + do_quote = not kwargs.get("skip_quote", False) + return self.serialize_iter(data, internal_data_type, do_quote=do_quote, **kwargs) + + # Not a list, regular serialization + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + if kwargs.get("skip_quote") is True: + output = str(output) + else: + output = quote(str(output), safe="") + except SerializationError as exc: + raise TypeError("{} must be type {}.".format(name, data_type)) from exc + return str(output) + + def header(self, name, data, data_type, **kwargs): + """Serialize data intended for a request header. + + :param str name: The name of the header. + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str + :raises TypeError: if serialization fails. + :raises ValueError: if data is None + :returns: The serialized header + """ + try: + if data_type in ["[str]"]: + data = ["" if d is None else d for d in data] + + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + except SerializationError as exc: + raise TypeError("{} must be type {}.".format(name, data_type)) from exc + return str(output) + + def serialize_data(self, data, data_type, **kwargs): + """Serialize generic data according to supplied data type. + + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :raises AttributeError: if required data is None. + :raises ValueError: if data is None + :raises SerializationError: if serialization fails. + :returns: The serialized data. 
+ :rtype: str, int, float, bool, dict, list + """ + if data is None: + raise ValueError("No value for given attribute") + + try: + if data is CoreNull: + return None + if data_type in self.basic_types.values(): + return self.serialize_basic(data, data_type, **kwargs) + + if data_type in self.serialize_type: + return self.serialize_type[data_type](data, **kwargs) + + # If dependencies is empty, try with current data class + # It has to be a subclass of Enum anyway + enum_type = self.dependencies.get(data_type, cast(type, data.__class__)) + if issubclass(enum_type, Enum): + return Serializer.serialize_enum(data, enum_obj=enum_type) + + iter_type = data_type[0] + data_type[-1] + if iter_type in self.serialize_type: + return self.serialize_type[iter_type](data, data_type[1:-1], **kwargs) + + except (ValueError, TypeError) as err: + msg = "Unable to serialize value: {!r} as type: {!r}." + raise SerializationError(msg.format(data, data_type)) from err + return self._serialize(data, **kwargs) + + @classmethod + def _get_custom_serializers(cls, data_type, **kwargs): # pylint: disable=inconsistent-return-statements + custom_serializer = kwargs.get("basic_types_serializers", {}).get(data_type) + if custom_serializer: + return custom_serializer + if kwargs.get("is_xml", False): + return cls._xml_basic_types_serializers.get(data_type) + + @classmethod + def serialize_basic(cls, data, data_type, **kwargs): + """Serialize basic builting data type. + Serializes objects to str, int, float or bool. + + Possible kwargs: + - basic_types_serializers dict[str, callable] : If set, use the callable as serializer + - is_xml bool : If set, use xml_basic_types_serializers + + :param obj data: Object to be serialized. + :param str data_type: Type of object in the iterable. + :rtype: str, int, float, bool + :return: serialized object + :raises TypeError: raise if data_type is not one of str, int, float, bool. + """ + custom_serializer = cls._get_custom_serializers(data_type, **kwargs) + if custom_serializer: + return custom_serializer(data) + if data_type == "str": + return cls.serialize_unicode(data) + if data_type == "int": + return int(data) + if data_type == "float": + return float(data) + if data_type == "bool": + return bool(data) + raise TypeError("Unknown basic data type: {}".format(data_type)) + + @classmethod + def serialize_unicode(cls, data): + """Special handling for serializing unicode strings in Py2. + Encode to UTF-8 if unicode, otherwise handle as a str. + + :param str data: Object to be serialized. + :rtype: str + :return: serialized object + """ + try: # If I received an enum, return its value + return data.value + except AttributeError: + pass + + try: + if isinstance(data, unicode): # type: ignore + # Don't change it, JSON and XML ElementTree are totally able + # to serialize correctly u'' strings + return data + except NameError: + return str(data) + return str(data) + + def serialize_iter(self, data, iter_type, div=None, **kwargs): + """Serialize iterable. + + Supported kwargs: + - serialization_ctxt dict : The current entry of _attribute_map, or same format. + serialization_ctxt['type'] should be same as data_type. + - is_xml bool : If set, serialize as XML + + :param list data: Object to be serialized. + :param str iter_type: Type of object in the iterable. + :param str div: If set, this str will be used to combine the elements + in the iterable into a combined string. Default is 'None'. + Defaults to False. 
+ :rtype: list, str + :return: serialized iterable + """ + if isinstance(data, str): + raise SerializationError("Refuse str type as a valid iter type.") + + serialization_ctxt = kwargs.get("serialization_ctxt", {}) + is_xml = kwargs.get("is_xml", False) + + serialized = [] + for d in data: + try: + serialized.append(self.serialize_data(d, iter_type, **kwargs)) + except ValueError as err: + if isinstance(err, SerializationError): + raise + serialized.append(None) + + if kwargs.get("do_quote", False): + serialized = ["" if s is None else quote(str(s), safe="") for s in serialized] + + if div: + serialized = ["" if s is None else str(s) for s in serialized] + serialized = div.join(serialized) + + if "xml" in serialization_ctxt or is_xml: + # XML serialization is more complicated + xml_desc = serialization_ctxt.get("xml", {}) + xml_name = xml_desc.get("name") + if not xml_name: + xml_name = serialization_ctxt["key"] + + # Create a wrap node if necessary (use the fact that Element and list have "append") + is_wrapped = xml_desc.get("wrapped", False) + node_name = xml_desc.get("itemsName", xml_name) + if is_wrapped: + final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + else: + final_result = [] + # All list elements to "local_node" + for el in serialized: + if isinstance(el, ET.Element): + el_node = el + else: + el_node = _create_xml_node(node_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + if el is not None: # Otherwise it writes "None" :-p + el_node.text = str(el) + final_result.append(el_node) + return final_result + return serialized + + def serialize_dict(self, attr, dict_type, **kwargs): + """Serialize a dictionary of objects. + + :param dict attr: Object to be serialized. + :param str dict_type: Type of object in the dictionary. + :rtype: dict + :return: serialized dictionary + """ + serialization_ctxt = kwargs.get("serialization_ctxt", {}) + serialized = {} + for key, value in attr.items(): + try: + serialized[self.serialize_unicode(key)] = self.serialize_data(value, dict_type, **kwargs) + except ValueError as err: + if isinstance(err, SerializationError): + raise + serialized[self.serialize_unicode(key)] = None + + if "xml" in serialization_ctxt: + # XML serialization is more complicated + xml_desc = serialization_ctxt["xml"] + xml_name = xml_desc["name"] + + final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + for key, value in serialized.items(): + ET.SubElement(final_result, key).text = value + return final_result + + return serialized + + def serialize_object(self, attr, **kwargs): # pylint: disable=too-many-return-statements + """Serialize a generic object. + This will be handled as a dictionary. If object passed in is not + a basic type (str, int, float, dict, list) it will simply be + cast to str. + + :param dict attr: Object to be serialized. 
+ :rtype: dict or str + :return: serialized object + """ + if attr is None: + return None + if isinstance(attr, ET.Element): + return attr + obj_type = type(attr) + if obj_type in self.basic_types: + return self.serialize_basic(attr, self.basic_types[obj_type], **kwargs) + if obj_type is _long_type: + return self.serialize_long(attr) + if obj_type is str: + return self.serialize_unicode(attr) + if obj_type is datetime.datetime: + return self.serialize_iso(attr) + if obj_type is datetime.date: + return self.serialize_date(attr) + if obj_type is datetime.time: + return self.serialize_time(attr) + if obj_type is datetime.timedelta: + return self.serialize_duration(attr) + if obj_type is decimal.Decimal: + return self.serialize_decimal(attr) + + # If it's a model or I know this dependency, serialize as a Model + if obj_type in self.dependencies.values() or isinstance(attr, Model): + return self._serialize(attr) + + if obj_type == dict: + serialized = {} + for key, value in attr.items(): + try: + serialized[self.serialize_unicode(key)] = self.serialize_object(value, **kwargs) + except ValueError: + serialized[self.serialize_unicode(key)] = None + return serialized + + if obj_type == list: + serialized = [] + for obj in attr: + try: + serialized.append(self.serialize_object(obj, **kwargs)) + except ValueError: + pass + return serialized + return str(attr) + + @staticmethod + def serialize_enum(attr, enum_obj=None): + try: + result = attr.value + except AttributeError: + result = attr + try: + enum_obj(result) # type: ignore + return result + except ValueError as exc: + for enum_value in enum_obj: # type: ignore + if enum_value.value.lower() == str(attr).lower(): + return enum_value.value + error = "{!r} is not valid value for enum {!r}" + raise SerializationError(error.format(attr, enum_obj)) from exc + + @staticmethod + def serialize_bytearray(attr, **kwargs): # pylint: disable=unused-argument + """Serialize bytearray into base-64 string. + + :param str attr: Object to be serialized. + :rtype: str + :return: serialized base64 + """ + return b64encode(attr).decode() + + @staticmethod + def serialize_base64(attr, **kwargs): # pylint: disable=unused-argument + """Serialize str into base-64 string. + + :param str attr: Object to be serialized. + :rtype: str + :return: serialized base64 + """ + encoded = b64encode(attr).decode("ascii") + return encoded.strip("=").replace("+", "-").replace("/", "_") + + @staticmethod + def serialize_decimal(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Decimal object to float. + + :param decimal attr: Object to be serialized. + :rtype: float + :return: serialized decimal + """ + return float(attr) + + @staticmethod + def serialize_long(attr, **kwargs): # pylint: disable=unused-argument + """Serialize long (Py2) or int (Py3). + + :param int attr: Object to be serialized. + :rtype: int/long + :return: serialized long + """ + return _long_type(attr) + + @staticmethod + def serialize_date(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Date object into ISO-8601 formatted string. + + :param Date attr: Object to be serialized. + :rtype: str + :return: serialized date + """ + if isinstance(attr, str): + attr = isodate.parse_date(attr) + t = "{:04}-{:02}-{:02}".format(attr.year, attr.month, attr.day) + return t + + @staticmethod + def serialize_time(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Time object into ISO-8601 formatted string. + + :param datetime.time attr: Object to be serialized. 
+ :rtype: str + :return: serialized time + """ + if isinstance(attr, str): + attr = isodate.parse_time(attr) + t = "{:02}:{:02}:{:02}".format(attr.hour, attr.minute, attr.second) + if attr.microsecond: + t += ".{:02}".format(attr.microsecond) + return t + + @staticmethod + def serialize_duration(attr, **kwargs): # pylint: disable=unused-argument + """Serialize TimeDelta object into ISO-8601 formatted string. + + :param TimeDelta attr: Object to be serialized. + :rtype: str + :return: serialized duration + """ + if isinstance(attr, str): + attr = isodate.parse_duration(attr) + return isodate.duration_isoformat(attr) + + @staticmethod + def serialize_rfc(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Datetime object into RFC-1123 formatted string. + + :param Datetime attr: Object to be serialized. + :rtype: str + :raises TypeError: if format invalid. + :return: serialized rfc + """ + try: + if not attr.tzinfo: + _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") + utc = attr.utctimetuple() + except AttributeError as exc: + raise TypeError("RFC1123 object must be valid Datetime object.") from exc + + return "{}, {:02} {} {:04} {:02}:{:02}:{:02} GMT".format( + Serializer.days[utc.tm_wday], + utc.tm_mday, + Serializer.months[utc.tm_mon], + utc.tm_year, + utc.tm_hour, + utc.tm_min, + utc.tm_sec, + ) + + @staticmethod + def serialize_iso(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Datetime object into ISO-8601 formatted string. + + :param Datetime attr: Object to be serialized. + :rtype: str + :raises SerializationError: if format invalid. + :return: serialized iso + """ + if isinstance(attr, str): + attr = isodate.parse_datetime(attr) + try: + if not attr.tzinfo: + _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") + utc = attr.utctimetuple() + if utc.tm_year > 9999 or utc.tm_year < 1: + raise OverflowError("Hit max or min date") + + microseconds = str(attr.microsecond).rjust(6, "0").rstrip("0").ljust(3, "0") + if microseconds: + microseconds = "." + microseconds + date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( + utc.tm_year, utc.tm_mon, utc.tm_mday, utc.tm_hour, utc.tm_min, utc.tm_sec + ) + return date + microseconds + "Z" + except (ValueError, OverflowError) as err: + msg = "Unable to serialize datetime object." + raise SerializationError(msg) from err + except AttributeError as err: + msg = "ISO-8601 object must be valid Datetime object." + raise TypeError(msg) from err + + @staticmethod + def serialize_unix(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Datetime object into IntTime format. + This is represented as seconds. + + :param Datetime attr: Object to be serialized. + :rtype: int + :raises SerializationError: if format invalid + :return: serialied unix + """ + if isinstance(attr, int): + return attr + try: + if not attr.tzinfo: + _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") + return int(calendar.timegm(attr.utctimetuple())) + except AttributeError as exc: + raise TypeError("Unix time object must be valid Datetime object.") from exc + + +def rest_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument + key = attr_desc["key"] + working_data = data + + while "." 
in key: + # Need the cast, as for some reasons "split" is typed as list[str | Any] + dict_keys = cast(list[str], _FLATTEN.split(key)) + if len(dict_keys) == 1: + key = _decode_attribute_map_key(dict_keys[0]) + break + working_key = _decode_attribute_map_key(dict_keys[0]) + working_data = working_data.get(working_key, data) + if working_data is None: + # If at any point while following flatten JSON path see None, it means + # that all properties under are None as well + return None + key = ".".join(dict_keys[1:]) + + return working_data.get(key) + + +def rest_key_case_insensitive_extractor( # pylint: disable=unused-argument, inconsistent-return-statements + attr, attr_desc, data +): + key = attr_desc["key"] + working_data = data + + while "." in key: + dict_keys = _FLATTEN.split(key) + if len(dict_keys) == 1: + key = _decode_attribute_map_key(dict_keys[0]) + break + working_key = _decode_attribute_map_key(dict_keys[0]) + working_data = attribute_key_case_insensitive_extractor(working_key, None, working_data) + if working_data is None: + # If at any point while following flatten JSON path see None, it means + # that all properties under are None as well + return None + key = ".".join(dict_keys[1:]) + + if working_data: + return attribute_key_case_insensitive_extractor(key, None, working_data) + + +def last_rest_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument + """Extract the attribute in "data" based on the last part of the JSON path key. + + :param str attr: The attribute to extract + :param dict attr_desc: The attribute description + :param dict data: The data to extract from + :rtype: object + :returns: The extracted attribute + """ + key = attr_desc["key"] + dict_keys = _FLATTEN.split(key) + return attribute_key_extractor(dict_keys[-1], None, data) + + +def last_rest_key_case_insensitive_extractor(attr, attr_desc, data): # pylint: disable=unused-argument + """Extract the attribute in "data" based on the last part of the JSON path key. + + This is the case insensitive version of "last_rest_key_extractor" + :param str attr: The attribute to extract + :param dict attr_desc: The attribute description + :param dict data: The data to extract from + :rtype: object + :returns: The extracted attribute + """ + key = attr_desc["key"] + dict_keys = _FLATTEN.split(key) + return attribute_key_case_insensitive_extractor(dict_keys[-1], None, data) + + +def attribute_key_extractor(attr, _, data): + return data.get(attr) + + +def attribute_key_case_insensitive_extractor(attr, _, data): + found_key = None + lower_attr = attr.lower() + for key in data: + if lower_attr == key.lower(): + found_key = key + break + + return data.get(found_key) + + +def _extract_name_from_internal_type(internal_type): + """Given an internal type XML description, extract correct XML name with namespace. 
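+ For example, a model type whose ``_xml_map`` is ``{"name": "Blob", "ns": "http://example.com/ns"}``
+ yields ``"{http://example.com/ns}Blob"``.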
+ + :param dict internal_type: An model type + :rtype: tuple + :returns: A tuple XML name + namespace dict + """ + internal_type_xml_map = getattr(internal_type, "_xml_map", {}) + xml_name = internal_type_xml_map.get("name", internal_type.__name__) + xml_ns = internal_type_xml_map.get("ns", None) + if xml_ns: + xml_name = "{{{}}}{}".format(xml_ns, xml_name) + return xml_name + + +def xml_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument,too-many-return-statements + if isinstance(data, dict): + return None + + # Test if this model is XML ready first + if not isinstance(data, ET.Element): + return None + + xml_desc = attr_desc.get("xml", {}) + xml_name = xml_desc.get("name", attr_desc["key"]) + + # Look for a children + is_iter_type = attr_desc["type"].startswith("[") + is_wrapped = xml_desc.get("wrapped", False) + internal_type = attr_desc.get("internalType", None) + internal_type_xml_map = getattr(internal_type, "_xml_map", {}) + + # Integrate namespace if necessary + xml_ns = xml_desc.get("ns", internal_type_xml_map.get("ns", None)) + if xml_ns: + xml_name = "{{{}}}{}".format(xml_ns, xml_name) + + # If it's an attribute, that's simple + if xml_desc.get("attr", False): + return data.get(xml_name) + + # If it's x-ms-text, that's simple too + if xml_desc.get("text", False): + return data.text + + # Scenario where I take the local name: + # - Wrapped node + # - Internal type is an enum (considered basic types) + # - Internal type has no XML/Name node + if is_wrapped or (internal_type and (issubclass(internal_type, Enum) or "name" not in internal_type_xml_map)): + children = data.findall(xml_name) + # If internal type has a local name and it's not a list, I use that name + elif not is_iter_type and internal_type and "name" in internal_type_xml_map: + xml_name = _extract_name_from_internal_type(internal_type) + children = data.findall(xml_name) + # That's an array + else: + if internal_type: # Complex type, ignore itemsName and use the complex type name + items_name = _extract_name_from_internal_type(internal_type) + else: + items_name = xml_desc.get("itemsName", xml_name) + children = data.findall(items_name) + + if len(children) == 0: + if is_iter_type: + if is_wrapped: + return None # is_wrapped no node, we want None + return [] # not wrapped, assume empty list + return None # Assume it's not there, maybe an optional node. + + # If is_iter_type and not wrapped, return all found children + if is_iter_type: + if not is_wrapped: + return children + # Iter and wrapped, should have found one node only (the wrap one) + if len(children) != 1: + raise DeserializationError( + "Tried to deserialize an array not wrapped, and found several nodes '{}'. Maybe you should declare this array as wrapped?".format( + xml_name + ) + ) + return list(children[0]) # Might be empty list and that's ok. + + # Here it's not a itertype, we should have found one element only or empty + if len(children) > 1: + raise DeserializationError("Find several XML '{}' where it was not expected".format(xml_name)) + return children[0] + + +class Deserializer: + """Response object model deserializer. + + :param dict classes: Class type dictionary for deserializing complex types. + :ivar list key_extractors: Ordered list of extractors to be used by this deserializer. 
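+ 
+ For example (illustrative, with ``MyModel`` standing in for a generated model class):
+ 
+ .. code::python
+ 
+ deserializer = Deserializer({"MyModel": MyModel})
+ model = deserializer("MyModel", '{"name": "example"}', content_type="application/json")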
+ """ + + basic_types = {str: "str", int: "int", bool: "bool", float: "float"} + + valid_date = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?") + + def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None: + self.deserialize_type = { + "iso-8601": Deserializer.deserialize_iso, + "rfc-1123": Deserializer.deserialize_rfc, + "unix-time": Deserializer.deserialize_unix, + "duration": Deserializer.deserialize_duration, + "date": Deserializer.deserialize_date, + "time": Deserializer.deserialize_time, + "decimal": Deserializer.deserialize_decimal, + "long": Deserializer.deserialize_long, + "bytearray": Deserializer.deserialize_bytearray, + "base64": Deserializer.deserialize_base64, + "object": self.deserialize_object, + "[]": self.deserialize_iter, + "{}": self.deserialize_dict, + } + self.deserialize_expected_types = { + "duration": (isodate.Duration, datetime.timedelta), + "iso-8601": (datetime.datetime), + } + self.dependencies: dict[str, type] = dict(classes) if classes else {} + self.key_extractors = [rest_key_extractor, xml_key_extractor] + # Additional properties only works if the "rest_key_extractor" is used to + # extract the keys. Making it to work whatever the key extractor is too much + # complicated, with no real scenario for now. + # So adding a flag to disable additional properties detection. This flag should be + # used if your expect the deserialization to NOT come from a JSON REST syntax. + # Otherwise, result are unexpected + self.additional_properties_detection = True + + def __call__(self, target_obj, response_data, content_type=None): + """Call the deserializer to process a REST response. + + :param str target_obj: Target data type to deserialize to. + :param requests.Response response_data: REST response object. + :param str content_type: Swagger "produces" if available. + :raises DeserializationError: if deserialization fails. + :return: Deserialized object. + :rtype: object + """ + data = self._unpack_content(response_data, content_type) + return self._deserialize(target_obj, data) + + def _deserialize(self, target_obj, data): # pylint: disable=inconsistent-return-statements + """Call the deserializer on a model. + + Data needs to be already deserialized as JSON or XML ElementTree + + :param str target_obj: Target data type to deserialize to. + :param object data: Object to deserialize. + :raises DeserializationError: if deserialization fails. + :return: Deserialized object. 
+ :rtype: object + """ + # This is already a model, go recursive just in case + if hasattr(data, "_attribute_map"): + constants = [name for name, config in getattr(data, "_validation", {}).items() if config.get("constant")] + try: + for attr, mapconfig in data._attribute_map.items(): # pylint: disable=protected-access + if attr in constants: + continue + value = getattr(data, attr) + if value is None: + continue + local_type = mapconfig["type"] + internal_data_type = local_type.strip("[]{}") + if internal_data_type not in self.dependencies or isinstance(internal_data_type, Enum): + continue + setattr(data, attr, self._deserialize(local_type, value)) + return data + except AttributeError: + return + + response, class_name = self._classify_target(target_obj, data) + + if isinstance(response, str): + return self.deserialize_data(data, response) + if isinstance(response, type) and issubclass(response, Enum): + return self.deserialize_enum(data, response) + + if data is None or data is CoreNull: + return data + try: + attributes = response._attribute_map # type: ignore # pylint: disable=protected-access + d_attrs = {} + for attr, attr_desc in attributes.items(): + # Check empty string. If it's not empty, someone has a real "additionalProperties"... + if attr == "additional_properties" and attr_desc["key"] == "": + continue + raw_value = None + # Enhance attr_desc with some dynamic data + attr_desc = attr_desc.copy() # Do a copy, do not change the real one + internal_data_type = attr_desc["type"].strip("[]{}") + if internal_data_type in self.dependencies: + attr_desc["internalType"] = self.dependencies[internal_data_type] + + for key_extractor in self.key_extractors: + found_value = key_extractor(attr, attr_desc, data) + if found_value is not None: + if raw_value is not None and raw_value != found_value: + msg = ( + "Ignoring extracted value '%s' from %s for key '%s'" + " (duplicate extraction, follow extractors order)" + ) + _LOGGER.warning(msg, found_value, key_extractor, attr) + continue + raw_value = found_value + + value = self.deserialize_data(raw_value, attr_desc["type"]) + d_attrs[attr] = value + except (AttributeError, TypeError, KeyError) as err: + msg = "Unable to deserialize to object: " + class_name # type: ignore + raise DeserializationError(msg) from err + additional_properties = self._build_additional_properties(attributes, data) + return self._instantiate_model(response, d_attrs, additional_properties) + + def _build_additional_properties(self, attribute_map, data): + if not self.additional_properties_detection: + return None + if "additional_properties" in attribute_map and attribute_map.get("additional_properties", {}).get("key") != "": + # Check empty string. If it's not empty, someone has a real "additionalProperties" + return None + if isinstance(data, ET.Element): + data = {el.tag: el.text for el in data} + + known_keys = { + _decode_attribute_map_key(_FLATTEN.split(desc["key"])[0]) + for desc in attribute_map.values() + if desc["key"] != "" + } + present_keys = set(data.keys()) + missing_keys = present_keys - known_keys + return {key: data[key] for key in missing_keys} + + def _classify_target(self, target, data): + """Check to see whether the deserialization target object can + be classified into a subclass. + Once classification has been determined, initialize object. + + :param str target: The target object type to deserialize to. + :param str/dict data: The response data to deserialize. + :return: The classified target object and its class name. 
+ :rtype: tuple + """ + if target is None: + return None, None + + if isinstance(target, str): + try: + target = self.dependencies[target] + except KeyError: + return target, target + + try: + target = target._classify(data, self.dependencies) # type: ignore # pylint: disable=protected-access + except AttributeError: + pass # Target is not a Model, no classify + return target, target.__class__.__name__ # type: ignore + + def failsafe_deserialize(self, target_obj, data, content_type=None): + """Ignores any errors encountered in deserialization, + and falls back to not deserializing the object. Recommended + for use in error deserialization, as we want to return the + HttpResponseError to users, and not have them deal with + a deserialization error. + + :param str target_obj: The target object type to deserialize to. + :param str/dict data: The response data to deserialize. + :param str content_type: Swagger "produces" if available. + :return: Deserialized object. + :rtype: object + """ + try: + return self(target_obj, data, content_type=content_type) + except: # pylint: disable=bare-except + _LOGGER.debug( + "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True + ) + return None + + @staticmethod + def _unpack_content(raw_data, content_type=None): + """Extract the correct structure for deserialization. + + If raw_data is a PipelineResponse, try to extract the result of RawDeserializer. + if we can't, raise. Your Pipeline should have a RawDeserializer. + + If not a pipeline response and raw_data is bytes or string, use content-type + to decode it. If no content-type, try JSON. + + If raw_data is something else, bypass all logic and return it directly. + + :param obj raw_data: Data to be processed. + :param str content_type: How to parse if raw_data is a string/bytes. + :raises JSONDecodeError: If JSON is requested and parsing is impossible. + :raises UnicodeDecodeError: If bytes is not UTF8 + :rtype: object + :return: Unpacked content. + """ + # Assume this is enough to detect a Pipeline Response without importing it + context = getattr(raw_data, "context", {}) + if context: + if RawDeserializer.CONTEXT_NAME in context: + return context[RawDeserializer.CONTEXT_NAME] + raise ValueError("This pipeline didn't have the RawDeserializer policy; can't deserialize") + + # Assume this is enough to recognize universal_http.ClientResponse without importing it + if hasattr(raw_data, "body"): + return RawDeserializer.deserialize_from_http_generics(raw_data.text(), raw_data.headers) + + # Assume this enough to recognize requests.Response without importing it. + if hasattr(raw_data, "_content_consumed"): + return RawDeserializer.deserialize_from_http_generics(raw_data.text, raw_data.headers) + + if isinstance(raw_data, (str, bytes)) or hasattr(raw_data, "read"): + return RawDeserializer.deserialize_from_text(raw_data, content_type) # type: ignore + return raw_data + + def _instantiate_model(self, response, attrs, additional_properties=None): + """Instantiate a response model passing in deserialized args. + + :param Response response: The response model class. + :param dict attrs: The deserialized response attributes. + :param dict additional_properties: Additional properties to be set. + :rtype: Response + :return: The instantiated response model. 
+ """ + if callable(response): + subtype = getattr(response, "_subtype_map", {}) + try: + readonly = [ + k + for k, v in response._validation.items() # pylint: disable=protected-access # type: ignore + if v.get("readonly") + ] + const = [ + k + for k, v in response._validation.items() # pylint: disable=protected-access # type: ignore + if v.get("constant") + ] + kwargs = {k: v for k, v in attrs.items() if k not in subtype and k not in readonly + const} + response_obj = response(**kwargs) + for attr in readonly: + setattr(response_obj, attr, attrs.get(attr)) + if additional_properties: + response_obj.additional_properties = additional_properties # type: ignore + return response_obj + except TypeError as err: + msg = "Unable to deserialize {} into model {}. ".format(kwargs, response) # type: ignore + raise DeserializationError(msg + str(err)) from err + else: + try: + for attr, value in attrs.items(): + setattr(response, attr, value) + return response + except Exception as exp: + msg = "Unable to populate response model. " + msg += "Type: {}, Error: {}".format(type(response), exp) + raise DeserializationError(msg) from exp + + def deserialize_data(self, data, data_type): # pylint: disable=too-many-return-statements + """Process data for deserialization according to data type. + + :param str data: The response string to be deserialized. + :param str data_type: The type to deserialize to. + :raises DeserializationError: if deserialization fails. + :return: Deserialized object. + :rtype: object + """ + if data is None: + return data + + try: + if not data_type: + return data + if data_type in self.basic_types.values(): + return self.deserialize_basic(data, data_type) + if data_type in self.deserialize_type: + if isinstance(data, self.deserialize_expected_types.get(data_type, tuple())): + return data + + is_a_text_parsing_type = lambda x: x not in [ # pylint: disable=unnecessary-lambda-assignment + "object", + "[]", + r"{}", + ] + if isinstance(data, ET.Element) and is_a_text_parsing_type(data_type) and not data.text: + return None + data_val = self.deserialize_type[data_type](data) + return data_val + + iter_type = data_type[0] + data_type[-1] + if iter_type in self.deserialize_type: + return self.deserialize_type[iter_type](data, data_type[1:-1]) + + obj_type = self.dependencies[data_type] + if issubclass(obj_type, Enum): + if isinstance(data, ET.Element): + data = data.text + return self.deserialize_enum(data, obj_type) + + except (ValueError, TypeError, AttributeError) as err: + msg = "Unable to deserialize response data." + msg += " Data: {}, {}".format(data, data_type) + raise DeserializationError(msg) from err + return self._deserialize(obj_type, data) + + def deserialize_iter(self, attr, iter_type): + """Deserialize an iterable. + + :param list attr: Iterable to be deserialized. + :param str iter_type: The type of object in the iterable. + :return: Deserialized iterable. + :rtype: list + """ + if attr is None: + return None + if isinstance(attr, ET.Element): # If I receive an element here, get the children + attr = list(attr) + if not isinstance(attr, (list, set)): + raise DeserializationError("Cannot deserialize as [{}] an object of type {}".format(iter_type, type(attr))) + return [self.deserialize_data(a, iter_type) for a in attr] + + def deserialize_dict(self, attr, dict_type): + """Deserialize a dictionary. + + :param dict/list attr: Dictionary to be deserialized. Also accepts + a list of key, value pairs. + :param str dict_type: The object type of the items in the dictionary. 
+ :return: Deserialized dictionary. + :rtype: dict + """ + if isinstance(attr, list): + return {x["key"]: self.deserialize_data(x["value"], dict_type) for x in attr} + + if isinstance(attr, ET.Element): + # Transform value into {"Key": "value"} + attr = {el.tag: el.text for el in attr} + return {k: self.deserialize_data(v, dict_type) for k, v in attr.items()} + + def deserialize_object(self, attr, **kwargs): # pylint: disable=too-many-return-statements + """Deserialize a generic object. + This will be handled as a dictionary. + + :param dict attr: Dictionary to be deserialized. + :return: Deserialized object. + :rtype: dict + :raises TypeError: if non-builtin datatype encountered. + """ + if attr is None: + return None + if isinstance(attr, ET.Element): + # Do no recurse on XML, just return the tree as-is + return attr + if isinstance(attr, str): + return self.deserialize_basic(attr, "str") + obj_type = type(attr) + if obj_type in self.basic_types: + return self.deserialize_basic(attr, self.basic_types[obj_type]) + if obj_type is _long_type: + return self.deserialize_long(attr) + + if obj_type == dict: + deserialized = {} + for key, value in attr.items(): + try: + deserialized[key] = self.deserialize_object(value, **kwargs) + except ValueError: + deserialized[key] = None + return deserialized + + if obj_type == list: + deserialized = [] + for obj in attr: + try: + deserialized.append(self.deserialize_object(obj, **kwargs)) + except ValueError: + pass + return deserialized + + error = "Cannot deserialize generic object with type: " + raise TypeError(error + str(obj_type)) + + def deserialize_basic(self, attr, data_type): # pylint: disable=too-many-return-statements + """Deserialize basic builtin data type from string. + Will attempt to convert to str, int, float and bool. + This function will also accept '1', '0', 'true' and 'false' as + valid bool values. + + :param str attr: response string to be deserialized. + :param str data_type: deserialization data type. + :return: Deserialized basic type. + :rtype: str, int, float or bool + :raises TypeError: if string format is not valid or data_type is not one of str, int, float, bool. + """ + # If we're here, data is supposed to be a basic type. + # If it's still an XML node, take the text + if isinstance(attr, ET.Element): + attr = attr.text + if not attr: + if data_type == "str": + # None or '', node is empty string. + return "" + # None or '', node with a strong type is None. + # Don't try to model "empty bool" or "empty int" + return None + + if data_type == "bool": + if attr in [True, False, 1, 0]: + return bool(attr) + if isinstance(attr, str): + if attr.lower() in ["true", "1"]: + return True + if attr.lower() in ["false", "0"]: + return False + raise TypeError("Invalid boolean value: {}".format(attr)) + + if data_type == "str": + return self.deserialize_unicode(attr) + if data_type == "int": + return int(attr) + if data_type == "float": + return float(attr) + raise TypeError("Unknown basic data type: {}".format(data_type)) + + @staticmethod + def deserialize_unicode(data): + """Preserve unicode objects in Python 2, otherwise return data + as a string. + + :param str data: response string to be deserialized. + :return: Deserialized string. 
+ :rtype: str or unicode + """ + # We might be here because we have an enum modeled as string, + # and we try to deserialize a partial dict with enum inside + if isinstance(data, Enum): + return data + + # Consider this is real string + try: + if isinstance(data, unicode): # type: ignore + return data + except NameError: + return str(data) + return str(data) + + @staticmethod + def deserialize_enum(data, enum_obj): + """Deserialize string into enum object. + + If the string is not a valid enum value it will be returned as-is + and a warning will be logged. + + :param str data: Response string to be deserialized. If this value is + None or invalid it will be returned as-is. + :param Enum enum_obj: Enum object to deserialize to. + :return: Deserialized enum object. + :rtype: Enum + """ + if isinstance(data, enum_obj) or data is None: + return data + if isinstance(data, Enum): + data = data.value + if isinstance(data, int): + # Workaround. We might consider remove it in the future. + try: + return list(enum_obj.__members__.values())[data] + except IndexError as exc: + error = "{!r} is not a valid index for enum {!r}" + raise DeserializationError(error.format(data, enum_obj)) from exc + try: + return enum_obj(str(data)) + except ValueError: + for enum_value in enum_obj: + if enum_value.value.lower() == str(data).lower(): + return enum_value + # We don't fail anymore for unknown value, we deserialize as a string + _LOGGER.warning("Deserializer is not able to find %s as valid enum in %s", data, enum_obj) + return Deserializer.deserialize_unicode(data) + + @staticmethod + def deserialize_bytearray(attr): + """Deserialize string into bytearray. + + :param str attr: response string to be deserialized. + :return: Deserialized bytearray + :rtype: bytearray + :raises TypeError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + return bytearray(b64decode(attr)) # type: ignore + + @staticmethod + def deserialize_base64(attr): + """Deserialize base64 encoded string into string. + + :param str attr: response string to be deserialized. + :return: Deserialized base64 string + :rtype: bytearray + :raises TypeError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + padding = "=" * (3 - (len(attr) + 3) % 4) # type: ignore + attr = attr + padding # type: ignore + encoded = attr.replace("-", "+").replace("_", "/") + return b64decode(encoded) + + @staticmethod + def deserialize_decimal(attr): + """Deserialize string into Decimal object. + + :param str attr: response string to be deserialized. + :return: Deserialized decimal + :raises DeserializationError: if string format invalid. + :rtype: decimal + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + return decimal.Decimal(str(attr)) # type: ignore + except decimal.DecimalException as err: + msg = "Invalid decimal {}".format(attr) + raise DeserializationError(msg) from err + + @staticmethod + def deserialize_long(attr): + """Deserialize string into long (Py2) or int (Py3). + + :param str attr: response string to be deserialized. + :return: Deserialized int + :rtype: long or int + :raises ValueError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + return _long_type(attr) # type: ignore + + @staticmethod + def deserialize_duration(attr): + """Deserialize ISO-8601 formatted string into TimeDelta object. + + :param str attr: response string to be deserialized. 
+ :return: Deserialized duration + :rtype: TimeDelta + :raises DeserializationError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + duration = isodate.parse_duration(attr) + except (ValueError, OverflowError, AttributeError) as err: + msg = "Cannot deserialize duration object." + raise DeserializationError(msg) from err + return duration + + @staticmethod + def deserialize_date(attr): + """Deserialize ISO-8601 formatted string into Date object. + + :param str attr: response string to be deserialized. + :return: Deserialized date + :rtype: Date + :raises DeserializationError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + if re.search(r"[^\W\d_]", attr, re.I + re.U): # type: ignore + raise DeserializationError("Date must have only digits and -. Received: %s" % attr) + # This must NOT use defaultmonth/defaultday. Using None ensure this raises an exception. + return isodate.parse_date(attr, defaultmonth=0, defaultday=0) + + @staticmethod + def deserialize_time(attr): + """Deserialize ISO-8601 formatted string into time object. + + :param str attr: response string to be deserialized. + :return: Deserialized time + :rtype: datetime.time + :raises DeserializationError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + if re.search(r"[^\W\d_]", attr, re.I + re.U): # type: ignore + raise DeserializationError("Date must have only digits and -. Received: %s" % attr) + return isodate.parse_time(attr) + + @staticmethod + def deserialize_rfc(attr): + """Deserialize RFC-1123 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :return: Deserialized RFC datetime + :rtype: Datetime + :raises DeserializationError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + parsed_date = email.utils.parsedate_tz(attr) # type: ignore + date_obj = datetime.datetime( + *parsed_date[:6], tzinfo=datetime.timezone(datetime.timedelta(minutes=(parsed_date[9] or 0) / 60)) + ) + if not date_obj.tzinfo: + date_obj = date_obj.astimezone(tz=TZ_UTC) + except ValueError as err: + msg = "Cannot deserialize to rfc datetime object." + raise DeserializationError(msg) from err + return date_obj + + @staticmethod + def deserialize_iso(attr): + """Deserialize ISO-8601 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :return: Deserialized ISO datetime + :rtype: Datetime + :raises DeserializationError: if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + attr = attr.upper() # type: ignore + match = Deserializer.valid_date.match(attr) + if not match: + raise ValueError("Invalid datetime string: " + attr) + + check_decimal = attr.split(".") + if len(check_decimal) > 1: + decimal_str = "" + for digit in check_decimal[1]: + if digit.isdigit(): + decimal_str += digit + else: + break + if len(decimal_str) > 6: + attr = attr.replace(decimal_str, decimal_str[0:6]) + + date_obj = isodate.parse_datetime(attr) + test_utc = date_obj.utctimetuple() + if test_utc.tm_year > 9999 or test_utc.tm_year < 1: + raise OverflowError("Hit max or min date") + except (ValueError, OverflowError, AttributeError) as err: + msg = "Cannot deserialize datetime object." + raise DeserializationError(msg) from err + return date_obj + + @staticmethod + def deserialize_unix(attr): + """Serialize Datetime object into IntTime format. + This is represented as seconds. 
+ + :param int attr: Object to be serialized. + :return: Deserialized datetime + :rtype: Datetime + :raises DeserializationError: if format invalid + """ + if isinstance(attr, ET.Element): + attr = int(attr.text) # type: ignore + try: + attr = int(attr) + date_obj = datetime.datetime.fromtimestamp(attr, TZ_UTC) + except ValueError as err: + msg = "Cannot deserialize to unix datetime object." + raise DeserializationError(msg) from err + return date_obj diff --git a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_utils/utils.py b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_utils/utils.py new file mode 100644 index 000000000000..35c9c836f85f --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_utils/utils.py @@ -0,0 +1,25 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from abc import ABC +from typing import Generic, TYPE_CHECKING, TypeVar + +if TYPE_CHECKING: + from .serialization import Deserializer, Serializer + + +TClient = TypeVar("TClient") +TConfig = TypeVar("TConfig") + + +class ClientMixinABC(ABC, Generic[TClient, TConfig]): + """DO NOT use this class. It is for internal typing use only.""" + + _client: TClient + _config: TConfig + _serialize: "Serializer" + _deserialize: "Deserializer" diff --git a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_validation.py b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_validation.py index 752b2822f9d3..f5af3a4eb8a2 100644 --- a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_validation.py +++ b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_validation.py @@ -10,6 +10,22 @@ def api_version_validation(**kwargs): params_added_on = kwargs.pop("params_added_on", {}) method_added_on = kwargs.pop("method_added_on", "") + api_versions_list = kwargs.pop("api_versions_list", []) + + def _index_with_default(value: str, default: int = -1) -> int: + """Get the index of value in lst, or return default if not found. + + :param value: The value to search for in the api_versions_list. + :type value: str + :param default: The default value to return if the value is not found. + :type default: int + :return: The index of the value in the list, or the default value if not found. + :rtype: int + """ + try: + return api_versions_list.index(value) + except ValueError: + return default def decorator(func): @functools.wraps(func) @@ -21,7 +37,7 @@ def wrapper(*args, **kwargs): except AttributeError: return func(*args, **kwargs) - if method_added_on > client_api_version: + if _index_with_default(method_added_on) > _index_with_default(client_api_version): raise ValueError( f"'{func.__name__}' is not available in API version " f"{client_api_version}. Pass service API version {method_added_on} or newer to your client." 
@@ -31,7 +47,7 @@ def wrapper(*args, **kwargs): parameter: api_version for api_version, parameters in params_added_on.items() for parameter in parameters - if parameter in kwargs and api_version > client_api_version + if parameter in kwargs and _index_with_default(api_version) > _index_with_default(client_api_version) } if unsupported: raise ValueError( diff --git a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/aio/_client.py b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/aio/_client.py index 20236dba0418..3c98e8a4fe89 100644 --- a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/aio/_client.py +++ b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/aio/_client.py @@ -14,15 +14,15 @@ from azure.core.pipeline import policies from azure.core.rest import AsyncHttpResponse, HttpRequest -from .._serialization import Deserializer, Serializer +from .._utils.serialization import Deserializer, Serializer from ._configuration import LoadTestAdministrationClientConfiguration, LoadTestRunClientConfiguration -from ._operations import LoadTestAdministrationClientOperationsMixin, LoadTestRunClientOperationsMixin +from ._operations import _LoadTestAdministrationClientOperationsMixin, _LoadTestRunClientOperationsMixin if TYPE_CHECKING: from azure.core.credentials_async import AsyncTokenCredential -class LoadTestAdministrationClient(LoadTestAdministrationClientOperationsMixin): +class LoadTestAdministrationClient(_LoadTestAdministrationClientOperationsMixin): """LoadTestAdministrationClient. :param endpoint: Required. @@ -30,14 +30,17 @@ class LoadTestAdministrationClient(LoadTestAdministrationClientOperationsMixin): :param credential: Credential used to authenticate requests to the service. Required. :type credential: ~azure.core.credentials_async.AsyncTokenCredential :keyword api_version: The API version to use for this operation. Default value is - "2024-12-01-preview". Note that overriding this default value may result in unsupported + "2025-11-01-preview". Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. """ def __init__(self, endpoint: str, credential: "AsyncTokenCredential", **kwargs: Any) -> None: _endpoint = "https://{endpoint}" self._config = LoadTestAdministrationClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) + _policies = kwargs.pop("policies", None) if _policies is None: _policies = [ @@ -100,7 +103,7 @@ async def __aexit__(self, *exc_details: Any) -> None: await self._client.__aexit__(*exc_details) -class LoadTestRunClient(LoadTestRunClientOperationsMixin): +class LoadTestRunClient(_LoadTestRunClientOperationsMixin): """LoadTestRunClient. :param endpoint: Required. @@ -108,14 +111,17 @@ class LoadTestRunClient(LoadTestRunClientOperationsMixin): :param credential: Credential used to authenticate requests to the service. Required. :type credential: ~azure.core.credentials_async.AsyncTokenCredential :keyword api_version: The API version to use for this operation. Default value is - "2024-12-01-preview". Note that overriding this default value may result in unsupported + "2025-11-01-preview". Note that overriding this default value may result in unsupported behavior. 
:paramtype api_version: str + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. """ def __init__(self, endpoint: str, credential: "AsyncTokenCredential", **kwargs: Any) -> None: _endpoint = "https://{endpoint}" self._config = LoadTestRunClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) + _policies = kwargs.pop("policies", None) if _policies is None: _policies = [ diff --git a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/aio/_configuration.py b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/aio/_configuration.py index 9a77a5e7a3e4..3f08d7bc214d 100644 --- a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/aio/_configuration.py +++ b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/aio/_configuration.py @@ -27,13 +27,13 @@ class LoadTestAdministrationClientConfiguration: # pylint: disable=too-many-ins :param credential: Credential used to authenticate requests to the service. Required. :type credential: ~azure.core.credentials_async.AsyncTokenCredential :keyword api_version: The API version to use for this operation. Default value is - "2024-12-01-preview". Note that overriding this default value may result in unsupported + "2025-11-01-preview". Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str """ def __init__(self, endpoint: str, credential: "AsyncTokenCredential", **kwargs: Any) -> None: - api_version: str = kwargs.pop("api_version", "2024-12-01-preview") + api_version: str = kwargs.pop("api_version", "2025-11-01-preview") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") @@ -75,13 +75,13 @@ class LoadTestRunClientConfiguration: # pylint: disable=too-many-instance-attri :param credential: Credential used to authenticate requests to the service. Required. :type credential: ~azure.core.credentials_async.AsyncTokenCredential :keyword api_version: The API version to use for this operation. Default value is - "2024-12-01-preview". Note that overriding this default value may result in unsupported + "2025-11-01-preview". Note that overriding this default value may result in unsupported behavior. 
:paramtype api_version: str """ def __init__(self, endpoint: str, credential: "AsyncTokenCredential", **kwargs: Any) -> None: - api_version: str = kwargs.pop("api_version", "2024-12-01-preview") + api_version: str = kwargs.pop("api_version", "2025-11-01-preview") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") diff --git a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/aio/_operations/__init__.py b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/aio/_operations/__init__.py index 93b9c55d70d8..8809651f9db9 100644 --- a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/aio/_operations/__init__.py +++ b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/aio/_operations/__init__.py @@ -12,16 +12,13 @@ if TYPE_CHECKING: from ._patch import * # pylint: disable=unused-wildcard-import -from ._operations import LoadTestAdministrationClientOperationsMixin # type: ignore -from ._operations import LoadTestRunClientOperationsMixin # type: ignore +from ._operations import _LoadTestAdministrationClientOperationsMixin # type: ignore # pylint: disable=unused-import +from ._operations import _LoadTestRunClientOperationsMixin # type: ignore # pylint: disable=unused-import from ._patch import __all__ as _patch_all from ._patch import * from ._patch import patch_sdk as _patch_sdk -__all__ = [ - "LoadTestAdministrationClientOperationsMixin", - "LoadTestRunClientOperationsMixin", -] +__all__ = [] __all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore _patch_sdk() diff --git a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/aio/_operations/_operations.py b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/aio/_operations/_operations.py index f2e638eb48b0..2ce0a6b39361 100644 --- a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/aio/_operations/_operations.py +++ b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/aio/_operations/_operations.py @@ -6,13 +6,14 @@ # Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- +from collections.abc import MutableMapping import datetime from io import IOBase import json -import sys -from typing import Any, AsyncIterable, Callable, Dict, IO, List, Optional, TypeVar, Union, overload +from typing import Any, AsyncIterator, Callable, IO, Optional, TypeVar, Union, cast, overload import urllib.parse +from azure.core import AsyncPipelineClient from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ( ClientAuthenticationError, @@ -25,37 +26,51 @@ map_error, ) from azure.core.pipeline import PipelineResponse +from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod +from azure.core.polling.async_base_polling import AsyncLROBasePolling from azure.core.rest import AsyncHttpResponse, HttpRequest from azure.core.tracing.decorator import distributed_trace from azure.core.tracing.decorator_async import distributed_trace_async from azure.core.utils import case_insensitive_dict from ... 
import models as _models -from ..._model_base import SdkJSONEncoder, _deserialize from ..._operations._operations import ( build_load_test_administration_begin_upload_test_file_request, + build_load_test_administration_clone_test_request, build_load_test_administration_create_or_update_app_components_request, + build_load_test_administration_create_or_update_notification_rule_request, build_load_test_administration_create_or_update_server_metrics_config_request, build_load_test_administration_create_or_update_test_profile_request, build_load_test_administration_create_or_update_test_request, + build_load_test_administration_create_or_update_trigger_request, + build_load_test_administration_delete_notification_rule_request, build_load_test_administration_delete_test_file_request, build_load_test_administration_delete_test_profile_request, build_load_test_administration_delete_test_request, + build_load_test_administration_delete_trigger_request, + build_load_test_administration_generate_test_plan_recommendations_request, build_load_test_administration_get_app_components_request, + build_load_test_administration_get_notification_rule_request, + build_load_test_administration_get_operation_status_request, build_load_test_administration_get_server_metrics_config_request, build_load_test_administration_get_test_file_request, build_load_test_administration_get_test_profile_request, build_load_test_administration_get_test_request, + build_load_test_administration_get_trigger_request, + build_load_test_administration_list_notification_rules_request, build_load_test_administration_list_test_files_request, build_load_test_administration_list_test_profiles_request, build_load_test_administration_list_tests_request, + build_load_test_administration_list_triggers_request, build_load_test_run_begin_test_profile_run_request, build_load_test_run_begin_test_run_request, build_load_test_run_create_or_update_app_components_request, build_load_test_run_create_or_update_server_metrics_config_request, build_load_test_run_delete_test_profile_run_request, build_load_test_run_delete_test_run_request, + build_load_test_run_generate_test_run_insights_request, build_load_test_run_get_app_components_request, + build_load_test_run_get_latest_test_run_insights_request, build_load_test_run_get_metric_definitions_request, build_load_test_run_get_metric_namespaces_request, build_load_test_run_get_server_metrics_config_request, @@ -66,23 +81,23 @@ build_load_test_run_list_metrics_request, build_load_test_run_list_test_profile_runs_request, build_load_test_run_list_test_runs_request, + build_load_test_run_patch_latest_test_run_insights_request, build_load_test_run_stop_test_profile_run_request, build_load_test_run_stop_test_run_request, ) +from ..._utils.model_base import SdkJSONEncoder, _deserialize +from ..._utils.utils import ClientMixinABC from ..._validation import api_version_validation -from .._vendor import LoadTestAdministrationClientMixinABC, LoadTestRunClientMixinABC +from .._configuration import LoadTestAdministrationClientConfiguration, LoadTestRunClientConfiguration -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore -JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object +JSON = MutableMapping[str, Any] +_Unset: Any = object() T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, 
AsyncHttpResponse], T, dict[str, Any]], Any]] -class LoadTestAdministrationClientOperationsMixin( # pylint: disable=name-too-long - LoadTestAdministrationClientMixinABC +class _LoadTestAdministrationClientOperationsMixin( # pylint: disable=too-many-public-methods + ClientMixinABC[AsyncPipelineClient[HttpRequest, AsyncHttpResponse], LoadTestAdministrationClientConfiguration] ): @overload @@ -780,7 +795,7 @@ async def get_test_file(self, test_id: str, file_name: str, **kwargs: Any) -> _m return deserialized # type: ignore @distributed_trace - def list_test_files(self, test_id: str, **kwargs: Any) -> AsyncIterable["_models.TestFileInfo"]: + def list_test_files(self, test_id: str, **kwargs: Any) -> AsyncItemPaged["_models.TestFileInfo"]: """Get all test files. Get all test files. @@ -796,7 +811,7 @@ def list_test_files(self, test_id: str, **kwargs: Any) -> AsyncIterable["_models _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[_models.TestFileInfo]] = kwargs.pop("cls", None) + cls: ClsType[list[_models.TestFileInfo]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -842,7 +857,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.TestFileInfo], deserialized["value"]) + list_of_elem = _deserialize(list[_models.TestFileInfo], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("nextLink") or None, AsyncList(list_of_elem) @@ -873,7 +888,7 @@ def list_tests( last_modified_start_time: Optional[datetime.datetime] = None, last_modified_end_time: Optional[datetime.datetime] = None, **kwargs: Any - ) -> AsyncIterable["_models.Test"]: + ) -> AsyncItemPaged["_models.Test"]: """Get all load tests by the fully qualified resource Id e.g subscriptions/{subId}/resourceGroups/{rg}/providers/Microsoft.LoadTestService/loadtests/{resName}. @@ -901,7 +916,7 @@ def list_tests( _params = kwargs.pop("params", {}) or {} maxpagesize = kwargs.pop("maxpagesize", None) - cls: ClsType[List[_models.Test]] = kwargs.pop("cls", None) + cls: ClsType[list[_models.Test]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -951,7 +966,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.Test], deserialized["value"]) + list_of_elem = _deserialize(list[_models.Test], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("nextLink") or None, AsyncList(list_of_elem) @@ -999,8 +1014,8 @@ async def _begin_upload_test_file( :param body: The file content as application/octet-stream. Required. :type body: bytes :keyword file_type: File type. Known values are: "JMX_FILE", "USER_PROPERTIES", - "ADDITIONAL_ARTIFACTS", "ZIPPED_ARTIFACTS", "URL_TEST_CONFIG", and "TEST_SCRIPT". Default value - is None. + "ADDITIONAL_ARTIFACTS", "ZIPPED_ARTIFACTS", "URL_TEST_CONFIG", "TEST_SCRIPT", + "BROWSER_RECORDING", and "TEST_PLAN_RECOMMENDATIONS". Default value is None. :paramtype file_type: str or ~azure.developer.loadtesting.models.FileType :return: TestFileInfo. 
The TestFileInfo is compatible with MutableMapping :rtype: ~azure.developer.loadtesting.models.TestFileInfo @@ -1245,6 +1260,13 @@ async def create_or_update_test_profile( @api_version_validation( method_added_on="2024-05-01-preview", params_added_on={"2024-05-01-preview": ["api_version", "test_profile_id", "content_type", "accept"]}, + api_versions_list=[ + "2024-05-01-preview", + "2024-07-01-preview", + "2024-12-01-preview", + "2025-03-01-preview", + "2025-11-01-preview", + ], ) async def create_or_update_test_profile( self, test_profile_id: str, body: Union[_models.TestProfile, JSON, IO[bytes]], **kwargs: Any @@ -1326,7 +1348,14 @@ async def create_or_update_test_profile( @distributed_trace_async @api_version_validation( method_added_on="2024-05-01-preview", - params_added_on={"2024-05-01-preview": ["api_version", "test_profile_id", "accept"]}, + params_added_on={"2024-05-01-preview": ["api_version", "test_profile_id"]}, + api_versions_list=[ + "2024-05-01-preview", + "2024-07-01-preview", + "2024-12-01-preview", + "2025-03-01-preview", + "2025-11-01-preview", + ], ) async def delete_test_profile(self, test_profile_id: str, **kwargs: Any) -> None: """Delete a test profile. @@ -1382,6 +1411,13 @@ async def delete_test_profile(self, test_profile_id: str, **kwargs: Any) -> None @api_version_validation( method_added_on="2024-05-01-preview", params_added_on={"2024-05-01-preview": ["api_version", "test_profile_id", "accept"]}, + api_versions_list=[ + "2024-05-01-preview", + "2024-07-01-preview", + "2024-12-01-preview", + "2025-03-01-preview", + "2025-11-01-preview", + ], ) async def get_test_profile(self, test_profile_id: str, **kwargs: Any) -> _models.TestProfile: """Get load test profile details. @@ -1459,16 +1495,23 @@ async def get_test_profile(self, test_profile_id: str, **kwargs: Any) -> _models "accept", ] }, + api_versions_list=[ + "2024-05-01-preview", + "2024-07-01-preview", + "2024-12-01-preview", + "2025-03-01-preview", + "2025-11-01-preview", + ], ) def list_test_profiles( self, *, last_modified_start_time: Optional[datetime.datetime] = None, last_modified_end_time: Optional[datetime.datetime] = None, - test_profile_ids: Optional[List[str]] = None, - test_ids: Optional[List[str]] = None, + test_profile_ids: Optional[list[str]] = None, + test_ids: Optional[list[str]] = None, **kwargs: Any - ) -> AsyncIterable["_models.TestProfile"]: + ) -> AsyncItemPaged["_models.TestProfile"]: """List test profiles. Get all test profiles for the given filters. 
@@ -1494,7 +1537,7 @@ def list_test_profiles( _params = kwargs.pop("params", {}) or {} maxpagesize = kwargs.pop("maxpagesize", None) - cls: ClsType[List[_models.TestProfile]] = kwargs.pop("cls", None) + cls: ClsType[list[_models.TestProfile]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -1544,7 +1587,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.TestProfile], deserialized["value"]) + list_of_elem = _deserialize(list[_models.TestProfile], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("nextLink") or None, AsyncList(list_of_elem) @@ -1562,12 +1605,1195 @@ async def get_next(next_link=None): map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) - return pipeline_response + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @overload + async def create_or_update_trigger( + self, + trigger_id: str, + body: _models.Trigger, + *, + content_type: str = "application/merge-patch+json", + **kwargs: Any + ) -> _models.Trigger: + """Create or update operation template. + + :param trigger_id: The unique identifier of the trigger. Required. + :type trigger_id: str + :param body: The resource instance. Required. + :type body: ~azure.developer.loadtesting.models.Trigger + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/merge-patch+json". + :paramtype content_type: str + :return: Trigger. The Trigger is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.Trigger + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_or_update_trigger( + self, trigger_id: str, body: JSON, *, content_type: str = "application/merge-patch+json", **kwargs: Any + ) -> _models.Trigger: + """Create or update operation template. + + :param trigger_id: The unique identifier of the trigger. Required. + :type trigger_id: str + :param body: The resource instance. Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/merge-patch+json". + :paramtype content_type: str + :return: Trigger. The Trigger is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.Trigger + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_or_update_trigger( + self, trigger_id: str, body: IO[bytes], *, content_type: str = "application/merge-patch+json", **kwargs: Any + ) -> _models.Trigger: + """Create or update operation template. + + :param trigger_id: The unique identifier of the trigger. Required. + :type trigger_id: str + :param body: The resource instance. Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/merge-patch+json". + :paramtype content_type: str + :return: Trigger. 
The Trigger is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.Trigger + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_or_update_trigger( + self, trigger_id: str, body: Union[_models.Trigger, JSON, IO[bytes]], **kwargs: Any + ) -> _models.Trigger: + """Create or update operation template. + + :param trigger_id: The unique identifier of the trigger. Required. + :type trigger_id: str + :param body: The resource instance. Is one of the following types: Trigger, JSON, IO[bytes] + Required. + :type body: ~azure.developer.loadtesting.models.Trigger or JSON or IO[bytes] + :return: Trigger. The Trigger is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.Trigger + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Trigger] = kwargs.pop("cls", None) + + content_type = content_type or "application/merge-patch+json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_load_test_administration_create_or_update_trigger_request( + trigger_id=trigger_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Trigger, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def delete_trigger(self, trigger_id: str, **kwargs: Any) -> None: + """Resource delete operation template. + + :param trigger_id: The unique identifier of the trigger. Required. 
+ :type trigger_id: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_load_test_administration_delete_trigger_request( + trigger_id=trigger_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace_async + async def get_trigger(self, trigger_id: str, **kwargs: Any) -> _models.Trigger: + """Resource read operation template. + + :param trigger_id: The unique identifier of the trigger. Required. + :type trigger_id: str + :return: Trigger. The Trigger is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.Trigger + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.Trigger] = kwargs.pop("cls", None) + + _request = build_load_test_administration_get_trigger_request( + trigger_id=trigger_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Trigger, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_triggers( + self, + *, + test_ids: Optional[str] = None, + states: Optional[Union[str, _models.TriggerState]] = None, + last_modified_start_time: Optional[datetime.datetime] = None, + last_modified_end_time: 
Optional[datetime.datetime] = None, + **kwargs: Any + ) -> AsyncItemPaged["_models.Trigger"]: + """Resource list operation template. + + :keyword test_ids: Search based on triggers associated with the provided test ids. Default + value is None. + :paramtype test_ids: str + :keyword states: Filter triggers based on a comma separated list of states. Known values are: + "Active", "Paused", "Completed", and "Disabled". Default value is None. + :paramtype states: str or ~azure.developer.loadtesting.models.TriggerState + :keyword last_modified_start_time: Start DateTime(RFC 3339 literal format) of the last updated + time range to filter triggers. Default value is None. + :paramtype last_modified_start_time: ~datetime.datetime + :keyword last_modified_end_time: End DateTime(RFC 3339 literal format) of the last updated time + range to filter triggers. Default value is None. + :paramtype last_modified_end_time: ~datetime.datetime + :return: An iterator like instance of Trigger + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.developer.loadtesting.models.Trigger] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + maxpagesize = kwargs.pop("maxpagesize", None) + cls: ClsType[list[_models.Trigger]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_load_test_administration_list_triggers_request( + test_ids=test_ids, + states=states, + last_modified_start_time=last_modified_start_time, + last_modified_end_time=last_modified_end_time, + maxpagesize=maxpagesize, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(list[_models.Trigger], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @overload + async def create_or_update_notification_rule( + self, + notification_rule_id: str, + body: _models.NotificationRule, + *, + content_type: str = "application/merge-patch+json", + **kwargs: Any + ) -> _models.NotificationRule: + """Create or update operation template. + + :param notification_rule_id: The unique identifier of the notification rule. Required. + :type notification_rule_id: str + :param body: The resource instance. Required. + :type body: ~azure.developer.loadtesting.models.NotificationRule + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/merge-patch+json". + :paramtype content_type: str + :return: NotificationRule. The NotificationRule is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.NotificationRule + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_or_update_notification_rule( + self, + notification_rule_id: str, + body: JSON, + *, + content_type: str = "application/merge-patch+json", + **kwargs: Any + ) -> _models.NotificationRule: + """Create or update operation template. + + :param notification_rule_id: The unique identifier of the notification rule. Required. + :type notification_rule_id: str + :param body: The resource instance. Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/merge-patch+json". + :paramtype content_type: str + :return: NotificationRule. The NotificationRule is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.NotificationRule + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_or_update_notification_rule( + self, + notification_rule_id: str, + body: IO[bytes], + *, + content_type: str = "application/merge-patch+json", + **kwargs: Any + ) -> _models.NotificationRule: + """Create or update operation template. + + :param notification_rule_id: The unique identifier of the notification rule. Required. + :type notification_rule_id: str + :param body: The resource instance. Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/merge-patch+json". + :paramtype content_type: str + :return: NotificationRule. The NotificationRule is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.NotificationRule + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + @api_version_validation( + method_added_on="2024-12-01-preview", + params_added_on={"2024-12-01-preview": ["api_version", "notification_rule_id", "content_type", "accept"]}, + api_versions_list=["2024-12-01-preview", "2025-03-01-preview", "2025-11-01-preview"], + ) + async def create_or_update_notification_rule( + self, notification_rule_id: str, body: Union[_models.NotificationRule, JSON, IO[bytes]], **kwargs: Any + ) -> _models.NotificationRule: + """Create or update operation template. + + :param notification_rule_id: The unique identifier of the notification rule. Required. + :type notification_rule_id: str + :param body: The resource instance. Is one of the following types: NotificationRule, JSON, + IO[bytes] Required. 
+ :type body: ~azure.developer.loadtesting.models.NotificationRule or JSON or IO[bytes] + :return: NotificationRule. The NotificationRule is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.NotificationRule + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.NotificationRule] = kwargs.pop("cls", None) + + content_type = content_type or "application/merge-patch+json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_load_test_administration_create_or_update_notification_rule_request( + notification_rule_id=notification_rule_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.NotificationRule, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + @api_version_validation( + method_added_on="2024-12-01-preview", + params_added_on={"2024-12-01-preview": ["api_version", "notification_rule_id"]}, + api_versions_list=["2024-12-01-preview", "2025-03-01-preview", "2025-11-01-preview"], + ) + async def delete_notification_rule(self, notification_rule_id: str, **kwargs: Any) -> None: + """Resource delete operation template. + + :param notification_rule_id: The unique identifier of the notification rule. Required. 
+ :type notification_rule_id: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_load_test_administration_delete_notification_rule_request( + notification_rule_id=notification_rule_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace_async + @api_version_validation( + method_added_on="2024-12-01-preview", + params_added_on={"2024-12-01-preview": ["api_version", "notification_rule_id", "accept"]}, + api_versions_list=["2024-12-01-preview", "2025-03-01-preview", "2025-11-01-preview"], + ) + async def get_notification_rule(self, notification_rule_id: str, **kwargs: Any) -> _models.NotificationRule: + """Resource read operation template. + + :param notification_rule_id: The unique identifier of the notification rule. Required. + :type notification_rule_id: str + :return: NotificationRule. 
The NotificationRule is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.NotificationRule + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.NotificationRule] = kwargs.pop("cls", None) + + _request = build_load_test_administration_get_notification_rule_request( + notification_rule_id=notification_rule_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.NotificationRule, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + @api_version_validation( + method_added_on="2024-12-01-preview", + params_added_on={ + "2024-12-01-preview": [ + "api_version", + "test_ids", + "scopes", + "last_modified_start_time", + "last_modified_end_time", + "maxpagesize", + "accept", + ] + }, + api_versions_list=["2024-12-01-preview", "2025-03-01-preview", "2025-11-01-preview"], + ) + def list_notification_rules( + self, + *, + test_ids: Optional[str] = None, + scopes: Optional[str] = None, + last_modified_start_time: Optional[datetime.datetime] = None, + last_modified_end_time: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> AsyncItemPaged["_models.NotificationRule"]: + """Resource list operation template. + + :keyword test_ids: Search based on notification rules associated with the provided test ids. + Default value is None. + :paramtype test_ids: str + :keyword scopes: Search based on notification rules for the provided scopes. Default value is + None. + :paramtype scopes: str + :keyword last_modified_start_time: Start DateTime(RFC 3339 literal format) of the last updated + time range to filter notification rules. Default value is None. + :paramtype last_modified_start_time: ~datetime.datetime + :keyword last_modified_end_time: End DateTime(RFC 3339 literal format) of the last updated time + range to filter notification rules. Default value is None. 
+ :paramtype last_modified_end_time: ~datetime.datetime + :return: An iterator like instance of NotificationRule + :rtype: + ~azure.core.async_paging.AsyncItemPaged[~azure.developer.loadtesting.models.NotificationRule] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + maxpagesize = kwargs.pop("maxpagesize", None) + cls: ClsType[list[_models.NotificationRule]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_load_test_administration_list_notification_rules_request( + test_ids=test_ids, + scopes=scopes, + last_modified_start_time=last_modified_start_time, + last_modified_end_time=last_modified_end_time, + maxpagesize=maxpagesize, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(list[_models.NotificationRule], deserialized.get("value", [])) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @api_version_validation( + method_added_on="2025-03-01-preview", + params_added_on={"2025-03-01-preview": ["api_version", "test_id", "content_type", "accept"]}, + api_versions_list=["2025-03-01-preview", "2025-11-01-preview"], + ) + async def _clone_test_initial( + self, + test_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + new_test_id: str = _Unset, + display_name: Optional[str] = None, + description: Optional[str] = None, + **kwargs: Any + ) -> AsyncIterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + 
error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + if body is _Unset: + if new_test_id is _Unset: + raise TypeError("missing required argument: new_test_id") + body = {"description": description, "displayName": display_name, "newTestId": new_test_id} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_load_test_administration_clone_test_request( + test_id=test_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["Operation-Location"] = self._deserialize("str", response.headers.get("Operation-Location")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + async def begin_clone_test( + self, + test_id: str, + *, + new_test_id: str, + content_type: str = "application/json", + display_name: Optional[str] = None, + description: Optional[str] = None, + **kwargs: Any + ) -> AsyncLROPoller[_models.Test]: + """Clone a load test. + + Clone the given test with optional overrides applied to the clone test. + + :param test_id: Unique test identifier for the load test, must contain only lower-case + alphabetic, numeric, underscore or hyphen characters. Required. + :type test_id: str + :keyword new_test_id: Unique identifier for the new test that will be created. Required. + :paramtype new_test_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword display_name: Display Name override for the newly created test. Default value is None. + :paramtype display_name: str + :keyword description: Description override for the newly created test. Default value is None. + :paramtype description: str + :return: An instance of AsyncLROPoller that returns Test. 
The Test is compatible with + MutableMapping + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.developer.loadtesting.models.Test] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_clone_test( + self, test_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> AsyncLROPoller[_models.Test]: + """Clone a load test. + + Clone the given test with optional overrides applied to the clone test. + + :param test_id: Unique test identifier for the load test, must contain only lower-case + alphabetic, numeric, underscore or hyphen characters. Required. + :type test_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns Test. The Test is compatible with + MutableMapping + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.developer.loadtesting.models.Test] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_clone_test( + self, test_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> AsyncLROPoller[_models.Test]: + """Clone a load test. + + Clone the given test with optional overrides applied to the clone test. + + :param test_id: Unique test identifier for the load test, must contain only lower-case + alphabetic, numeric, underscore or hyphen characters. Required. + :type test_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: An instance of AsyncLROPoller that returns Test. The Test is compatible with + MutableMapping + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.developer.loadtesting.models.Test] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + @api_version_validation( + method_added_on="2025-03-01-preview", + params_added_on={"2025-03-01-preview": ["api_version", "test_id", "content_type", "accept"]}, + api_versions_list=["2025-03-01-preview", "2025-11-01-preview"], + ) + async def begin_clone_test( + self, + test_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + new_test_id: str = _Unset, + display_name: Optional[str] = None, + description: Optional[str] = None, + **kwargs: Any + ) -> AsyncLROPoller[_models.Test]: + """Clone a load test. + + Clone the given test with optional overrides applied to the clone test. + + :param test_id: Unique test identifier for the load test, must contain only lower-case + alphabetic, numeric, underscore or hyphen characters. Required. + :type test_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword new_test_id: Unique identifier for the new test that will be created. Required. + :paramtype new_test_id: str + :keyword display_name: Display Name override for the newly created test. Default value is None. + :paramtype display_name: str + :keyword description: Description override for the newly created test. Default value is None. + :paramtype description: str + :return: An instance of AsyncLROPoller that returns Test. 
The Test is compatible with + MutableMapping + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.developer.loadtesting.models.Test] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Test] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._clone_test_initial( + test_id=test_id, + body=body, + new_test_id=new_test_id, + display_name=display_name, + description=description, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["Operation-Location"] = self._deserialize( + "str", response.headers.get("Operation-Location") + ) + + deserialized = _deserialize(_models.Test, response.json()) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, + AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs), + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[_models.Test].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[_models.Test]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + @api_version_validation( + method_added_on="2025-11-01-preview", + params_added_on={"2025-11-01-preview": ["api_version", "test_id", "accept"]}, + api_versions_list=["2025-11-01-preview"], + ) + async def _generate_test_plan_recommendations_initial( # pylint: disable=name-too-long + self, test_id: str, **kwargs: Any + ) -> AsyncIterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + _request = build_load_test_administration_generate_test_plan_recommendations_request( + test_id=test_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + 
response = pipeline_response.http_response + + if response.status_code not in [202]: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["Operation-Location"] = self._deserialize("str", response.headers.get("Operation-Location")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + @api_version_validation( + method_added_on="2025-11-01-preview", + params_added_on={"2025-11-01-preview": ["api_version", "test_id", "accept"]}, + api_versions_list=["2025-11-01-preview"], + ) + async def begin_generate_test_plan_recommendations( + self, test_id: str, **kwargs: Any + ) -> AsyncLROPoller[_models.Test]: + """Generate load test plan recommendations. + + Generate AI Recommendations to author a load test plan using the uploaded browser recording + file. + + :param test_id: Unique test identifier for the load test, must contain only lower-case + alphabetic, numeric, underscore or hyphen characters. Required. + :type test_id: str + :return: An instance of AsyncLROPoller that returns Test. The Test is compatible with + MutableMapping + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.developer.loadtesting.models.Test] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.Test] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._generate_test_plan_recommendations_initial( + test_id=test_id, cls=lambda x, y, z: x, headers=_headers, params=_params, **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + response_headers = {} + response = pipeline_response.http_response + response_headers["Operation-Location"] = self._deserialize( + "str", response.headers.get("Operation-Location") + ) + + deserialized = _deserialize(_models.Test, response.json()) + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + return deserialized + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, + AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs), + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[_models.Test].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[_models.Test]( + self._client, raw_result, get_long_running_output, polling_method # type: ignore + ) + + @distributed_trace_async + @api_version_validation( + method_added_on="2025-03-01-preview", + 
params_added_on={"2025-03-01-preview": ["api_version", "operation_id", "accept"]}, + api_versions_list=["2025-03-01-preview", "2025-11-01-preview"], + ) + async def get_operation_status(self, operation_id: str, **kwargs: Any) -> _models.OperationStatus: + """Get the status of a long running operation. + + Get the status of a long running operation. + + :param operation_id: The unique ID of the operation. Required. + :type operation_id: str + :return: OperationStatus. The OperationStatus is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.OperationStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OperationStatus] = kwargs.pop("cls", None) + + _request = build_load_test_administration_get_operation_status_request( + operation_id=operation_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OperationStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore - return AsyncItemPaged(get_next, extract_data) + return deserialized # type: ignore -class LoadTestRunClientOperationsMixin(LoadTestRunClientMixinABC): +class _LoadTestRunClientOperationsMixin( # pylint: disable=too-many-public-methods + ClientMixinABC[AsyncPipelineClient[HttpRequest, AsyncHttpResponse], LoadTestRunClientConfiguration] +): @overload async def _begin_test_run( @@ -1703,7 +2929,7 @@ async def create_or_update_app_components( Add an app component to a test run by providing the resource Id, name and type. - :param test_run_id: Unique Id for the load test run, must contain only lower-case alphabetic, + :param test_run_id: Unique name for the load test run, must contain only lower-case alphabetic, numeric, underscore or hyphen characters. Required. :type test_run_id: str :param body: App Component model. Required. @@ -1724,7 +2950,7 @@ async def create_or_update_app_components( Add an app component to a test run by providing the resource Id, name and type. - :param test_run_id: Unique Id for the load test run, must contain only lower-case alphabetic, + :param test_run_id: Unique name for the load test run, must contain only lower-case alphabetic, numeric, underscore or hyphen characters. Required. :type test_run_id: str :param body: App Component model. Required. 
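Hedged usage sketch for the app-components overloads above (not part of the generated patch): assuming an async LoadTestRunClient pointed at a data-plane endpoint and using the plain-JSON overload. The endpoint, test run id, resource id, and the "components" field layout are illustrative placeholders, not values taken from this diff.

import asyncio

from azure.identity.aio import DefaultAzureCredential  # any async TokenCredential works
from azure.developer.loadtesting.aio import LoadTestRunClient


async def add_app_component() -> None:
    # Placeholder endpoint and ids; replace with real values.
    async with DefaultAzureCredential() as credential:
        async with LoadTestRunClient(endpoint="<your-data-plane-endpoint>", credential=credential) as client:
            app_components = await client.create_or_update_app_components(
                "my-test-run-id",
                {
                    "components": {
                        "<app-component-resource-id>": {
                            "resourceName": "my-web-app",
                            "resourceType": "Microsoft.Web/sites",
                        }
                    }
                },
            )
            print(app_components)


asyncio.run(add_app_component())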
@@ -1745,7 +2971,7 @@ async def create_or_update_app_components( Add an app component to a test run by providing the resource Id, name and type. - :param test_run_id: Unique Id for the load test run, must contain only lower-case alphabetic, + :param test_run_id: Unique name for the load test run, must contain only lower-case alphabetic, numeric, underscore or hyphen characters. Required. :type test_run_id: str :param body: App Component model. Required. @@ -1766,7 +2992,7 @@ async def create_or_update_app_components( Add an app component to a test run by providing the resource Id, name and type. - :param test_run_id: Unique Id for the load test run, must contain only lower-case alphabetic, + :param test_run_id: Unique name for the load test run, must contain only lower-case alphabetic, numeric, underscore or hyphen characters. Required. :type test_run_id: str :param body: App Component model. Is one of the following types: TestRunAppComponents, JSON, @@ -1849,7 +3075,7 @@ async def create_or_update_server_metrics_config( Configure server metrics for a test run. - :param test_run_id: Unique Id for the load test run, must contain only lower-case alphabetic, + :param test_run_id: Unique name for the load test run, must contain only lower-case alphabetic, numeric, underscore or hyphen characters. Required. :type test_run_id: str :param body: Server metric configuration model. Required. @@ -1871,7 +3097,7 @@ async def create_or_update_server_metrics_config( Configure server metrics for a test run. - :param test_run_id: Unique Id for the load test run, must contain only lower-case alphabetic, + :param test_run_id: Unique name for the load test run, must contain only lower-case alphabetic, numeric, underscore or hyphen characters. Required. :type test_run_id: str :param body: Server metric configuration model. Required. @@ -1893,7 +3119,7 @@ async def create_or_update_server_metrics_config( Configure server metrics for a test run. - :param test_run_id: Unique Id for the load test run, must contain only lower-case alphabetic, + :param test_run_id: Unique name for the load test run, must contain only lower-case alphabetic, numeric, underscore or hyphen characters. Required. :type test_run_id: str :param body: Server metric configuration model. Required. @@ -1915,7 +3141,7 @@ async def create_or_update_server_metrics_config( Configure server metrics for a test run. - :param test_run_id: Unique Id for the load test run, must contain only lower-case alphabetic, + :param test_run_id: Unique name for the load test run, must contain only lower-case alphabetic, numeric, underscore or hyphen characters. Required. :type test_run_id: str :param body: Server metric configuration model. Is one of the following types: @@ -2309,7 +3535,7 @@ def list_metric_dimension_values( time_interval: str, interval: Optional[Union[str, _models.TimeGrain]] = None, **kwargs: Any - ) -> AsyncIterable[str]: + ) -> AsyncItemPaged[str]: """List the dimension values for the given metric dimension name. List the dimension values for the given metric dimension name. 
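The return-type change just above (AsyncIterable[str] to AsyncItemPaged[str]) only tightens the annotation; consumption is still an async for loop, and AsyncItemPaged additionally exposes by_page() for page-level access. A minimal consumption sketch, assuming an already-constructed async LoadTestRunClient; the dimension, metric, namespace, and time-interval values are placeholders, and the keyword names are my assumption read off the generated signature rather than something this hunk shows in full.

from azure.developer.loadtesting.aio import LoadTestRunClient


async def print_dimension_values(client: LoadTestRunClient) -> None:
    # All argument values below are placeholders.
    async for dimension_value in client.list_metric_dimension_values(
        "my-test-run-id",
        "RequestName",
        metric_name="VirtualUsers",
        metric_namespace="LoadTestRunMetrics",
        time_interval="2025-01-01T00:00:00Z/2025-01-01T01:00:00Z",
    ):
        print(dimension_value)
    # The pager also exposes .by_page() when page-level iteration is needed.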
@@ -2336,7 +3562,7 @@ def list_metric_dimension_values( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[List[str]] = kwargs.pop("cls", None) + cls: ClsType[list[str]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -2387,7 +3613,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[str], deserialized["value"]) + list_of_elem = _deserialize(list[str], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("nextLink") or None, AsyncList(list_of_elem) @@ -2555,7 +3781,7 @@ def list_metrics( interval: Optional[Union[str, _models.TimeGrain]] = None, content_type: str = "application/json", **kwargs: Any - ) -> AsyncIterable["_models.TimeSeriesElement"]: + ) -> AsyncItemPaged["_models.TimeSeriesElement"]: """List the metric values for a load test run. List the metric values for a load test run. @@ -2599,7 +3825,7 @@ def list_metrics( interval: Optional[Union[str, _models.TimeGrain]] = None, content_type: str = "application/json", **kwargs: Any - ) -> AsyncIterable["_models.TimeSeriesElement"]: + ) -> AsyncItemPaged["_models.TimeSeriesElement"]: """List the metric values for a load test run. List the metric values for a load test run. @@ -2643,7 +3869,7 @@ def list_metrics( interval: Optional[Union[str, _models.TimeGrain]] = None, content_type: str = "application/json", **kwargs: Any - ) -> AsyncIterable["_models.TimeSeriesElement"]: + ) -> AsyncItemPaged["_models.TimeSeriesElement"]: """List the metric values for a load test run. List the metric values for a load test run. @@ -2686,7 +3912,7 @@ def list_metrics( aggregation: Optional[str] = None, interval: Optional[Union[str, _models.TimeGrain]] = None, **kwargs: Any - ) -> AsyncIterable["_models.TimeSeriesElement"]: + ) -> AsyncItemPaged["_models.TimeSeriesElement"]: """List the metric values for a load test run. List the metric values for a load test run. 
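Beside the return-type changes, the extract_data hunks above (and the later ones in this file) move from deserialized["value"] to deserialized.get("value", []), so a page payload that omits "value" now yields an empty page instead of raising. A tiny illustration of the difference, using a hypothetical page body:

page_payload = {"nextLink": None}  # hypothetical page body with no "value" field

# Old behaviour: a page with no items raised KeyError inside extract_data.
try:
    items = page_payload["value"]
except KeyError:
    items = None

# New behaviour: an empty list, which the pager surfaces as an empty page.
items = page_payload.get("value", [])
assert items == []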
@@ -2718,7 +3944,8 @@ def list_metrics( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[List[_models.TimeSeriesElement]] = kwargs.pop("cls", None) + content_type = content_type if body else None + cls: ClsType[list[_models.TimeSeriesElement]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -2727,7 +3954,7 @@ def list_metrics( 304: ResourceNotModifiedError, } error_map.update(kwargs.pop("error_map", {}) or {}) - content_type = content_type or "application/json" + content_type = content_type or "application/json" if body else None _content = None if isinstance(body, (IOBase, bytes)): _content = body @@ -2780,7 +4007,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.TimeSeriesElement], deserialized["value"]) + list_of_elem = _deserialize(list[_models.TimeSeriesElement], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("nextLink") or None, AsyncList(list_of_elem) @@ -2803,6 +4030,19 @@ async def get_next(next_link=None): return AsyncItemPaged(get_next, extract_data) @distributed_trace + @api_version_validation( + params_added_on={"2024-07-01-preview": ["created_by_types"], "2025-03-01-preview": ["test_ids"]}, + api_versions_list=[ + "2022-11-01", + "2023-04-01-preview", + "2024-03-01-preview", + "2024-05-01-preview", + "2024-07-01-preview", + "2024-12-01-preview", + "2025-03-01-preview", + "2025-11-01-preview", + ], + ) def list_test_runs( self, *, @@ -2812,8 +4052,10 @@ def list_test_runs( execution_from: Optional[datetime.datetime] = None, execution_to: Optional[datetime.datetime] = None, status: Optional[str] = None, + created_by_types: Optional[list[str]] = None, + test_ids: Optional[list[str]] = None, **kwargs: Any - ) -> AsyncIterable["_models.TestRun"]: + ) -> AsyncItemPaged["_models.TestRun"]: """Get all test runs for the given filters. Get all test runs for the given filters. @@ -2835,6 +4077,12 @@ def list_test_runs( :paramtype execution_to: ~datetime.datetime :keyword status: Comma separated list of test run status. Default value is None. :paramtype status: str + :keyword created_by_types: Comma separated list of type of entities that have created the test + run. Default value is None. + :paramtype created_by_types: list[str] + :keyword test_ids: Comma-separated list of test IDs. If you are using testIds, do not send a + value for testId. Default value is None. 
+ :paramtype test_ids: list[str] :return: An iterator like instance of TestRun :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.developer.loadtesting.models.TestRun] :raises ~azure.core.exceptions.HttpResponseError: @@ -2843,7 +4091,7 @@ def list_test_runs( _params = kwargs.pop("params", {}) or {} maxpagesize = kwargs.pop("maxpagesize", None) - cls: ClsType[List[_models.TestRun]] = kwargs.pop("cls", None) + cls: ClsType[list[_models.TestRun]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -2864,6 +4112,8 @@ def prepare_request(next_link=None): execution_to=execution_to, status=status, maxpagesize=maxpagesize, + created_by_types=created_by_types, + test_ids=test_ids, api_version=self._config.api_version, headers=_headers, params=_params, @@ -2895,7 +4145,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.TestRun], deserialized["value"]) + list_of_elem = _deserialize(list[_models.TestRun], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("nextLink") or None, AsyncList(list_of_elem) @@ -3007,6 +4257,13 @@ async def _begin_test_profile_run( @api_version_validation( method_added_on="2024-05-01-preview", params_added_on={"2024-05-01-preview": ["api_version", "test_profile_run_id", "content_type", "accept"]}, + api_versions_list=[ + "2024-05-01-preview", + "2024-07-01-preview", + "2024-12-01-preview", + "2025-03-01-preview", + "2025-11-01-preview", + ], ) async def _begin_test_profile_run( self, test_profile_run_id: str, body: Union[_models.TestProfileRun, JSON, IO[bytes]], **kwargs: Any @@ -3088,7 +4345,14 @@ async def _begin_test_profile_run( @distributed_trace_async @api_version_validation( method_added_on="2024-05-01-preview", - params_added_on={"2024-05-01-preview": ["api_version", "test_profile_run_id", "accept"]}, + params_added_on={"2024-05-01-preview": ["api_version", "test_profile_run_id"]}, + api_versions_list=[ + "2024-05-01-preview", + "2024-07-01-preview", + "2024-12-01-preview", + "2025-03-01-preview", + "2025-11-01-preview", + ], ) async def delete_test_profile_run(self, test_profile_run_id: str, **kwargs: Any) -> None: """Delete an existing load test profile run. @@ -3144,6 +4408,13 @@ async def delete_test_profile_run(self, test_profile_run_id: str, **kwargs: Any) @api_version_validation( method_added_on="2024-05-01-preview", params_added_on={"2024-05-01-preview": ["api_version", "test_profile_run_id", "accept"]}, + api_versions_list=[ + "2024-05-01-preview", + "2024-07-01-preview", + "2024-12-01-preview", + "2025-03-01-preview", + "2025-11-01-preview", + ], ) async def get_test_profile_run(self, test_profile_run_id: str, **kwargs: Any) -> _models.TestProfileRun: """Get test profile run details. 
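The list_test_runs hunks above add the created_by_types and test_ids filters to the request. A hedged filtering sketch, assuming an already-constructed async LoadTestRunClient; the test id is a placeholder, while "AzurePipelines" matches the CreatedByType value added elsewhere in this patch.

from azure.developer.loadtesting.aio import LoadTestRunClient


async def print_pipeline_created_runs(client: LoadTestRunClient) -> None:
    # Only runs created by Azure Pipelines for the given (placeholder) test ids.
    async for test_run in client.list_test_runs(
        created_by_types=["AzurePipelines"],
        test_ids=["my-test-id"],
    ):
        # TestRun is MutableMapping-compatible, so dict-style access works.
        print(test_run.get("testRunId"), test_run.get("status"))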
@@ -3226,6 +4497,13 @@ async def get_test_profile_run(self, test_profile_run_id: str, **kwargs: Any) -> "accept", ] }, + api_versions_list=[ + "2024-05-01-preview", + "2024-07-01-preview", + "2024-12-01-preview", + "2025-03-01-preview", + "2025-11-01-preview", + ], ) def list_test_profile_runs( self, @@ -3236,11 +4514,11 @@ def list_test_profile_runs( max_end_date_time: Optional[datetime.datetime] = None, created_date_start_time: Optional[datetime.datetime] = None, created_date_end_time: Optional[datetime.datetime] = None, - test_profile_run_ids: Optional[List[str]] = None, - test_profile_ids: Optional[List[str]] = None, - statuses: Optional[List[str]] = None, + test_profile_run_ids: Optional[list[str]] = None, + test_profile_ids: Optional[list[str]] = None, + statuses: Optional[list[str]] = None, **kwargs: Any - ) -> AsyncIterable["_models.TestProfileRun"]: + ) -> AsyncItemPaged["_models.TestProfileRun"]: """List test profile runs. Get all test profile runs for the given filters. @@ -3281,7 +4559,7 @@ def list_test_profile_runs( _params = kwargs.pop("params", {}) or {} maxpagesize = kwargs.pop("maxpagesize", None) - cls: ClsType[List[_models.TestProfileRun]] = kwargs.pop("cls", None) + cls: ClsType[list[_models.TestProfileRun]] = kwargs.pop("cls", None) error_map: MutableMapping = { 401: ClientAuthenticationError, @@ -3336,7 +4614,7 @@ def prepare_request(next_link=None): async def extract_data(pipeline_response): deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.TestProfileRun], deserialized["value"]) + list_of_elem = _deserialize(list[_models.TestProfileRun], deserialized.get("value", [])) if cls: list_of_elem = cls(list_of_elem) # type: ignore return deserialized.get("nextLink") or None, AsyncList(list_of_elem) @@ -3362,6 +4640,13 @@ async def get_next(next_link=None): @api_version_validation( method_added_on="2024-05-01-preview", params_added_on={"2024-05-01-preview": ["api_version", "test_profile_run_id", "accept"]}, + api_versions_list=[ + "2024-05-01-preview", + "2024-07-01-preview", + "2024-12-01-preview", + "2025-03-01-preview", + "2025-11-01-preview", + ], ) async def stop_test_profile_run(self, test_profile_run_id: str, **kwargs: Any) -> _models.TestProfileRun: """Stop test profile run. @@ -3424,3 +4709,335 @@ async def stop_test_profile_run(self, test_profile_run_id: str, **kwargs: Any) - return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore + + @distributed_trace_async + @api_version_validation( + method_added_on="2025-03-01-preview", + params_added_on={"2025-03-01-preview": ["api_version", "test_run_id", "accept"]}, + api_versions_list=["2025-03-01-preview", "2025-11-01-preview"], + ) + async def get_latest_test_run_insights(self, test_run_id: str, **kwargs: Any) -> _models.TestRunInsights: + """Get the latest insights for the test run. + + Get the latest insights for the test run. + + :param test_run_id: Unique name for the load test run, must contain only lower-case alphabetic, + numeric, underscore or hyphen characters. Required. + :type test_run_id: str + :return: TestRunInsights. 
The TestRunInsights is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.TestRunInsights + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.TestRunInsights] = kwargs.pop("cls", None) + + _request = build_load_test_run_get_latest_test_run_insights_request( + test_run_id=test_run_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.TestRunInsights, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def patch_latest_test_run_insights( + self, + test_run_id: str, + body: _models.TestRunInsights, + *, + content_type: str = "application/merge-patch+json", + **kwargs: Any + ) -> _models.TestRunInsights: + """Update the latest insights for the test run. + + Update the latest insights for the test run. + + :param test_run_id: Unique name for the load test run, must contain only lower-case alphabetic, + numeric, underscore or hyphen characters. Required. + :type test_run_id: str + :param body: Test run insights model. Required. + :type body: ~azure.developer.loadtesting.models.TestRunInsights + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/merge-patch+json". + :paramtype content_type: str + :return: TestRunInsights. The TestRunInsights is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.TestRunInsights + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def patch_latest_test_run_insights( + self, test_run_id: str, body: JSON, *, content_type: str = "application/merge-patch+json", **kwargs: Any + ) -> _models.TestRunInsights: + """Update the latest insights for the test run. + + Update the latest insights for the test run. + + :param test_run_id: Unique name for the load test run, must contain only lower-case alphabetic, + numeric, underscore or hyphen characters. Required. + :type test_run_id: str + :param body: Test run insights model. Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/merge-patch+json". + :paramtype content_type: str + :return: TestRunInsights. 
The TestRunInsights is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.TestRunInsights + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def patch_latest_test_run_insights( + self, test_run_id: str, body: IO[bytes], *, content_type: str = "application/merge-patch+json", **kwargs: Any + ) -> _models.TestRunInsights: + """Update the latest insights for the test run. + + Update the latest insights for the test run. + + :param test_run_id: Unique name for the load test run, must contain only lower-case alphabetic, + numeric, underscore or hyphen characters. Required. + :type test_run_id: str + :param body: Test run insights model. Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/merge-patch+json". + :paramtype content_type: str + :return: TestRunInsights. The TestRunInsights is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.TestRunInsights + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + @api_version_validation( + method_added_on="2025-11-01-preview", + params_added_on={"2025-11-01-preview": ["api_version", "content_type", "test_run_id", "accept"]}, + api_versions_list=["2025-11-01-preview"], + ) + async def patch_latest_test_run_insights( + self, test_run_id: str, body: Union[_models.TestRunInsights, JSON, IO[bytes]], **kwargs: Any + ) -> _models.TestRunInsights: + """Update the latest insights for the test run. + + Update the latest insights for the test run. + + :param test_run_id: Unique name for the load test run, must contain only lower-case alphabetic, + numeric, underscore or hyphen characters. Required. + :type test_run_id: str + :param body: Test run insights model. Is one of the following types: TestRunInsights, JSON, + IO[bytes] Required. + :type body: ~azure.developer.loadtesting.models.TestRunInsights or JSON or IO[bytes] + :return: TestRunInsights. 
The TestRunInsights is compatible with MutableMapping + :rtype: ~azure.developer.loadtesting.models.TestRunInsights + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("content-type", None)) + cls: ClsType[_models.TestRunInsights] = kwargs.pop("cls", None) + + content_type = content_type or "application/merge-patch+json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_load_test_run_patch_latest_test_run_insights_request( + test_run_id=test_run_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.TestRunInsights, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @api_version_validation( + method_added_on="2025-03-01-preview", + params_added_on={"2025-03-01-preview": ["api_version", "test_run_id", "accept"]}, + api_versions_list=["2025-03-01-preview", "2025-11-01-preview"], + ) + async def _generate_test_run_insights_initial(self, test_run_id: str, **kwargs: Any) -> AsyncIterator[bytes]: + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + _request = build_load_test_run_generate_test_run_insights_request( + test_run_id=test_run_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = True + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + try: + await 
response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["Operation-Location"] = self._deserialize("str", response.headers.get("Operation-Location")) + + deserialized = response.iter_bytes() + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + @api_version_validation( + method_added_on="2025-03-01-preview", + params_added_on={"2025-03-01-preview": ["api_version", "test_run_id", "accept"]}, + api_versions_list=["2025-03-01-preview", "2025-11-01-preview"], + ) + async def begin_generate_test_run_insights(self, test_run_id: str, **kwargs: Any) -> AsyncLROPoller[None]: + """Generate insights for the test run. + + Generate insights for the test run. + + :param test_run_id: Unique name for the load test run, must contain only lower-case alphabetic, + numeric, underscore or hyphen characters. Required. + :type test_run_id: str + :return: An instance of AsyncLROPoller that returns None + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._generate_test_run_insights_initial( + test_run_id=test_run_id, cls=lambda x, y, z: x, headers=_headers, params=_params, **kwargs + ) + await raw_result.http_response.read() # type: ignore + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + } + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, + AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs), + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller[None].from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore diff --git a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/aio/_operations/_patch.py b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/aio/_operations/_patch.py index 44a11c9f19b8..2e5e1e8f6410 100644 --- a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/aio/_operations/_patch.py +++ b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/aio/_operations/_patch.py @@ -17,8 +17,8 @@ from azure.core.tracing.decorator_async import distributed_trace_async from ._operations import JSON -from ._operations import LoadTestAdministrationClientOperationsMixin as 
GeneratedAdministrationClientOperations -from ._operations import LoadTestRunClientOperationsMixin as GeneratedRunClientOperations +from ._operations import _LoadTestAdministrationClientOperationsMixin as GeneratedAdministrationClientOperations +from ._operations import _LoadTestRunClientOperationsMixin as GeneratedRunClientOperations from ... import models as _models diff --git a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/models/__init__.py b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/models/__init__.py index 6e644e25a518..99acd54a10f4 100644 --- a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/models/__init__.py +++ b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/models/__init__.py @@ -18,11 +18,13 @@ ArtifactsContainerInfo, AutoStopCriteria, CertificateMetadata, + DailyRecurrence, DimensionFilter, DimensionValue, ErrorDetails, FunctionFlexConsumptionResourceConfiguration, FunctionFlexConsumptionTargetResourceConfigurations, + HourlyRecurrence, LoadTestConfiguration, MetricAvailability, MetricDefinition, @@ -31,19 +33,30 @@ MetricNamespaceCollection, MetricRequestPayload, MetricValue, + MonthlyRecurrenceByDates, + MonthlyRecurrenceByWeekDays, NameAndDescription, + NotificationRule, + OperationStatus, OptionalLoadTestConfiguration, PassFailCriteria, PassFailMetric, PassFailServerMetric, + Recurrence, + RecurrenceEnd, + RecurrenceStatus, + RecurrenceWithCron, RegionalConfiguration, ResourceMetric, + ScheduleTestsTrigger, Secret, + StateDetails, TargetResourceConfigurations, Test, TestAppComponents, TestFileInfo, TestInputArtifacts, + TestPreferences, TestProfile, TestProfileRun, TestProfileRunRecommendation, @@ -51,13 +64,24 @@ TestRunAppComponents, TestRunArtifacts, TestRunDetail, + TestRunEndedEventCondition, + TestRunEndedNotificationEventFilter, TestRunFileInfo, TestRunInputArtifacts, + TestRunInsightColumn, + TestRunInsights, TestRunOutputArtifacts, TestRunServerMetricsConfiguration, + TestRunStartedNotificationEventFilter, TestRunStatistics, TestServerMetricsConfiguration, + TestsNotificationEventFilter, + TestsNotificationRule, TimeSeriesElement, + Trigger, + TriggerCompletedNotificationEventFilter, + TriggerDisabledNotificationEventFilter, + WeeklyRecurrence, ) from ._enums import ( # type: ignore @@ -66,8 +90,13 @@ CreatedByType, FileType, FileValidationStatus, + Frequency, ManagedIdentityType, MetricUnit, + NotificationEventType, + NotificationScopeType, + OperationKind, + OperationState, PFMetrics, PassFailAction, PassFailAggregationFunction, @@ -81,6 +110,9 @@ TestProfileRunStatus, TestRunStatus, TimeGrain, + TriggerState, + TriggerType, + WeekDays, ) from ._patch import __all__ as _patch_all from ._patch import * @@ -91,11 +123,13 @@ "ArtifactsContainerInfo", "AutoStopCriteria", "CertificateMetadata", + "DailyRecurrence", "DimensionFilter", "DimensionValue", "ErrorDetails", "FunctionFlexConsumptionResourceConfiguration", "FunctionFlexConsumptionTargetResourceConfigurations", + "HourlyRecurrence", "LoadTestConfiguration", "MetricAvailability", "MetricDefinition", @@ -104,19 +138,30 @@ "MetricNamespaceCollection", "MetricRequestPayload", "MetricValue", + "MonthlyRecurrenceByDates", + "MonthlyRecurrenceByWeekDays", "NameAndDescription", + "NotificationRule", + "OperationStatus", "OptionalLoadTestConfiguration", "PassFailCriteria", "PassFailMetric", "PassFailServerMetric", + "Recurrence", + "RecurrenceEnd", + "RecurrenceStatus", + "RecurrenceWithCron", 
"RegionalConfiguration", "ResourceMetric", + "ScheduleTestsTrigger", "Secret", + "StateDetails", "TargetResourceConfigurations", "Test", "TestAppComponents", "TestFileInfo", "TestInputArtifacts", + "TestPreferences", "TestProfile", "TestProfileRun", "TestProfileRunRecommendation", @@ -124,20 +169,36 @@ "TestRunAppComponents", "TestRunArtifacts", "TestRunDetail", + "TestRunEndedEventCondition", + "TestRunEndedNotificationEventFilter", "TestRunFileInfo", "TestRunInputArtifacts", + "TestRunInsightColumn", + "TestRunInsights", "TestRunOutputArtifacts", "TestRunServerMetricsConfiguration", + "TestRunStartedNotificationEventFilter", "TestRunStatistics", "TestServerMetricsConfiguration", + "TestsNotificationEventFilter", + "TestsNotificationRule", "TimeSeriesElement", + "Trigger", + "TriggerCompletedNotificationEventFilter", + "TriggerDisabledNotificationEventFilter", + "WeeklyRecurrence", "Aggregation", "CertificateType", "CreatedByType", "FileType", "FileValidationStatus", + "Frequency", "ManagedIdentityType", "MetricUnit", + "NotificationEventType", + "NotificationScopeType", + "OperationKind", + "OperationState", "PFMetrics", "PassFailAction", "PassFailAggregationFunction", @@ -151,6 +212,9 @@ "TestProfileRunStatus", "TestRunStatus", "TimeGrain", + "TriggerState", + "TriggerType", + "WeekDays", ] __all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore _patch_sdk() diff --git a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/models/_enums.py b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/models/_enums.py index f6ecc0080712..55bfd12a0b93 100644 --- a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/models/_enums.py +++ b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/models/_enums.py @@ -55,6 +55,10 @@ class CreatedByType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Entity was created by a user.""" SCHEDULED_TRIGGER = "ScheduledTrigger" """Entity was created by a scheduled trigger.""" + AZURE_PIPELINES = "AzurePipelines" + """Entity was created by Azure DevOps pipelines.""" + GIT_HUB_WORKFLOWS = "GitHubWorkflows" + """Entity was created by GitHub Workflows.""" class FileType(str, Enum, metaclass=CaseInsensitiveEnumMeta): @@ -72,6 +76,11 @@ class FileType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """If the file is a JSON config file to define the requests for a URL test.""" TEST_SCRIPT = "TEST_SCRIPT" """If the file is a test script.""" + BROWSER_RECORDING = "BROWSER_RECORDING" + """If the file is a browser recording of interactions with web applications.""" + TEST_PLAN_RECOMMENDATIONS = "TEST_PLAN_RECOMMENDATIONS" + """If the file is an AI Recommendations file created by the system containing recommendations to + create a load test plan from browser recording.""" class FileValidationStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): @@ -89,6 +98,23 @@ class FileValidationStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Validation is not required.""" +class Frequency(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Frequency of recurrence for a trigger.""" + + CRON = "Cron" + """Recurrence defined by a cron expression.""" + HOURLY = "Hourly" + """Recurrence defined on an hourly basis, as specified by HourlyRecurrence.""" + DAILY = "Daily" + """Recurrence defined on a daily basis, as specified by DailyRecurrence.""" + WEEKLY = "Weekly" + """Recurrence defined on a weekly basis, as specified by WeeklyRecurrence.""" + MONTHLY_BY_DAYS = "MonthlyByDays" + 
"""Recurrence defined monthly on specific days, as specified by MonthlyRecurrenceByWeekDays.""" + MONTHLY_BY_DATES = "MonthlyByDates" + """Recurrence defined monthly on specific dates, as specified by MonthlyRecurrenceByDates.""" + + class ManagedIdentityType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Managed identity type.""" @@ -119,6 +145,52 @@ class MetricUnit(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Count per second""" +class NotificationEventType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Notification event type.""" + + TEST_RUN_ENDED = "TestRunEnded" + """Test run ended event. This event would occur when a test run reaches terminal state.""" + TEST_RUN_STARTED = "TestRunStarted" + """Test run started event. This event would occur when a new test run is triggered.""" + TRIGGER_COMPLETED = "TriggerCompleted" + """Trigger completed event. This event would occur when a trigger completes.""" + TRIGGER_DISABLED = "TriggerDisabled" + """Trigger disabled event. This event would occur when a trigger is disabled.""" + + +class NotificationScopeType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Scope type of a notification rule.""" + + TESTS = "Tests" + """Notification rule is for Tests.""" + + +class OperationKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Kind of the long running operation.""" + + CLONE_TEST = "CloneTest" + """Operation represents a clone test operation""" + GENERATE_TEST_RUN_INSIGHTS = "GenerateTestRunInsights" + """Operation represents a test run insights generation operation""" + TEST_PLAN_RECOMMENDATIONS = "TestPlanRecommendations" + """Operation represents a test plan recommendations generation operation""" + + +class OperationState(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Enum describing allowed operation states.""" + + NOT_STARTED = "NotStarted" + """The operation has not started.""" + RUNNING = "Running" + """The operation is in progress.""" + SUCCEEDED = "Succeeded" + """The operation has completed successfully.""" + FAILED = "Failed" + """The operation has failed.""" + CANCELED = "Canceled" + """The operation has been canceled by the user.""" + + class PassFailAction(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Action to take on failure of pass/fail criteria.""" @@ -315,3 +387,42 @@ class TimeGrain(str, Enum, metaclass=CaseInsensitiveEnumMeta): """5 minutes, available only if test run duration is greater than 1 minute.""" PT1H = "PT1H" """1 hour, available only if test run duration is greater than 1 minute.""" + + +class TriggerState(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Current state of a trigger.""" + + ACTIVE = "Active" + """The trigger is active.""" + PAUSED = "Paused" + """The trigger is paused manually.""" + COMPLETED = "Completed" + """The trigger is completed.""" + DISABLED = "Disabled" + """The trigger is disabled due to error.""" + + +class TriggerType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Type of a trigger.""" + + SCHEDULE_TESTS_TRIGGER = "ScheduleTestsTrigger" + """Trigger is a Scheduled Trigger on a Test.""" + + +class WeekDays(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Defines the days of the week.""" + + SUNDAY = "Sunday" + """Refers to Sunday.""" + MONDAY = "Monday" + """Refers to Monday.""" + TUESDAY = "Tuesday" + """Refers to Tuesday.""" + WEDNESDAY = "Wednesday" + """Refers to Wednesday.""" + THURSDAY = "Thursday" + """Refers to Thursday.""" + FRIDAY = "Friday" + """Refers to Friday.""" + SATURDAY = "Saturday" + """Refers to Saturday.""" diff --git 
a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/models/_models.py b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/models/_models.py index 32acdef7a137..97b989d69e25 100644 --- a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/models/_models.py +++ b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/models/_models.py @@ -9,17 +9,18 @@ # pylint: disable=useless-super-delegation import datetime -from typing import Any, Dict, List, Literal, Mapping, Optional, TYPE_CHECKING, Union, overload +from typing import Any, Literal, Mapping, Optional, TYPE_CHECKING, Union, overload -from .. import _model_base -from .._model_base import rest_discriminator, rest_field -from ._enums import ResourceKind +from azure.core.exceptions import ODataV4Format + +from .._utils.model_base import Model as _Model, rest_discriminator, rest_field +from ._enums import Frequency, NotificationEventType, NotificationScopeType, ResourceKind, TriggerType if TYPE_CHECKING: from .. import models as _models -class AppComponent(_model_base.Model): +class AppComponent(_Model): """An Azure resource object (Refer azure generic resource model :`https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id#genericresource `_). @@ -82,7 +83,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class ArtifactsContainerInfo(_model_base.Model): +class ArtifactsContainerInfo(_Model): """Artifacts container info. :ivar url: This is a SAS URI to an Azure Storage Container that contains the test run @@ -118,7 +119,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class AutoStopCriteria(_model_base.Model): +class AutoStopCriteria(_Model): """Auto stop criteria for a test. This will automatically stop a load test if the error percentage is high for a certain time window. @@ -130,6 +131,9 @@ class AutoStopCriteria(_model_base.Model): :ivar error_rate_time_window_in_seconds: Time window during which the error percentage should be evaluated in seconds. :vartype error_rate_time_window_in_seconds: int + :ivar maximum_virtual_users_per_engine: Maximum number of virtual users per load testing + engine, at which the test run should be automatically stopped. + :vartype maximum_virtual_users_per_engine: int """ auto_stop_disabled: Optional[bool] = rest_field( @@ -145,6 +149,11 @@ class AutoStopCriteria(_model_base.Model): name="errorRateTimeWindowInSeconds", visibility=["read", "create", "update", "delete", "query"] ) """Time window during which the error percentage should be evaluated in seconds.""" + maximum_virtual_users_per_engine: Optional[int] = rest_field( + name="maximumVirtualUsersPerEngine", visibility=["read", "create", "update", "delete", "query"] + ) + """Maximum number of virtual users per load testing engine, at which the test run should be + automatically stopped.""" @overload def __init__( @@ -153,6 +162,7 @@ def __init__( auto_stop_disabled: Optional[bool] = None, error_rate: Optional[float] = None, error_rate_time_window_in_seconds: Optional[int] = None, + maximum_virtual_users_per_engine: Optional[int] = None, ) -> None: ... @overload @@ -166,7 +176,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class CertificateMetadata(_model_base.Model): +class CertificateMetadata(_Model): """Certificates metadata. :ivar value: The value of the certificate for respective type. 
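To make the new auto-stop knob concrete, a minimal sketch of constructing AutoStopCriteria with the field added in this API version; all values below are illustrative assumptions.

from azure.developer.loadtesting.models import AutoStopCriteria

auto_stop = AutoStopCriteria(
    auto_stop_disabled=False,
    error_rate=85.0,                        # stop when the error percentage stays above 85
    error_rate_time_window_in_seconds=60,   # ...over a 60-second window
    maximum_virtual_users_per_engine=5000,  # new: stop once an engine would exceed 5000 virtual users
)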
@@ -206,7 +216,101 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class DimensionFilter(_model_base.Model): +class Recurrence(_Model): + """Recurrence model. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + RecurrenceWithCron, DailyRecurrence, HourlyRecurrence, MonthlyRecurrenceByDates, + MonthlyRecurrenceByWeekDays, WeeklyRecurrence + + :ivar frequency: Frequency of the recurrence. Required. Known values are: "Cron", "Hourly", + "Daily", "Weekly", "MonthlyByDays", and "MonthlyByDates". + :vartype frequency: str or ~azure.developer.loadtesting.models.Frequency + :ivar recurrence_end: Recurrence end model. You can specify the end either by providing a + numberOfOccurrences (which will end the recurrence after the specified number of occurrences) + or by providing an endDateTime (which will end the recurrence after the specified date). If + neither value is provided, the recurrence will continue until it is manually ended. However, if + both values are provided, an error will be thrown. + :vartype recurrence_end: ~azure.developer.loadtesting.models.RecurrenceEnd + """ + + __mapping__: dict[str, _Model] = {} + frequency: str = rest_discriminator(name="frequency", visibility=["read", "create", "update", "delete", "query"]) + """Frequency of the recurrence. Required. Known values are: \"Cron\", \"Hourly\", \"Daily\", + \"Weekly\", \"MonthlyByDays\", and \"MonthlyByDates\".""" + recurrence_end: Optional["_models.RecurrenceEnd"] = rest_field( + name="recurrenceEnd", visibility=["read", "create", "update", "delete", "query"] + ) + """Recurrence end model. You can specify the end either by providing a numberOfOccurrences (which + will end the recurrence after the specified number of occurrences) or by providing an + endDateTime (which will end the recurrence after the specified date). If neither value is + provided, the recurrence will continue until it is manually ended. However, if both values are + provided, an error will be thrown.""" + + @overload + def __init__( + self, + *, + frequency: str, + recurrence_end: Optional["_models.RecurrenceEnd"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class DailyRecurrence(Recurrence, discriminator="Daily"): + """Recurrence model when frequency is set as Daily. + + :ivar recurrence_end: Recurrence end model. You can specify the end either by providing a + numberOfOccurrences (which will end the recurrence after the specified number of occurrences) + or by providing an endDateTime (which will end the recurrence after the specified date). If + neither value is provided, the recurrence will continue until it is manually ended. However, if + both values are provided, an error will be thrown. + :vartype recurrence_end: ~azure.developer.loadtesting.models.RecurrenceEnd + :ivar frequency: Frequency of the day recurrence. Required. Recurrence defined on a daily + basis, as specified by DailyRecurrence. + :vartype frequency: str or ~azure.developer.loadtesting.models.DAILY + :ivar interval: The interval at which the recurrence should repeat. It signifies the number of + days between each recurrence. Required. 
+ :vartype interval: int + """ + + frequency: Literal[Frequency.DAILY] = rest_discriminator(name="frequency", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Frequency of the day recurrence. Required. Recurrence defined on a daily basis, as specified by + DailyRecurrence.""" + interval: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The interval at which the recurrence should repeat. It signifies the number of days between + each recurrence. Required.""" + + @overload + def __init__( + self, + *, + interval: int, + recurrence_end: Optional["_models.RecurrenceEnd"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.frequency = Frequency.DAILY # type: ignore + + +class DimensionFilter(_Model): """Dimension name and values to filter. :ivar name: The dimension name. @@ -217,8 +321,8 @@ class DimensionFilter(_model_base.Model): name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The dimension name.""" - values_property: Optional[List[str]] = rest_field( - name="values", visibility=["read", "create", "update", "delete", "query"] + values_property: Optional[list[str]] = rest_field( + name="values", visibility=["read", "create", "update", "delete", "query"], original_tsp_name="values" ) """The dimension values. Maximum values can be 20.""" @@ -227,7 +331,7 @@ def __init__( self, *, name: Optional[str] = None, - values_property: Optional[List[str]] = None, + values_property: Optional[list[str]] = None, ) -> None: ... @overload @@ -241,7 +345,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class DimensionValue(_model_base.Model): +class DimensionValue(_Model): """Represents a metric dimension value. :ivar name: The name of the dimension. @@ -274,18 +378,30 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class ErrorDetails(_model_base.Model): +class ErrorDetails(_Model): """Error details if there is any failure in load test run. + :ivar code: Error code if there is any failure in load test run. + :vartype code: str :ivar message: Error details in case test run was not successfully run. :vartype message: str + :ivar properties: A dictionary for storing additional error information for better context. + Each key is a property name (e.g., "Description", "Resolution", "Category", "Region"), and its + value is an array of strings with relevant details. + :vartype properties: dict[str, list[str]] """ + code: Optional[str] = rest_field(visibility=["read"]) + """Error code if there is any failure in load test run.""" message: Optional[str] = rest_field(visibility=["read"]) """Error details in case test run was not successfully run.""" + properties: Optional[dict[str, list[str]]] = rest_field(visibility=["read"]) + """A dictionary for storing additional error information for better context. 
Each key is a + property name (e.g., \"Description\", \"Resolution\", \"Category\", \"Region\"), and its value + is an array of strings with relevant details.""" -class FunctionFlexConsumptionResourceConfiguration(_model_base.Model): # pylint: disable=name-too-long +class FunctionFlexConsumptionResourceConfiguration(_Model): # pylint: disable=name-too-long """Resource configuration instance for a Flex Consumption based Azure Function App. :ivar instance_memory_mb: Memory size of the instance. Supported values are 2048, 4096. @@ -323,7 +439,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class TargetResourceConfigurations(_model_base.Model): +class TargetResourceConfigurations(_Model): """Configurations of a target resource. This varies with the kind of resource. You probably want to use the sub-classes and not this class directly. Known sub-classes are: @@ -334,7 +450,7 @@ class TargetResourceConfigurations(_model_base.Model): :vartype kind: str or ~azure.developer.loadtesting.models.ResourceKind """ - __mapping__: Dict[str, _model_base.Model] = {} + __mapping__: dict[str, _Model] = {} kind: str = rest_discriminator(name="kind", visibility=["read", "create"]) """Kind of the resource for which the configurations apply. Required. \"FunctionsFlexConsumption\"""" @@ -374,7 +490,7 @@ class FunctionFlexConsumptionTargetResourceConfigurations( """The kind value to use when providing configuration. This should typically be not changed from its value. Required. Resource is a Azure FunctionApp on Flex Consumption Plan.""" - configurations: Optional[Dict[str, "_models.FunctionFlexConsumptionResourceConfiguration"]] = rest_field( + configurations: Optional[dict[str, "_models.FunctionFlexConsumptionResourceConfiguration"]] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """A map of configurations for a Function app using Flex Consumption Plan.""" @@ -383,7 +499,51 @@ class FunctionFlexConsumptionTargetResourceConfigurations( def __init__( self, *, - configurations: Optional[Dict[str, "_models.FunctionFlexConsumptionResourceConfiguration"]] = None, + configurations: Optional[dict[str, "_models.FunctionFlexConsumptionResourceConfiguration"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.kind = ResourceKind.FUNCTIONS_FLEX_CONSUMPTION # type: ignore + + +class HourlyRecurrence(Recurrence, discriminator="Hourly"): + """Recurrence model when frequency is set as Hourly. + + :ivar recurrence_end: Recurrence end model. You can specify the end either by providing a + numberOfOccurrences (which will end the recurrence after the specified number of occurrences) + or by providing an endDateTime (which will end the recurrence after the specified date). If + neither value is provided, the recurrence will continue until it is manually ended. However, if + both values are provided, an error will be thrown. + :vartype recurrence_end: ~azure.developer.loadtesting.models.RecurrenceEnd + :ivar frequency: Frequency of the hour recurrence. Required. Recurrence defined on an hourly + basis, as specified by HourlyRecurrence. + :vartype frequency: str or ~azure.developer.loadtesting.models.HOURLY + :ivar interval: The interval at which the recurrence should repeat. 
It signifies the number of + hours between each recurrence. Required. + :vartype interval: int + """ + + frequency: Literal[Frequency.HOURLY] = rest_discriminator(name="frequency", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Frequency of the hour recurrence. Required. Recurrence defined on an hourly basis, as specified + by HourlyRecurrence.""" + interval: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The interval at which the recurrence should repeat. It signifies the number of hours between + each recurrence. Required.""" + + @overload + def __init__( + self, + *, + interval: int, + recurrence_end: Optional["_models.RecurrenceEnd"] = None, ) -> None: ... @overload @@ -394,10 +554,11 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, kind=ResourceKind.FUNCTIONS_FLEX_CONSUMPTION, **kwargs) + super().__init__(*args, **kwargs) + self.frequency = Frequency.HOURLY # type: ignore -class LoadTestConfiguration(_model_base.Model): +class LoadTestConfiguration(_Model): """Configurations for the load test. :ivar engine_instances: The number of engine instances to execute load test. Supported values @@ -442,7 +603,7 @@ class LoadTestConfiguration(_model_base.Model): name="optionalLoadTestConfig", visibility=["read", "create", "update", "delete", "query"] ) """Configuration for quick load test.""" - regional_load_test_config: Optional[List["_models.RegionalConfiguration"]] = rest_field( + regional_load_test_config: Optional[list["_models.RegionalConfiguration"]] = rest_field( name="regionalLoadTestConfig", visibility=["read", "create", "update", "delete", "query"] ) """Region distribution configuration for the load test.""" @@ -455,7 +616,7 @@ def __init__( split_all_csvs: Optional[bool] = None, quick_start_test: Optional[bool] = None, optional_load_test_config: Optional["_models.OptionalLoadTestConfiguration"] = None, - regional_load_test_config: Optional[List["_models.RegionalConfiguration"]] = None, + regional_load_test_config: Optional[list["_models.RegionalConfiguration"]] = None, ) -> None: ... @overload @@ -469,7 +630,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class MetricAvailability(_model_base.Model): +class MetricAvailability(_Model): """Metric availability specifies the time grain (aggregation interval or frequency). :ivar time_grain: The time grain specifies the aggregation interval for the metric. Expressed @@ -503,7 +664,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class MetricDefinition(_model_base.Model): +class MetricDefinition(_Model): """Metric definition. :ivar dimensions: List of dimensions. 
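The recurrence hierarchy above is a discriminated union on frequency, optionally bounded by RecurrenceEnd; a minimal sketch, assuming an hourly schedule (the interval and occurrence count are hypothetical):

from azure.developer.loadtesting.models import HourlyRecurrence, RecurrenceEnd

recurrence = HourlyRecurrence(
    interval=6,  # repeat every 6 hours
    # End after 20 occurrences; endDateTime is the other option, but supplying both is rejected.
    recurrence_end=RecurrenceEnd(number_of_occurrences=20),
)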
@@ -530,7 +691,7 @@ class MetricDefinition(_model_base.Model): :vartype metric_availabilities: list[~azure.developer.loadtesting.models.MetricAvailability] """ - dimensions: Optional[List["_models.NameAndDescription"]] = rest_field( + dimensions: Optional[list["_models.NameAndDescription"]] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """List of dimensions.""" @@ -547,7 +708,7 @@ class MetricDefinition(_model_base.Model): are: \"Average\", \"Count\", \"None\", \"Total\", \"Percentile75\", \"Percentile90\", \"Percentile95\", \"Percentile96\", \"Percentile97\", \"Percentile98\", \"Percentile99\", \"Percentile999\", and \"Percentile9999\".""" - supported_aggregation_types: Optional[List[str]] = rest_field( + supported_aggregation_types: Optional[list[str]] = rest_field( name="supportedAggregationTypes", visibility=["read", "create", "update", "delete", "query"] ) """The collection of what all aggregation types are supported.""" @@ -556,7 +717,7 @@ class MetricDefinition(_model_base.Model): ) """The unit of the metric. Known values are: \"NotSpecified\", \"Percent\", \"Count\", \"Seconds\", \"Milliseconds\", \"Bytes\", \"BytesPerSecond\", and \"CountPerSecond\".""" - metric_availabilities: Optional[List["_models.MetricAvailability"]] = rest_field( + metric_availabilities: Optional[list["_models.MetricAvailability"]] = rest_field( name="metricAvailabilities", visibility=["read", "create", "update", "delete", "query"] ) """Metric availability specifies the time grain (aggregation interval or @@ -566,14 +727,14 @@ class MetricDefinition(_model_base.Model): def __init__( self, *, - dimensions: Optional[List["_models.NameAndDescription"]] = None, + dimensions: Optional[list["_models.NameAndDescription"]] = None, description: Optional[str] = None, name: Optional[str] = None, namespace: Optional[str] = None, primary_aggregation_type: Optional[Union[str, "_models.Aggregation"]] = None, - supported_aggregation_types: Optional[List[str]] = None, + supported_aggregation_types: Optional[list[str]] = None, unit: Optional[Union[str, "_models.MetricUnit"]] = None, - metric_availabilities: Optional[List["_models.MetricAvailability"]] = None, + metric_availabilities: Optional[list["_models.MetricAvailability"]] = None, ) -> None: ... @overload @@ -587,21 +748,21 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class MetricDefinitionCollection(_model_base.Model): +class MetricDefinitionCollection(_Model): """Represents collection of metric definitions. :ivar value: the values for the metric definitions. Required. :vartype value: list[~azure.developer.loadtesting.models.MetricDefinition] """ - value: List["_models.MetricDefinition"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + value: list["_models.MetricDefinition"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """the values for the metric definitions. Required.""" @overload def __init__( self, *, - value: List["_models.MetricDefinition"], + value: list["_models.MetricDefinition"], ) -> None: ... @overload @@ -615,7 +776,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class MetricNamespace(_model_base.Model): +class MetricNamespace(_Model): """Metric namespace class specifies the metadata for a metric namespace. :ivar description: The namespace description. 
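Because every model in this module now derives from the shared _utils.model_base.Model, instances stay MutableMapping-compatible; a short sketch of the attribute/dict duality, assuming a hypothetical payload shaped like a service response:

from azure.developer.loadtesting.models import MetricDefinition

definition = MetricDefinition({"name": "VirtualUsers", "namespace": "LoadTestRunMetrics", "unit": "Count"})

assert definition.name == "VirtualUsers"  # typed attribute access via the Python-style names
assert definition["unit"] == "Count"      # dict-style access keeps the wire property names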
@@ -648,21 +809,21 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class MetricNamespaceCollection(_model_base.Model): +class MetricNamespaceCollection(_Model): """Represents collection of metric namespaces. :ivar value: The values for the metric namespaces. Required. :vartype value: list[~azure.developer.loadtesting.models.MetricNamespace] """ - value: List["_models.MetricNamespace"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + value: list["_models.MetricNamespace"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The values for the metric namespaces. Required.""" @overload def __init__( self, *, - value: List["_models.MetricNamespace"], + value: list["_models.MetricNamespace"], ) -> None: ... @overload @@ -676,7 +837,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class MetricRequestPayload(_model_base.Model): +class MetricRequestPayload(_Model): """Filters to fetch the set of metric. :ivar filters: Get metrics for specific dimension values. Example: Metric contains dimension @@ -686,7 +847,7 @@ class MetricRequestPayload(_model_base.Model): :vartype filters: list[~azure.developer.loadtesting.models.DimensionFilter] """ - filters: Optional[List["_models.DimensionFilter"]] = rest_field( + filters: Optional[list["_models.DimensionFilter"]] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """Get metrics for specific dimension values. Example: Metric contains dimension @@ -698,7 +859,7 @@ class MetricRequestPayload(_model_base.Model): def __init__( self, *, - filters: Optional[List["_models.DimensionFilter"]] = None, + filters: Optional[list["_models.DimensionFilter"]] = None, ) -> None: ... @overload @@ -712,7 +873,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class MetricValue(_model_base.Model): +class MetricValue(_Model): """Represents a metric value. :ivar timestamp: The timestamp for the metric value in RFC 3339 format. @@ -747,7 +908,122 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class NameAndDescription(_model_base.Model): +class MonthlyRecurrenceByDates(Recurrence, discriminator="MonthlyByDates"): + """Recurrence model when frequency is set as MonthlyByDates. + + :ivar recurrence_end: Recurrence end model. You can specify the end either by providing a + numberOfOccurrences (which will end the recurrence after the specified number of occurrences) + or by providing an endDateTime (which will end the recurrence after the specified date). If + neither value is provided, the recurrence will continue until it is manually ended. However, if + both values are provided, an error will be thrown. + :vartype recurrence_end: ~azure.developer.loadtesting.models.RecurrenceEnd + :ivar frequency: Frequency of the month recurrence. Required. Recurrence defined monthly on + specific dates, as specified by MonthlyRecurrenceByDates. + :vartype frequency: str or ~azure.developer.loadtesting.models.MONTHLY_BY_DATES + :ivar dates_in_month: Recurrence set to repeat on the specified dates of the month. Value of + dates can be 1 to 31 and -1. -1 represents the last day of the month. + :vartype dates_in_month: list[int] + :ivar interval: The interval at which the recurrence should repeat. It signifies the number of + months between each recurrence. 
+ :vartype interval: int + """ + + frequency: Literal[Frequency.MONTHLY_BY_DATES] = rest_discriminator(name="frequency", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Frequency of the month recurrence. Required. Recurrence defined monthly on specific dates, as + specified by MonthlyRecurrenceByDates.""" + dates_in_month: Optional[list[int]] = rest_field( + name="datesInMonth", visibility=["read", "create", "update", "delete", "query"] + ) + """Recurrence set to repeat on the specified dates of the month. Value of dates can be 1 to 31 and + -1. -1 represents the last day of the month.""" + interval: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The interval at which the recurrence should repeat. It signifies the number of months between + each recurrence.""" + + @overload + def __init__( + self, + *, + recurrence_end: Optional["_models.RecurrenceEnd"] = None, + dates_in_month: Optional[list[int]] = None, + interval: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.frequency = Frequency.MONTHLY_BY_DATES # type: ignore + + +class MonthlyRecurrenceByWeekDays(Recurrence, discriminator="MonthlyByDays"): + """Recurrence model when frequency is set as MonthlyByDays . + + :ivar recurrence_end: Recurrence end model. You can specify the end either by providing a + numberOfOccurrences (which will end the recurrence after the specified number of occurrences) + or by providing an endDateTime (which will end the recurrence after the specified date). If + neither value is provided, the recurrence will continue until it is manually ended. However, if + both values are provided, an error will be thrown. + :vartype recurrence_end: ~azure.developer.loadtesting.models.RecurrenceEnd + :ivar frequency: Frequency of the month recurrence. Required. Recurrence defined monthly on + specific days, as specified by MonthlyRecurrenceByWeekDays. + :vartype frequency: str or ~azure.developer.loadtesting.models.MONTHLY_BY_DAYS + :ivar week_days_in_month: Specific days of the week when the recurrence should repeat. + :vartype week_days_in_month: list[str or ~azure.developer.loadtesting.models.WeekDays] + :ivar index: Index of the week in a month at which the recurrence should repeat. For example, + if the index is '2', weekDay is 'Monday', interval is 3 and frequency is 'Month', the + recurrence will run every second Monday of the month and repeat every 3 months. Value of index + can be 1 to 5. Required. + :vartype index: int + :ivar interval: The interval at which the recurrence should repeat. It signifies the number of + months between each recurrence. Required. + :vartype interval: int + """ + + frequency: Literal[Frequency.MONTHLY_BY_DAYS] = rest_discriminator(name="frequency", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Frequency of the month recurrence. Required. 
Recurrence defined monthly on specific days, as + specified by MonthlyRecurrenceByWeekDays.""" + week_days_in_month: Optional[list[Union[str, "_models.WeekDays"]]] = rest_field( + name="weekDaysInMonth", visibility=["read", "create", "update", "delete", "query"] + ) + """Specific days of the week when the recurrence should repeat.""" + index: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Index of the week in a month at which the recurrence should repeat. For example, if the index + is '2', weekDay is 'Monday', interval is 3 and frequency is 'Month', the recurrence will run + every second Monday of the month and repeat every 3 months. Value of index can be 1 to 5. + Required.""" + interval: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The interval at which the recurrence should repeat. It signifies the number of months between + each recurrence. Required.""" + + @overload + def __init__( + self, + *, + index: int, + interval: int, + recurrence_end: Optional["_models.RecurrenceEnd"] = None, + week_days_in_month: Optional[list[Union[str, "_models.WeekDays"]]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.frequency = Frequency.MONTHLY_BY_DAYS # type: ignore + + +class NameAndDescription(_Model): """The name and description. :ivar description: The description. @@ -780,7 +1056,123 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class OptionalLoadTestConfiguration(_model_base.Model): +class NotificationRule(_Model): + """Notification rule model. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + TestsNotificationRule + + :ivar notification_rule_id: The unique identifier of the notification rule. Required. + :vartype notification_rule_id: str + :ivar display_name: The name of the notification rule. Required. + :vartype display_name: str + :ivar action_group_ids: The action groups to notify. Required. + :vartype action_group_ids: list[str] + :ivar scope: The scope of the notification rule. Required. "Tests" + :vartype scope: str or ~azure.developer.loadtesting.models.NotificationScopeType + :ivar created_date_time: The creation datetime(RFC 3339 literal format). + :vartype created_date_time: ~datetime.datetime + :ivar created_by: The user that created. + :vartype created_by: str + :ivar last_modified_date_time: The last Modified datetime(RFC 3339 literal format). + :vartype last_modified_date_time: ~datetime.datetime + :ivar last_modified_by: The user that last modified. + :vartype last_modified_by: str + """ + + __mapping__: dict[str, _Model] = {} + notification_rule_id: str = rest_field(name="notificationRuleId", visibility=["read"]) + """The unique identifier of the notification rule. Required.""" + display_name: str = rest_field(name="displayName", visibility=["read", "create", "update", "delete", "query"]) + """The name of the notification rule. Required.""" + action_group_ids: list[str] = rest_field( + name="actionGroupIds", visibility=["read", "create", "update", "delete", "query"] + ) + """The action groups to notify. Required.""" + scope: str = rest_discriminator(name="scope", visibility=["read", "create", "update", "delete", "query"]) + """The scope of the notification rule. 
Required. \"Tests\"""" + created_date_time: Optional[datetime.datetime] = rest_field( + name="createdDateTime", visibility=["read"], format="rfc3339" + ) + """The creation datetime(RFC 3339 literal format).""" + created_by: Optional[str] = rest_field(name="createdBy", visibility=["read"]) + """The user that created.""" + last_modified_date_time: Optional[datetime.datetime] = rest_field( + name="lastModifiedDateTime", visibility=["read"], format="rfc3339" + ) + """The last Modified datetime(RFC 3339 literal format).""" + last_modified_by: Optional[str] = rest_field(name="lastModifiedBy", visibility=["read"]) + """The user that last modified.""" + + @overload + def __init__( + self, + *, + display_name: str, + action_group_ids: list[str], + scope: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class OperationStatus(_Model): + """Status of a long running operation. + + :ivar id: The unique ID of the operation. Required. + :vartype id: str + :ivar status: The state of the operation. Required. Known values are: "NotStarted", "Running", + "Succeeded", "Failed", and "Canceled". + :vartype status: str or ~azure.developer.loadtesting.models.OperationState + :ivar kind: The kind of the operation. Required. Known values are: "CloneTest", + "GenerateTestRunInsights", and "TestPlanRecommendations". + :vartype kind: str or ~azure.developer.loadtesting.models.OperationKind + :ivar error: Error object that describes the error when status is "Failed". + :vartype error: ~azure.core.ODataV4Format + """ + + id: str = rest_field(visibility=["read"]) + """The unique ID of the operation. Required.""" + status: Union[str, "_models.OperationState"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The state of the operation. Required. Known values are: \"NotStarted\", \"Running\", + \"Succeeded\", \"Failed\", and \"Canceled\".""" + kind: Union[str, "_models.OperationKind"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The kind of the operation. Required. Known values are: \"CloneTest\", + \"GenerateTestRunInsights\", and \"TestPlanRecommendations\".""" + error: Optional[ODataV4Format] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Error object that describes the error when status is \"Failed\".""" + + @overload + def __init__( + self, + *, + status: Union[str, "_models.OperationState"], + kind: Union[str, "_models.OperationKind"], + error: Optional[ODataV4Format] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class OptionalLoadTestConfiguration(_Model): """Configuration for quick load test. :ivar endpoint_url: Test URL. Provide the complete HTTP URL. For example, @@ -847,7 +1239,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class PassFailCriteria(_model_base.Model): +class PassFailCriteria(_Model): """Pass fail criteria for a test. :ivar pass_fail_metrics: Map of id and pass fail metrics { id : pass fail metrics }. 
@@ -858,11 +1250,11 @@ class PassFailCriteria(_model_base.Model): ~azure.developer.loadtesting.models.PassFailServerMetric] """ - pass_fail_metrics: Optional[Dict[str, "_models.PassFailMetric"]] = rest_field( + pass_fail_metrics: Optional[dict[str, "_models.PassFailMetric"]] = rest_field( name="passFailMetrics", visibility=["read", "create", "update", "delete", "query"] ) """Map of id and pass fail metrics { id : pass fail metrics }.""" - pass_fail_server_metrics: Optional[Dict[str, "_models.PassFailServerMetric"]] = rest_field( + pass_fail_server_metrics: Optional[dict[str, "_models.PassFailServerMetric"]] = rest_field( name="passFailServerMetrics", visibility=["read", "create", "update", "delete", "query"] ) """Map of id and pass fail server metrics { id : pass fail metrics }.""" @@ -871,8 +1263,8 @@ class PassFailCriteria(_model_base.Model): def __init__( self, *, - pass_fail_metrics: Optional[Dict[str, "_models.PassFailMetric"]] = None, - pass_fail_server_metrics: Optional[Dict[str, "_models.PassFailServerMetric"]] = None, + pass_fail_metrics: Optional[dict[str, "_models.PassFailMetric"]] = None, + pass_fail_server_metrics: Optional[dict[str, "_models.PassFailServerMetric"]] = None, ) -> None: ... @overload @@ -886,17 +1278,18 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class PassFailMetric(_model_base.Model): +class PassFailMetric(_Model): """Pass fail metric. :ivar client_metric: The client metric on which the criteria should be applied. Known values are: "response_time_ms", "latency", "error", "requests", and "requests_per_sec". :vartype client_metric: str or ~azure.developer.loadtesting.models.PFMetrics :ivar aggregate: The aggregation function to be applied on the client metric. Allowed functions + * ‘percentage’ - for error metric , ‘avg’, percentiles like ‘p50’, ‘p90’, & so on, ‘min’, ‘max’ - for response_time_ms and latency metric, ‘avg’ - for requests_per_sec, - ‘count’ - for requests. Known values are: "count", "percentage", "avg", "p50", "p75", "p90", - "p95", "p96", "p97", "p98", "p99", "p99.9", "p99.99", "min", and "max". + ‘count’ - for requests. Known values are: "count", "percentage", "avg", "p50", "p75", + "p90", "p95", "p96", "p97", "p98", "p99", "p99.9", "p99.99", "min", and "max". :vartype aggregate: str or ~azure.developer.loadtesting.models.PassFailAggregationFunction :ivar condition: The comparison operator. Supported types ‘>’, ‘<’. :vartype condition: str @@ -924,11 +1317,12 @@ class PassFailMetric(_model_base.Model): visibility=["read", "create", "update", "delete", "query"] ) """The aggregation function to be applied on the client metric. Allowed functions - * ‘percentage’ - for error metric , ‘avg’, percentiles like ‘p50’, ‘p90’, & so on, ‘min’, - ‘max’ - for response_time_ms and latency metric, ‘avg’ - for requests_per_sec, - ‘count’ - for requests. Known values are: \"count\", \"percentage\", \"avg\", \"p50\", \"p75\", - \"p90\", \"p95\", \"p96\", \"p97\", \"p98\", \"p99\", \"p99.9\", \"p99.99\", \"min\", and - \"max\".""" + + * ‘percentage’ - for error metric , ‘avg’, percentiles like ‘p50’, ‘p90’, & so on, ‘min’, + ‘max’ - for response_time_ms and latency metric, ‘avg’ - for requests_per_sec, + ‘count’ - for requests. 
Known values are: \"count\", \"percentage\", \"avg\", \"p50\", + \"p75\", \"p90\", \"p95\", \"p96\", \"p97\", \"p98\", \"p99\", \"p99.9\", \"p99.99\", \"min\", + and \"max\".""" condition: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The comparison operator. Supported types ‘>’, ‘<’.""" request_name: Optional[str] = rest_field( @@ -971,7 +1365,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class PassFailServerMetric(_model_base.Model): +class PassFailServerMetric(_Model): """Pass fail server metric. :ivar resource_id: The resource id of the resource emitting the metric. Required. @@ -1044,7 +1438,129 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class RegionalConfiguration(_model_base.Model): +class RecurrenceEnd(_Model): + """Recurrence end model. Either provide numberOfOccurrences if you want recurrence to end after a + specified number of occurrences or provide endDate if you want recurrence to end after a + specified end date. If both values are provided, a validation error will be thrown indicating + that only one field should be provided. If neither value is provided, the recurrence will end + when manually ended. + + :ivar number_of_occurrences: Number of occurrences after which the recurrence will end. + :vartype number_of_occurrences: int + :ivar end_date_time: The date after which the recurrence will end. (RFC 3339 literal format). + :vartype end_date_time: ~datetime.datetime + """ + + number_of_occurrences: Optional[int] = rest_field( + name="numberOfOccurrences", visibility=["read", "create", "update", "delete", "query"] + ) + """Number of occurrences after which the recurrence will end.""" + end_date_time: Optional[datetime.datetime] = rest_field( + name="endDateTime", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) + """The date after which the recurrence will end. (RFC 3339 literal format).""" + + @overload + def __init__( + self, + *, + number_of_occurrences: Optional[int] = None, + end_date_time: Optional[datetime.datetime] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RecurrenceStatus(_Model): + """Actual state of the recurrence for the trigger. + + :ivar remaining_occurrences: The number of occurrences remaining for the trigger. Null if + recurrence end has end date instead of number of occurrences. + :vartype remaining_occurrences: int + :ivar next_scheduled_date_times: The next three execution times of the trigger. (RFC 3339 + literal format). + :vartype next_scheduled_date_times: list[~datetime.datetime] + """ + + remaining_occurrences: Optional[int] = rest_field( + name="remainingOccurrences", visibility=["read", "create", "update", "delete", "query"] + ) + """The number of occurrences remaining for the trigger. Null if recurrence end has end date + instead of number of occurrences.""" + next_scheduled_date_times: Optional[list[datetime.datetime]] = rest_field( + name="nextScheduledDateTimes", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) + """The next three execution times of the trigger. 
(RFC 3339 literal format).""" + + @overload + def __init__( + self, + *, + remaining_occurrences: Optional[int] = None, + next_scheduled_date_times: Optional[list[datetime.datetime]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RecurrenceWithCron(Recurrence, discriminator="Cron"): + """Recurrence is set based on cron expression. + + :ivar recurrence_end: Recurrence end model. You can specify the end either by providing a + numberOfOccurrences (which will end the recurrence after the specified number of occurrences) + or by providing an endDateTime (which will end the recurrence after the specified date). If + neither value is provided, the recurrence will continue until it is manually ended. However, if + both values are provided, an error will be thrown. + :vartype recurrence_end: ~azure.developer.loadtesting.models.RecurrenceEnd + :ivar frequency: Specify frequency using a cron expression. Required. Recurrence defined by a + cron expression. + :vartype frequency: str or ~azure.developer.loadtesting.models.CRON + :ivar cron_expression: Cron expression for the recurrence. Required. + :vartype cron_expression: str + """ + + frequency: Literal[Frequency.CRON] = rest_discriminator(name="frequency", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Specify frequency using a cron expression. Required. Recurrence defined by a cron expression.""" + cron_expression: str = rest_field(name="cronExpression", visibility=["read", "create", "update", "delete", "query"]) + """Cron expression for the recurrence. Required.""" + + @overload + def __init__( + self, + *, + cron_expression: str, + recurrence_end: Optional["_models.RecurrenceEnd"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.frequency = Frequency.CRON # type: ignore + + +class RegionalConfiguration(_Model): """Region distribution configuration for the load test. :ivar engine_instances: The number of engine instances to execute load test in specified @@ -1089,7 +1605,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class ResourceMetric(_model_base.Model): +class ResourceMetric(_Model): """Associated metric definition for particular metrics of the azure resource ( Refer : `https://learn.microsoft.com/en-us/rest/api/monitor/metric-definitions/list#metricdefinition @@ -1158,7 +1674,161 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class Secret(_model_base.Model): +class Trigger(_Model): + """Trigger model. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + ScheduleTestsTrigger + + :ivar trigger_id: The unique identifier of the trigger. Required. + :vartype trigger_id: str + :ivar display_name: The name of the trigger. Required. + :vartype display_name: str + :ivar description: The description of the trigger. + :vartype description: str + :ivar kind: The type of the trigger. Required. 
"ScheduleTestsTrigger" + :vartype kind: str or ~azure.developer.loadtesting.models.TriggerType + :ivar state: The current state of the trigger. Known values are: "Active", "Paused", + "Completed", and "Disabled". + :vartype state: str or ~azure.developer.loadtesting.models.TriggerState + :ivar state_details: Details of current state of the trigger. + :vartype state_details: ~azure.developer.loadtesting.models.StateDetails + :ivar created_date_time: The creation datetime(RFC 3339 literal format). + :vartype created_date_time: ~datetime.datetime + :ivar created_by: The user that created. + :vartype created_by: str + :ivar last_modified_date_time: The last Modified datetime(RFC 3339 literal format). + :vartype last_modified_date_time: ~datetime.datetime + :ivar last_modified_by: The user that last modified. + :vartype last_modified_by: str + """ + + __mapping__: dict[str, _Model] = {} + trigger_id: str = rest_field(name="triggerId", visibility=["read"]) + """The unique identifier of the trigger. Required.""" + display_name: str = rest_field(name="displayName", visibility=["read", "create", "update", "delete", "query"]) + """The name of the trigger. Required.""" + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The description of the trigger.""" + kind: str = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) + """The type of the trigger. Required. \"ScheduleTestsTrigger\"""" + state: Optional[Union[str, "_models.TriggerState"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The current state of the trigger. Known values are: \"Active\", \"Paused\", \"Completed\", and + \"Disabled\".""" + state_details: Optional["_models.StateDetails"] = rest_field(name="stateDetails", visibility=["read"]) + """Details of current state of the trigger.""" + created_date_time: Optional[datetime.datetime] = rest_field( + name="createdDateTime", visibility=["read"], format="rfc3339" + ) + """The creation datetime(RFC 3339 literal format).""" + created_by: Optional[str] = rest_field(name="createdBy", visibility=["read"]) + """The user that created.""" + last_modified_date_time: Optional[datetime.datetime] = rest_field( + name="lastModifiedDateTime", visibility=["read"], format="rfc3339" + ) + """The last Modified datetime(RFC 3339 literal format).""" + last_modified_by: Optional[str] = rest_field(name="lastModifiedBy", visibility=["read"]) + """The user that last modified.""" + + @overload + def __init__( + self, + *, + display_name: str, + kind: str, + description: Optional[str] = None, + state: Optional[Union[str, "_models.TriggerState"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ScheduleTestsTrigger(Trigger, discriminator="ScheduleTestsTrigger"): + """ScheduleTestsTrigger model. + + :ivar trigger_id: The unique identifier of the trigger. Required. + :vartype trigger_id: str + :ivar display_name: The name of the trigger. Required. + :vartype display_name: str + :ivar description: The description of the trigger. + :vartype description: str + :ivar state: The current state of the trigger. Known values are: "Active", "Paused", + "Completed", and "Disabled". 
+ :vartype state: str or ~azure.developer.loadtesting.models.TriggerState + :ivar state_details: Details of current state of the trigger. + :vartype state_details: ~azure.developer.loadtesting.models.StateDetails + :ivar created_date_time: The creation datetime(RFC 3339 literal format). + :vartype created_date_time: ~datetime.datetime + :ivar created_by: The user that created. + :vartype created_by: str + :ivar last_modified_date_time: The last Modified datetime(RFC 3339 literal format). + :vartype last_modified_date_time: ~datetime.datetime + :ivar last_modified_by: The user that last modified. + :vartype last_modified_by: str + :ivar kind: The type of the trigger is ScheduleTestsTrigger. Required. Trigger is a Scheduled + Trigger on a Test. + :vartype kind: str or ~azure.developer.loadtesting.models.SCHEDULE_TESTS_TRIGGER + :ivar test_ids: The test id of test to be triggered by this schedule trigger. Currently only + one test is supported for a trigger. Required. + :vartype test_ids: list[str] + :ivar start_date_time: Start date time of the trigger in UTC timezone. (RFC 3339 literal + format). + :vartype start_date_time: ~datetime.datetime + :ivar recurrence_status: + :vartype recurrence_status: ~azure.developer.loadtesting.models.RecurrenceStatus + :ivar recurrence: Recurrence details of the trigger. Null if schedule is not recurring. + :vartype recurrence: ~azure.developer.loadtesting.models.Recurrence + """ + + kind: Literal[TriggerType.SCHEDULE_TESTS_TRIGGER] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The type of the trigger is ScheduleTestsTrigger. Required. Trigger is a Scheduled Trigger on a + Test.""" + test_ids: list[str] = rest_field(name="testIds", visibility=["read", "create", "update", "delete", "query"]) + """The test id of test to be triggered by this schedule trigger. Currently only one test is + supported for a trigger. Required.""" + start_date_time: Optional[datetime.datetime] = rest_field( + name="startDateTime", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) + """Start date time of the trigger in UTC timezone. (RFC 3339 literal format).""" + recurrence_status: Optional["_models.RecurrenceStatus"] = rest_field(name="recurrenceStatus", visibility=["read"]) + recurrence: Optional["_models.Recurrence"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Recurrence details of the trigger. Null if schedule is not recurring.""" + + @overload + def __init__( + self, + *, + display_name: str, + test_ids: list[str], + description: Optional[str] = None, + state: Optional[Union[str, "_models.TriggerState"]] = None, + start_date_time: Optional[datetime.datetime] = None, + recurrence: Optional["_models.Recurrence"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.kind = TriggerType.SCHEDULE_TESTS_TRIGGER # type: ignore + + +class Secret(_Model): """Secret. :ivar value: The value of the secret for the respective type. @@ -1178,8 +1848,36 @@ class Secret(_model_base.Model): def __init__( self, *, - value: Optional[str] = None, - type: Optional[Union[str, "_models.SecretType"]] = None, + value: Optional[str] = None, + type: Optional[Union[str, "_models.SecretType"]] = None, + ) -> None: ... 
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class StateDetails(_Model): + """State details of the trigger. + + :ivar message: The error message if the trigger is in disabled state. + :vartype message: str + """ + + message: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The error message if the trigger is in disabled state.""" + + @overload + def __init__( + self, + *, + message: Optional[str] = None, ) -> None: ... @overload @@ -1193,7 +1891,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class Test(_model_base.Model): +class Test(_Model): """Load test model. :ivar pass_fail_criteria: Pass fail criteria for a test. @@ -1252,6 +1950,10 @@ class Test(_model_base.Model): :ivar engine_built_in_identity_ids: Resource Ids of the managed identity built in to load test engines. Required if engineBuiltInIdentityType is UserAssigned. :vartype engine_built_in_identity_ids: list[str] + :ivar estimated_virtual_user_hours: Estimated virtual user hours for the test. + :vartype estimated_virtual_user_hours: float + :ivar preferences: Preferences for the test. + :vartype preferences: ~azure.developer.loadtesting.models.TestPreferences :ivar created_date_time: The creation datetime(RFC 3339 literal format). :vartype created_date_time: ~datetime.datetime :ivar created_by: The user that created. @@ -1271,7 +1973,7 @@ class Test(_model_base.Model): ) """Auto stop criteria for a test. This will automatically stop a load test if the error percentage is high for a certain time window.""" - secrets: Optional[Dict[str, "_models.Secret"]] = rest_field( + secrets: Optional[dict[str, "_models.Secret"]] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """Secrets can be stored in an Azure Key Vault or any other secret store. If the @@ -1283,7 +1985,7 @@ class Test(_model_base.Model): visibility=["read", "create", "update", "delete", "query"] ) """Certificates metadata.""" - environment_variables: Optional[Dict[str, str]] = rest_field( + environment_variables: Optional[dict[str, str]] = rest_field( name="environmentVariables", visibility=["read", "create", "update", "delete", "query"] ) """Environment variables which are defined as a set of pairs.""" @@ -1339,11 +2041,17 @@ class Test(_model_base.Model): ) """Type of the managed identity built in load test engines. Known values are: \"SystemAssigned\" and \"UserAssigned\".""" - engine_built_in_identity_ids: Optional[List[str]] = rest_field( + engine_built_in_identity_ids: Optional[list[str]] = rest_field( name="engineBuiltInIdentityIds", visibility=["read", "create", "update", "delete", "query"] ) """Resource Ids of the managed identity built in to load test engines. 
Required if engineBuiltInIdentityType is UserAssigned.""" + estimated_virtual_user_hours: Optional[float] = rest_field(name="estimatedVirtualUserHours", visibility=["read"]) + """Estimated virtual user hours for the test.""" + preferences: Optional["_models.TestPreferences"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Preferences for the test.""" created_date_time: Optional[datetime.datetime] = rest_field( name="createdDateTime", visibility=["read"], format="rfc3339" ) @@ -1363,9 +2071,9 @@ def __init__( # pylint: disable=too-many-locals *, pass_fail_criteria: Optional["_models.PassFailCriteria"] = None, auto_stop_criteria: Optional["_models.AutoStopCriteria"] = None, - secrets: Optional[Dict[str, "_models.Secret"]] = None, + secrets: Optional[dict[str, "_models.Secret"]] = None, certificate: Optional["_models.CertificateMetadata"] = None, - environment_variables: Optional[Dict[str, str]] = None, + environment_variables: Optional[dict[str, str]] = None, load_test_configuration: Optional["_models.LoadTestConfiguration"] = None, baseline_test_run_id: Optional[str] = None, description: Optional[str] = None, @@ -1378,7 +2086,8 @@ def __init__( # pylint: disable=too-many-locals metrics_reference_identity_type: Optional[Union[str, "_models.ManagedIdentityType"]] = None, metrics_reference_identity_id: Optional[str] = None, engine_built_in_identity_type: Optional[Union[str, "_models.ManagedIdentityType"]] = None, - engine_built_in_identity_ids: Optional[List[str]] = None, + engine_built_in_identity_ids: Optional[list[str]] = None, + preferences: Optional["_models.TestPreferences"] = None, ) -> None: ... @overload @@ -1392,7 +2101,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class TestAppComponents(_model_base.Model): +class TestAppComponents(_Model): """Test app components. :ivar components: Azure resource collection { resource id (fully qualified resource Id e.g @@ -1411,7 +2120,7 @@ class TestAppComponents(_model_base.Model): :vartype last_modified_by: str """ - components: Dict[str, "_models.AppComponent"] = rest_field( + components: dict[str, "_models.AppComponent"] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """Azure resource collection { resource id (fully qualified resource Id e.g @@ -1436,7 +2145,7 @@ class TestAppComponents(_model_base.Model): def __init__( self, *, - components: Dict[str, "_models.AppComponent"], + components: dict[str, "_models.AppComponent"], ) -> None: ... @overload @@ -1450,7 +2159,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class TestFileInfo(_model_base.Model): +class TestFileInfo(_Model): """Test file info. :ivar file_name: Name of the file. Required. @@ -1458,7 +2167,8 @@ class TestFileInfo(_model_base.Model): :ivar url: File URL. :vartype url: str :ivar file_type: File type. Known values are: "JMX_FILE", "USER_PROPERTIES", - "ADDITIONAL_ARTIFACTS", "ZIPPED_ARTIFACTS", "URL_TEST_CONFIG", and "TEST_SCRIPT". + "ADDITIONAL_ARTIFACTS", "ZIPPED_ARTIFACTS", "URL_TEST_CONFIG", "TEST_SCRIPT", + "BROWSER_RECORDING", and "TEST_PLAN_RECOMMENDATIONS". :vartype file_type: str or ~azure.developer.loadtesting.models.FileType :ivar expire_date_time: Expiry time of the file (RFC 3339 literal format). 
:vartype expire_date_time: ~datetime.datetime @@ -1476,7 +2186,8 @@ class TestFileInfo(_model_base.Model): """File URL.""" file_type: Optional[Union[str, "_models.FileType"]] = rest_field(name="fileType", visibility=["read"]) """File type. Known values are: \"JMX_FILE\", \"USER_PROPERTIES\", \"ADDITIONAL_ARTIFACTS\", - \"ZIPPED_ARTIFACTS\", \"URL_TEST_CONFIG\", and \"TEST_SCRIPT\".""" + \"ZIPPED_ARTIFACTS\", \"URL_TEST_CONFIG\", \"TEST_SCRIPT\", \"BROWSER_RECORDING\", and + \"TEST_PLAN_RECOMMENDATIONS\".""" expire_date_time: Optional[datetime.datetime] = rest_field( name="expireDateTime", visibility=["read"], format="rfc3339" ) @@ -1507,7 +2218,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class TestInputArtifacts(_model_base.Model): +class TestInputArtifacts(_Model): """The input artifacts for the test. :ivar config_file_info: The load test YAML file that contains the the test configuration. @@ -1544,7 +2255,7 @@ class TestInputArtifacts(_model_base.Model): name="urlTestConfigFileInfo", visibility=["read", "create", "update", "delete", "query"] ) """The config json file for url based test.""" - additional_file_info: Optional[List["_models.TestFileInfo"]] = rest_field( + additional_file_info: Optional[list["_models.TestFileInfo"]] = rest_field( name="additionalFileInfo", visibility=["read"] ) """Additional supported files for the test run.""" @@ -1571,7 +2282,37 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class TestProfile(_model_base.Model): +class TestPreferences(_Model): + """Preferences for the test. + + :ivar enable_ai_error_insights: Enable or disable AI based insights on Test Run Errors. + :vartype enable_ai_error_insights: bool + """ + + enable_ai_error_insights: Optional[bool] = rest_field( + name="enableAIErrorInsights", visibility=["read", "create", "update", "delete", "query"] + ) + """Enable or disable AI based insights on Test Run Errors.""" + + @overload + def __init__( + self, + *, + enable_ai_error_insights: Optional[bool] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class TestProfile(_Model): """Test Profile Model. :ivar test_profile_id: Unique identifier for the test profile, must contain only lower-case @@ -1655,7 +2396,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class TestProfileRun(_model_base.Model): +class TestProfileRun(_Model): """Test Profile Run model. :ivar test_profile_run_id: Unique identifier for the test profile run, must contain only @@ -1723,7 +2464,7 @@ class TestProfileRun(_model_base.Model): status: Optional[Union[str, "_models.TestProfileRunStatus"]] = rest_field(visibility=["read"]) """The test profile run status. Known values are: \"ACCEPTED\", \"NOTSTARTED\", \"EXECUTING\", \"DONE\", \"CANCELLING\", \"CANCELLED\", and \"FAILED\".""" - error_details: Optional[List["_models.ErrorDetails"]] = rest_field(name="errorDetails", visibility=["read"]) + error_details: Optional[list["_models.ErrorDetails"]] = rest_field(name="errorDetails", visibility=["read"]) """Error details if there is any failure in test profile run. 
These errors are specific to the Test Profile Run.""" start_date_time: Optional[datetime.datetime] = rest_field( @@ -1734,12 +2475,12 @@ class TestProfileRun(_model_base.Model): """The test profile run end DateTime(RFC 3339 literal format).""" duration_in_seconds: Optional[int] = rest_field(name="durationInSeconds", visibility=["read"]) """Test profile run duration in seconds.""" - test_run_details: Optional[Dict[str, "_models.TestRunDetail"]] = rest_field( + test_run_details: Optional[dict[str, "_models.TestRunDetail"]] = rest_field( name="testRunDetails", visibility=["read"] ) """Details of the test runs ran as part of the test profile run. Key is the testRunId of the corresponding testRun.""" - recommendations: Optional[List["_models.TestProfileRunRecommendation"]] = rest_field(visibility=["read"]) + recommendations: Optional[list["_models.TestProfileRunRecommendation"]] = rest_field(visibility=["read"]) """Recommendations provided based on a successful test profile run.""" created_date_time: Optional[datetime.datetime] = rest_field( name="createdDateTime", visibility=["read"], format="rfc3339" @@ -1774,7 +2515,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class TestProfileRunRecommendation(_model_base.Model): +class TestProfileRunRecommendation(_Model): """A recommendation object that provides a list of configuration that optimizes its category. :ivar category: Category of the recommendation. Required. Known values are: @@ -1790,7 +2531,7 @@ class TestProfileRunRecommendation(_model_base.Model): ) """Category of the recommendation. Required. Known values are: \"ThroughputOptimized\" and \"CostOptimized\".""" - configurations: Optional[List[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + configurations: Optional[list[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """List of configurations IDs for which the recommendation is applicable. These are a subset of the provided target resource configurations.""" @@ -1799,7 +2540,7 @@ def __init__( self, *, category: Union[str, "_models.RecommendationCategory"], - configurations: Optional[List[str]] = None, + configurations: Optional[list[str]] = None, ) -> None: ... @overload @@ -1813,7 +2554,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class TestRun(_model_base.Model): +class TestRun(_Model): """Load test run model. :ivar test_run_id: Unique test run identifier for the load test run, must contain only @@ -1895,8 +2636,18 @@ class TestRun(_model_base.Model): access. :vartype public_ip_disabled: bool :ivar created_by_type: The type of the entity that created the test run. (E.x. User, - ScheduleTrigger, etc). Known values are: "User" and "ScheduledTrigger". + ScheduleTrigger, etc). Known values are: "User", "ScheduledTrigger", "AzurePipelines", and + "GitHubWorkflows". :vartype created_by_type: str or ~azure.developer.loadtesting.models.CreatedByType + :ivar created_by_uri: The URI pointing to the entity that created the test run. + :vartype created_by_uri: str + :ivar estimated_virtual_user_hours: Estimated virtual user hours for the test run. + :vartype estimated_virtual_user_hours: float + :ivar execution_start_date_time: The test run execution start DateTime(RFC 3339 literal + format). + :vartype execution_start_date_time: ~datetime.datetime + :ivar execution_end_date_time: The test run execution end DateTime(RFC 3339 literal format). 
+ :vartype execution_end_date_time: ~datetime.datetime :ivar created_date_time: The creation datetime(RFC 3339 literal format). :vartype created_date_time: ~datetime.datetime :ivar created_by: The user that created. @@ -1919,7 +2670,7 @@ class TestRun(_model_base.Model): ) """Auto stop criteria for a test. This will automatically stop a load test if the error percentage is high for a certain time window.""" - secrets: Optional[Dict[str, "_models.Secret"]] = rest_field( + secrets: Optional[dict[str, "_models.Secret"]] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """Secrets can be stored in an Azure Key Vault or any other secret store. If the @@ -1931,20 +2682,20 @@ class TestRun(_model_base.Model): visibility=["read", "create", "update", "delete", "query"] ) """Certificates metadata.""" - environment_variables: Optional[Dict[str, str]] = rest_field( + environment_variables: Optional[dict[str, str]] = rest_field( name="environmentVariables", visibility=["read", "create", "update", "delete", "query"] ) """Environment variables which are defined as a set of pairs.""" - error_details: Optional[List["_models.ErrorDetails"]] = rest_field(name="errorDetails", visibility=["read"]) + error_details: Optional[list["_models.ErrorDetails"]] = rest_field(name="errorDetails", visibility=["read"]) """Error details if there is any failure in load test run.""" - test_run_statistics: Optional[Dict[str, "_models.TestRunStatistics"]] = rest_field( + test_run_statistics: Optional[dict[str, "_models.TestRunStatistics"]] = rest_field( name="testRunStatistics", visibility=["read"] ) """Test run statistics. Key is the sampler name and value is the set of statistics for performance metrics like response time, throughput, etc. from the load test run. The sampler name is the same as the name mentioned in the test script. Sampler name \"Total\" represents the aggregated statistics of all the samplers.""" - regional_statistics: Optional[Dict[str, "_models.TestRunStatistics"]] = rest_field( + regional_statistics: Optional[dict[str, "_models.TestRunStatistics"]] = rest_field( name="regionalStatistics", visibility=["read"] ) """Regional statistics. Key is the Azure region name and value is the test run statistics. @@ -2011,7 +2762,19 @@ class TestRun(_model_base.Model): name="createdByType", visibility=["read", "create", "update", "delete", "query"] ) """The type of the entity that created the test run. (E.x. User, ScheduleTrigger, etc). 
Known - values are: \"User\" and \"ScheduledTrigger\".""" + values are: \"User\", \"ScheduledTrigger\", \"AzurePipelines\", and \"GitHubWorkflows\".""" + created_by_uri: Optional[str] = rest_field(name="createdByUri", visibility=["read"]) + """The URI pointing to the entity that created the test run.""" + estimated_virtual_user_hours: Optional[float] = rest_field(name="estimatedVirtualUserHours", visibility=["read"]) + """Estimated virtual user hours for the test run.""" + execution_start_date_time: Optional[datetime.datetime] = rest_field( + name="executionStartDateTime", visibility=["read"], format="rfc3339" + ) + """The test run execution start DateTime(RFC 3339 literal format).""" + execution_end_date_time: Optional[datetime.datetime] = rest_field( + name="executionEndDateTime", visibility=["read"], format="rfc3339" + ) + """The test run execution end DateTime(RFC 3339 literal format).""" created_date_time: Optional[datetime.datetime] = rest_field( name="createdDateTime", visibility=["read"], format="rfc3339" ) @@ -2031,9 +2794,9 @@ def __init__( # pylint: disable=too-many-locals *, pass_fail_criteria: Optional["_models.PassFailCriteria"] = None, auto_stop_criteria: Optional["_models.AutoStopCriteria"] = None, - secrets: Optional[Dict[str, "_models.Secret"]] = None, + secrets: Optional[dict[str, "_models.Secret"]] = None, certificate: Optional["_models.CertificateMetadata"] = None, - environment_variables: Optional[Dict[str, str]] = None, + environment_variables: Optional[dict[str, str]] = None, display_name: Optional[str] = None, test_id: Optional[str] = None, description: Optional[str] = None, @@ -2053,7 +2816,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class TestRunAppComponents(_model_base.Model): +class TestRunAppComponents(_Model): """Test run app component. :ivar components: Azure resource collection { resource id (fully qualified resource Id e.g @@ -2072,7 +2835,7 @@ class TestRunAppComponents(_model_base.Model): :vartype last_modified_by: str """ - components: Dict[str, "_models.AppComponent"] = rest_field( + components: dict[str, "_models.AppComponent"] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """Azure resource collection { resource id (fully qualified resource Id e.g @@ -2097,7 +2860,7 @@ class TestRunAppComponents(_model_base.Model): def __init__( self, *, - components: Dict[str, "_models.AppComponent"], + components: dict[str, "_models.AppComponent"], ) -> None: ... @overload @@ -2111,7 +2874,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class TestRunArtifacts(_model_base.Model): +class TestRunArtifacts(_Model): """Collection of test run artifacts. :ivar input_artifacts: The input artifacts for the test run. @@ -2145,7 +2908,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class TestRunDetail(_model_base.Model): +class TestRunDetail(_Model): """Details of a particular test run for a test profile run. :ivar status: Status of the test run. Required. Known values are: "ACCEPTED", "NOTSTARTED", @@ -2168,7 +2931,7 @@ class TestRunDetail(_model_base.Model): name="configurationId", visibility=["read", "create", "update", "delete", "query"] ) """ID of the configuration on which the test ran. 
Required.""" - properties: Dict[str, str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + properties: dict[str, str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Key value pair of extra properties associated with the test run. Required.""" @overload @@ -2177,7 +2940,115 @@ def __init__( *, status: Union[str, "_models.TestRunStatus"], configuration_id: str, - properties: Dict[str, str], + properties: dict[str, str], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class TestRunEndedEventCondition(_Model): + """TestRunEnded Event condition. + + :ivar test_run_statuses: The test run statuses to send notification for. + :vartype test_run_statuses: list[str or ~azure.developer.loadtesting.models.TestRunStatus] + :ivar test_run_results: The test run results to send notification for. + :vartype test_run_results: list[str or ~azure.developer.loadtesting.models.PassFailTestResult] + """ + + test_run_statuses: Optional[list[Union[str, "_models.TestRunStatus"]]] = rest_field( + name="testRunStatuses", visibility=["read", "create", "update", "delete", "query"] + ) + """The test run statuses to send notification for.""" + test_run_results: Optional[list[Union[str, "_models.PassFailTestResult"]]] = rest_field( + name="testRunResults", visibility=["read", "create", "update", "delete", "query"] + ) + """The test run results to send notification for.""" + + @overload + def __init__( + self, + *, + test_run_statuses: Optional[list[Union[str, "_models.TestRunStatus"]]] = None, + test_run_results: Optional[list[Union[str, "_models.PassFailTestResult"]]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class TestsNotificationEventFilter(_Model): + """The notification event filter for Tests scope. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + TestRunEndedNotificationEventFilter, TestRunStartedNotificationEventFilter, + TriggerCompletedNotificationEventFilter, TriggerDisabledNotificationEventFilter + + :ivar kind: The event type. Required. Known values are: "TestRunEnded", "TestRunStarted", + "TriggerCompleted", and "TriggerDisabled". + :vartype kind: str or ~azure.developer.loadtesting.models.NotificationEventType + """ + + __mapping__: dict[str, _Model] = {} + kind: str = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) + """The event type. Required. Known values are: \"TestRunEnded\", \"TestRunStarted\", + \"TriggerCompleted\", and \"TriggerDisabled\".""" + + @overload + def __init__( + self, + *, + kind: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class TestRunEndedNotificationEventFilter(TestsNotificationEventFilter, discriminator="TestRunEnded"): + """The notification event filter when the event type is TestRunEnded and scope is Tests. + + :ivar kind: Event type for test run ended event. Required. Test run ended event. This event + would occur when a test run reaches terminal state. + :vartype kind: str or ~azure.developer.loadtesting.models.TEST_RUN_ENDED + :ivar condition: Event filtering condition. + :vartype condition: ~azure.developer.loadtesting.models.TestRunEndedEventCondition + """ + + kind: Literal[NotificationEventType.TEST_RUN_ENDED] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Event type for test run ended event. Required. Test run ended event. This event would occur + when a test run reaches terminal state.""" + condition: Optional["_models.TestRunEndedEventCondition"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """Event filtering condition.""" + + @overload + def __init__( + self, + *, + condition: Optional["_models.TestRunEndedEventCondition"] = None, ) -> None: ... @overload @@ -2189,9 +3060,10 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) + self.kind = NotificationEventType.TEST_RUN_ENDED # type: ignore -class TestRunFileInfo(_model_base.Model): +class TestRunFileInfo(_Model): """Test run file info. :ivar file_name: Name of the file. Required. @@ -2199,7 +3071,8 @@ class TestRunFileInfo(_model_base.Model): :ivar url: File URL. :vartype url: str :ivar file_type: File type. Known values are: "JMX_FILE", "USER_PROPERTIES", - "ADDITIONAL_ARTIFACTS", "ZIPPED_ARTIFACTS", "URL_TEST_CONFIG", and "TEST_SCRIPT". + "ADDITIONAL_ARTIFACTS", "ZIPPED_ARTIFACTS", "URL_TEST_CONFIG", "TEST_SCRIPT", + "BROWSER_RECORDING", and "TEST_PLAN_RECOMMENDATIONS". :vartype file_type: str or ~azure.developer.loadtesting.models.FileType :ivar expire_date_time: Expiry time of the file (RFC 3339 literal format). :vartype expire_date_time: ~datetime.datetime @@ -2217,7 +3090,8 @@ class TestRunFileInfo(_model_base.Model): """File URL.""" file_type: Optional[Union[str, "_models.FileType"]] = rest_field(name="fileType", visibility=["read"]) """File type. Known values are: \"JMX_FILE\", \"USER_PROPERTIES\", \"ADDITIONAL_ARTIFACTS\", - \"ZIPPED_ARTIFACTS\", \"URL_TEST_CONFIG\", and \"TEST_SCRIPT\".""" + \"ZIPPED_ARTIFACTS\", \"URL_TEST_CONFIG\", \"TEST_SCRIPT\", \"BROWSER_RECORDING\", and + \"TEST_PLAN_RECOMMENDATIONS\".""" expire_date_time: Optional[datetime.datetime] = rest_field( name="expireDateTime", visibility=["read"], format="rfc3339" ) @@ -2248,7 +3122,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class TestRunInputArtifacts(_model_base.Model): +class TestRunInputArtifacts(_Model): """The input artifacts for the test run. :ivar config_file_info: The load test YAML file that contains the the test configuration. 
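A minimal usage sketch (not part of the generated patch) for the notification event filter models introduced above. It assumes these classes are exported from azure.developer.loadtesting.models, as their docstring references indicate; the status and result values are illustrative placeholders taken from the known enum values.

# Sketch only: compose a TestRunEnded event filter using the keyword-only
# overloads shown above. The "kind" discriminator is populated automatically
# by __init__, so only the optional condition needs to be supplied.
from azure.developer.loadtesting.models import (
    TestRunEndedEventCondition,
    TestRunEndedNotificationEventFilter,
)

# Notify only when a run ends in FAILED or DONE state with a PASSED/FAILED result
# (placeholder values; any known TestRunStatus / PassFailTestResult strings work).
ended_filter = TestRunEndedNotificationEventFilter(
    condition=TestRunEndedEventCondition(
        test_run_statuses=["FAILED", "DONE"],
        test_run_results=["PASSED", "FAILED"],
    )
)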
@@ -2285,7 +3159,7 @@ class TestRunInputArtifacts(_model_base.Model): name="urlTestConfigFileInfo", visibility=["read", "create", "update", "delete", "query"] ) """The config json file for url based test.""" - additional_file_info: Optional[List["_models.TestRunFileInfo"]] = rest_field( + additional_file_info: Optional[list["_models.TestRunFileInfo"]] = rest_field( name="additionalFileInfo", visibility=["read"] ) """Additional supported files for the test run.""" @@ -2312,7 +3186,82 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class TestRunOutputArtifacts(_model_base.Model): +class TestRunInsightColumn(_Model): + """Represents a column of the test run insight. + + :ivar name: Name of the column. Required. + :vartype name: str + :ivar data_type: The data type of the column. Required. + :vartype data_type: str + """ + + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """Name of the column. Required.""" + data_type: str = rest_field(name="dataType", visibility=["read", "create", "update", "delete", "query"]) + """The data type of the column. Required.""" + + @overload + def __init__( + self, + *, + name: str, + data_type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class TestRunInsights(_Model): + """Represents insights for the test run. + + :ivar columns: The columns of the insights. + :vartype columns: list[~azure.developer.loadtesting.models.TestRunInsightColumn] + :ivar rows: The rows of the insights. + :vartype rows: dict[str, dict[str, str]] + :ivar version: The version of the insights. + :vartype version: int + :ivar status: The status of the insights. Known values are: "NotStarted", "Running", + "Succeeded", "Failed", and "Canceled". + :vartype status: str or ~azure.developer.loadtesting.models.OperationState + """ + + columns: Optional[list["_models.TestRunInsightColumn"]] = rest_field(visibility=["read"]) + """The columns of the insights.""" + rows: Optional[dict[str, dict[str, str]]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The rows of the insights.""" + version: Optional[int] = rest_field(visibility=["read"]) + """The version of the insights.""" + status: Optional[Union[str, "_models.OperationState"]] = rest_field(visibility=["read"]) + """The status of the insights. Known values are: \"NotStarted\", \"Running\", \"Succeeded\", + \"Failed\", and \"Canceled\".""" + + @overload + def __init__( + self, + *, + rows: Optional[dict[str, dict[str, str]]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class TestRunOutputArtifacts(_Model): """The output artifacts for the test run. :ivar result_file_info: The test run results file. @@ -2363,7 +3312,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class TestRunServerMetricsConfiguration(_model_base.Model): +class TestRunServerMetricsConfiguration(_Model): """Test run server metrics configuration. :ivar test_run_id: Test run identifier. 
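A brief, illustrative sketch (not part of the generated patch) of inspecting the new TestRunInsights model. The payload below is hypothetical, and the raw-JSON mapping overload defined above is used here purely for demonstration; in practice the object would come back from an insights operation on the client.

from azure.developer.loadtesting.models import TestRunInsights

# Hypothetical insights payload shaped like the wire format described above.
raw_insights = {
    "status": "Succeeded",
    "version": 1,
    "columns": [{"name": "errorCode", "dataType": "string"}],
    "rows": {"row1": {"errorCode": "ResponseTimeout"}},
}

# Mapping overload: wraps the raw JSON so fields can be read through the model.
insights = TestRunInsights(raw_insights)
if insights.status == "Succeeded":
    for column in insights.columns or []:
        print(column.name, column.data_type)
    for row_id, row in (insights.rows or {}).items():
        print(row_id, row)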
@@ -2385,7 +3334,7 @@ class TestRunServerMetricsConfiguration(_model_base.Model): test_run_id: Optional[str] = rest_field(name="testRunId", visibility=["read"]) """Test run identifier.""" - metrics: Optional[Dict[str, "_models.ResourceMetric"]] = rest_field( + metrics: Optional[dict[str, "_models.ResourceMetric"]] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """Azure resource metrics collection {metric id : metrics object} (Refer : @@ -2409,7 +3358,35 @@ class TestRunServerMetricsConfiguration(_model_base.Model): def __init__( self, *, - metrics: Optional[Dict[str, "_models.ResourceMetric"]] = None, + metrics: Optional[dict[str, "_models.ResourceMetric"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class TestRunStartedNotificationEventFilter(TestsNotificationEventFilter, discriminator="TestRunStarted"): + """The notification event filter when the event type is TestRunStarted and scope is Tests. + + :ivar kind: Event type for test run started event. Required. Test run started event. This event + would occur when a new test run is triggered. + :vartype kind: str or ~azure.developer.loadtesting.models.TEST_RUN_STARTED + """ + + kind: Literal[NotificationEventType.TEST_RUN_STARTED] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Event type for test run started event. Required. Test run started event. This event would occur + when a new test run is triggered.""" + + @overload + def __init__( + self, ) -> None: ... @overload @@ -2421,9 +3398,10 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) + self.kind = NotificationEventType.TEST_RUN_STARTED # type: ignore -class TestRunStatistics(_model_base.Model): +class TestRunStatistics(_Model): """Test run statistics. :ivar transaction: Transaction name. @@ -2510,7 +3488,7 @@ class TestRunStatistics(_model_base.Model): """Send network bytes.""" -class TestServerMetricsConfiguration(_model_base.Model): +class TestServerMetricsConfiguration(_Model): """Test server metrics configuration. :ivar test_id: Test identifier. @@ -2532,7 +3510,7 @@ class TestServerMetricsConfiguration(_model_base.Model): test_id: Optional[str] = rest_field(name="testId", visibility=["read"]) """Test identifier.""" - metrics: Dict[str, "_models.ResourceMetric"] = rest_field( + metrics: dict[str, "_models.ResourceMetric"] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """Azure resource metrics collection {metric id : metrics object} (Refer : @@ -2556,7 +3534,68 @@ class TestServerMetricsConfiguration(_model_base.Model): def __init__( self, *, - metrics: Dict[str, "_models.ResourceMetric"], + metrics: dict[str, "_models.ResourceMetric"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class TestsNotificationRule(NotificationRule, discriminator="Tests"): + """Tests Notification rule model. + + :ivar notification_rule_id: The unique identifier of the notification rule. Required. 
+ :vartype notification_rule_id: str + :ivar display_name: The name of the notification rule. Required. + :vartype display_name: str + :ivar action_group_ids: The action groups to notify. Required. + :vartype action_group_ids: list[str] + :ivar created_date_time: The creation datetime(RFC 3339 literal format). + :vartype created_date_time: ~datetime.datetime + :ivar created_by: The user that created. + :vartype created_by: str + :ivar last_modified_date_time: The last Modified datetime(RFC 3339 literal format). + :vartype last_modified_date_time: ~datetime.datetime + :ivar last_modified_by: The user that last modified. + :vartype last_modified_by: str + :ivar scope: Scope of type Tests. Required. Notification rule is for Tests. + :vartype scope: str or ~azure.developer.loadtesting.models.TESTS + :ivar test_ids: The test ids to include. If not provided, notification will be sent for all + testIds. + :vartype test_ids: list[str] + :ivar event_filters: The event to receive notifications for along with filtering conditions. + Key is a user-assigned identifier for the event filter. Required. + :vartype event_filters: dict[str, + ~azure.developer.loadtesting.models.TestsNotificationEventFilter] + """ + + scope: Literal[NotificationScopeType.TESTS] = rest_discriminator(name="scope", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Scope of type Tests. Required. Notification rule is for Tests.""" + test_ids: Optional[list[str]] = rest_field( + name="testIds", visibility=["read", "create", "update", "delete", "query"] + ) + """The test ids to include. If not provided, notification will be sent for all testIds.""" + event_filters: dict[str, "_models.TestsNotificationEventFilter"] = rest_field( + name="eventFilters", visibility=["read", "create", "update", "delete", "query"] + ) + """The event to receive notifications for along with filtering conditions. + Key is a user-assigned identifier for the event filter. Required.""" + + @overload + def __init__( + self, + *, + display_name: str, + action_group_ids: list[str], + event_filters: dict[str, "_models.TestsNotificationEventFilter"], + test_ids: Optional[list[str]] = None, ) -> None: ... @overload @@ -2568,9 +3607,10 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) + self.scope = NotificationScopeType.TESTS # type: ignore -class TimeSeriesElement(_model_base.Model): +class TimeSeriesElement(_Model): """The time series returned when a data query is performed. :ivar data: An array of data points representing the metric values. 
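A hedged end-to-end sketch (not part of the generated patch) composing the TestsNotificationRule defined above from typed models instead of raw JSON. It assumes create_or_update_notification_rule also accepts a typed model as body (the generated sample later in this patch passes the equivalent raw dict), and every ID below is a placeholder; a condition can be attached to the TestRunEnded filter as sketched after the TestRunEndedEventCondition model earlier.

from azure.identity import DefaultAzureCredential
from azure.developer.loadtesting import LoadTestAdministrationClient
from azure.developer.loadtesting.models import (
    TestsNotificationRule,
    TestRunEndedNotificationEventFilter,
    TestRunStartedNotificationEventFilter,
)

client = LoadTestAdministrationClient(
    endpoint="ENDPOINT",
    credential=DefaultAzureCredential(),
)

# Placeholder IDs; the "scope" discriminator ("Tests") is set by __init__.
rule = TestsNotificationRule(
    display_name="Sample_Notification_Rule",
    action_group_ids=[
        "/subscriptions/<sub-id>/resourceGroups/<rg>/providers/microsoft.insights/actionGroups/<action-group>"
    ],
    test_ids=["<test-id>"],
    event_filters={
        "on-run-started": TestRunStartedNotificationEventFilter(),
        "on-run-ended": TestRunEndedNotificationEventFilter(),
    },
)

# Assumption: the operation accepts the typed model; the generated sample uses raw JSON.
result = client.create_or_update_notification_rule(
    notification_rule_id="<notification-rule-id>",
    body=rule,
)
print(result)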
@@ -2579,9 +3619,9 @@ class TimeSeriesElement(_model_base.Model): :vartype dimension_values: list[~azure.developer.loadtesting.models.DimensionValue] """ - data: Optional[List["_models.MetricValue"]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + data: Optional[list["_models.MetricValue"]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """An array of data points representing the metric values.""" - dimension_values: Optional[List["_models.DimensionValue"]] = rest_field( + dimension_values: Optional[list["_models.DimensionValue"]] = rest_field( name="dimensionValues", visibility=["read", "create", "update", "delete", "query"] ) """The dimension values.""" @@ -2590,8 +3630,116 @@ class TimeSeriesElement(_model_base.Model): def __init__( self, *, - data: Optional[List["_models.MetricValue"]] = None, - dimension_values: Optional[List["_models.DimensionValue"]] = None, + data: Optional[list["_models.MetricValue"]] = None, + dimension_values: Optional[list["_models.DimensionValue"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class TriggerCompletedNotificationEventFilter(TestsNotificationEventFilter, discriminator="TriggerCompleted"): + """The notification event filter when the event type is TriggerCompleted. + + :ivar kind: Event type for trigger ended event. Required. Trigger completed event. This event + would occur when a trigger completes. + :vartype kind: str or ~azure.developer.loadtesting.models.TRIGGER_COMPLETED + """ + + kind: Literal[NotificationEventType.TRIGGER_COMPLETED] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Event type for trigger ended event. Required. Trigger completed event. This event would occur + when a trigger completes.""" + + @overload + def __init__( + self, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.kind = NotificationEventType.TRIGGER_COMPLETED # type: ignore + + +class TriggerDisabledNotificationEventFilter(TestsNotificationEventFilter, discriminator="TriggerDisabled"): + """The notification event filter when the event type is TriggerDisabled. + + :ivar kind: Event type for trigger disabled event. Required. Trigger disabled event. This event + would occur when a trigger is disabled. + :vartype kind: str or ~azure.developer.loadtesting.models.TRIGGER_DISABLED + """ + + kind: Literal[NotificationEventType.TRIGGER_DISABLED] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Event type for trigger disabled event. Required. Trigger disabled event. This event would occur + when a trigger is disabled.""" + + @overload + def __init__( + self, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.kind = NotificationEventType.TRIGGER_DISABLED # type: ignore + + +class WeeklyRecurrence(Recurrence, discriminator="Weekly"): + """Recurrence model when frequency is set as weekly. + + :ivar recurrence_end: Recurrence end model. You can specify the end either by providing a + numberOfOccurrences (which will end the recurrence after the specified number of occurrences) + or by providing an endDateTime (which will end the recurrence after the specified date). If + neither value is provided, the recurrence will continue until it is manually ended. However, if + both values are provided, an error will be thrown. + :vartype recurrence_end: ~azure.developer.loadtesting.models.RecurrenceEnd + :ivar frequency: Frequency of the week recurrence. Required. Recurrence defined on a weekly + basis, as specified by WeeklyRecurrence. + :vartype frequency: str or ~azure.developer.loadtesting.models.WEEKLY + :ivar days_of_week: Recurrence set to repeat on the specified days of the week. + :vartype days_of_week: list[str or ~azure.developer.loadtesting.models.WeekDays] + :ivar interval: The interval at which the recurrence should repeat. It signifies the number of + weeks between each recurrence. + :vartype interval: int + """ + + frequency: Literal[Frequency.WEEKLY] = rest_discriminator(name="frequency", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """Frequency of the week recurrence. Required. Recurrence defined on a weekly basis, as specified + by WeeklyRecurrence.""" + days_of_week: Optional[list[Union[str, "_models.WeekDays"]]] = rest_field( + name="daysOfWeek", visibility=["read", "create", "update", "delete", "query"] + ) + """Recurrence set to repeat on the specified days of the week.""" + interval: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The interval at which the recurrence should repeat. It signifies the number of weeks between + each recurrence.""" + + @overload + def __init__( + self, + *, + recurrence_end: Optional["_models.RecurrenceEnd"] = None, + days_of_week: Optional[list[Union[str, "_models.WeekDays"]]] = None, + interval: Optional[int] = None, ) -> None: ... @overload @@ -2603,3 +3751,4 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) + self.frequency = Frequency.WEEKLY # type: ignore diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/create_or_update_notification_rule.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/create_or_update_notification_rule.py new file mode 100644 index 000000000000..c8e9d34b9d05 --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/create_or_update_notification_rule.py @@ -0,0 +1,59 @@ +# pylint: disable=line-too-long,useless-suppression +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestAdministrationClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python create_or_update_notification_rule.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestAdministrationClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.create_or_update_notification_rule( + notification_rule_id="12345678-1234-1234-1234-123456789012", + body={ + "actionGroupIds": [ + "/subscriptions/12345678-1234-1234-1234-123456789012/resourceGroups/sampleRG/providers/microsoft.insights/actionGroups/sampleActionGroup" + ], + "displayName": "Sample_Notification_Rule", + "eventFilters": { + "eventfilter1": { + "condition": {"testRunResults": ["PASSED", "FAILED"], "testRunStatuses": ["FAILED", "DONE"]}, + "kind": "TestRunEnded", + }, + "eventfilter2": {"kind": "TestRunStarted"}, + "eventfilter3": {"kind": "TriggerCompleted"}, + "eventfilter4": {"kind": "TriggerDisabled"}, + }, + "scope": "Tests", + "testIds": ["12345678-1234-1234-1234-123456789012"], + }, + ) + print(response) + + +# x-ms-original-file: 2025-11-01-preview/CreateOrUpdateNotificationRule.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/create_or_update_test.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/create_or_update_test.py new file mode 100644 index 000000000000..4a99a0b3eaa0 --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/create_or_update_test.py @@ -0,0 +1,99 @@ +# pylint: disable=line-too-long,useless-suppression +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestAdministrationClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python create_or_update_test.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestAdministrationClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.create_or_update_test( + test_id="12345678-1234-1234-1234-123456789012", + body={ + "autoStopCriteria": { + "autoStopDisabled": True, + "errorRate": 70, + "errorRateTimeWindowInSeconds": 60, + "maximumVirtualUsersPerEngine": 5000, + }, + "description": "sample description", + "displayName": "Performance_LoadTest", + "engineBuiltInIdentityIds": [ + "/subscriptions/10000000-0000-0000-0000-000000000000/resourceGroups/samplerg1/providers/Microsoft.ManagedIdentity/userAssignedIdentities/sampleresourcename" + ], + "engineBuiltInIdentityType": "UserAssigned", + "environmentVariables": {"envvar1": "sampletext"}, + "keyvaultReferenceIdentityId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/samplerg/providers/sampleprovider/sampleresourcetype/sampleresourcename", + "keyvaultReferenceIdentityType": "UserAssigned", + "kind": "JMX", + "loadTestConfiguration": { + "engineInstances": 6, + "regionalLoadTestConfig": [ + {"engineInstances": 4, "region": "northeurope"}, + {"engineInstances": 2, "region": "westeurope"}, + ], + "splitAllCSVs": True, + }, + "metricsReferenceIdentityId": "/subscriptions/10000000-0000-0000-0000-000000000000/resourceGroups/samplerg1/providers/Microsoft.ManagedIdentity/userAssignedIdentities/sampleresourcename", + "metricsReferenceIdentityType": "UserAssigned", + "passFailCriteria": { + "passFailMetrics": { + "fefd759d-7fe8-4f83-8b6d-aeebe0f491fe": { + "action": "continue", + "aggregate": "percentage", + "clientMetric": "response_time_ms", + "condition": ">", + "value": 20, + } + }, + "passFailServerMetrics": { + "fefd759d-7fe8-4f83-8b6d-aeebe0f491fe": { + "action": "continue", + "aggregation": "Average", + "condition": ">", + "metricName": "Percentage CPU", + "metricNamespace": "Microsoft.Compute/virtualMachines", + "resourceId": "/subscriptions/12345678-1234-1234-1234-123456789abc/resourceGroups/MyResourceGroup/providers/Microsoft.Compute/virtualMachines/MyVM", + "value": 20, + } + }, + }, + "secrets": { + "secret1": { + "type": "AKV_SECRET_URI", + "value": "https://samplevault.vault.azure.net/secrets/samplesecret/f113f91fd4c44a368049849c164db827", + } + }, + "subnetId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/samplerg/providers/Microsoft.Network/virtualNetworks/samplenetworkresource/subnets/AAAAA0A0A0", + }, + ) + print(response) + + +# x-ms-original-file: 2025-11-01-preview/CreateOrUpdateTest.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/create_or_update_test_app_components.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/create_or_update_test_app_components.py new file mode 100644 index 000000000000..b18e3e56547a --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/create_or_update_test_app_components.py @@ -0,0 +1,52 @@ +# pylint: disable=line-too-long,useless-suppression +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestAdministrationClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python create_or_update_test_app_components.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestAdministrationClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.create_or_update_app_components( + test_id="ee843bd9-a6d4-4364-a45c-427a03c39fa7", + body={ + "components": { + "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/samplerg/providers/microsoft.insights/components/appcomponentresource": { + "displayName": "Performance_LoadTest_Insights", + "kind": "web", + "resourceName": "appcomponentresource", + "resourceType": "microsoft.insights/components", + } + } + }, + ) + print(response) + + +# x-ms-original-file: 2025-11-01-preview/CreateOrUpdateTestAppComponents.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/create_or_update_test_locust.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/create_or_update_test_locust.py new file mode 100644 index 000000000000..a9eb4cc32cca --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/create_or_update_test_locust.py @@ -0,0 +1,99 @@ +# pylint: disable=line-too-long,useless-suppression +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestAdministrationClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python create_or_update_test_locust.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestAdministrationClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.create_or_update_test( + test_id="12345678-1234-1234-1234-123456789012", + body={ + "autoStopCriteria": { + "autoStopDisabled": True, + "errorRate": 70, + "errorRateTimeWindowInSeconds": 60, + "maximumVirtualUsersPerEngine": 5000, + }, + "description": "sample description", + "displayName": "Performance_LoadTest", + "engineBuiltInIdentityIds": [ + "/subscriptions/10000000-0000-0000-0000-000000000000/resourceGroups/samplerg1/providers/Microsoft.ManagedIdentity/userAssignedIdentities/sampleresourcename" + ], + "engineBuiltInIdentityType": "UserAssigned", + "environmentVariables": {"envvar1": "sampletext"}, + "keyvaultReferenceIdentityId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/samplerg/providers/sampleprovider/sampleresourcetype/sampleresourcename", + "keyvaultReferenceIdentityType": "UserAssigned", + "kind": "Locust", + "loadTestConfiguration": { + "engineInstances": 6, + "regionalLoadTestConfig": [ + {"engineInstances": 4, "region": "northeurope"}, + {"engineInstances": 2, "region": "westeurope"}, + ], + "splitAllCSVs": False, + }, + "metricsReferenceIdentityId": "/subscriptions/10000000-0000-0000-0000-000000000000/resourceGroups/samplerg1/providers/Microsoft.ManagedIdentity/userAssignedIdentities/sampleresourcename", + "metricsReferenceIdentityType": "UserAssigned", + "passFailCriteria": { + "passFailMetrics": { + "fefd759d-7fe8-4f83-8b6d-aeebe0f491fe": { + "action": "continue", + "aggregate": "percentage", + "clientMetric": "response_time_ms", + "condition": ">", + "value": 20, + } + }, + "passFailServerMetrics": { + "fefd759d-7fe8-4f83-8b6d-aeebe0f491fe": { + "action": "continue", + "aggregation": "Average", + "condition": ">", + "metricName": "Percentage CPU", + "metricNamespace": "Microsoft.Compute/virtualMachines", + "resourceId": "/subscriptions/12345678-1234-1234-1234-123456789abc/resourceGroups/MyResourceGroup/providers/Microsoft.Compute/virtualMachines/MyVM", + "value": 20, + } + }, + }, + "secrets": { + "secret1": { + "type": "AKV_SECRET_URI", + "value": "https://samplevault.vault.azure.net/secrets/samplesecret/f113f91fd4c44a368049849c164db827", + } + }, + "subnetId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/samplerg/providers/Microsoft.Network/virtualNetworks/samplenetworkresource/subnets/AAAAA0A0A0", + }, + ) + print(response) + + +# x-ms-original-file: 2025-11-01-preview/CreateOrUpdateTest_Locust.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/create_or_update_test_profile.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/create_or_update_test_profile.py new file mode 100644 index 000000000000..87f1c2b82729 --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/create_or_update_test_profile.py @@ -0,0 +1,55 @@ +# pylint: disable=line-too-long,useless-suppression +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestAdministrationClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python create_or_update_test_profile.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestAdministrationClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.create_or_update_test_profile( + test_profile_id="12345678-1234-1234-1234-123456789012", + body={ + "description": "sample description", + "displayName": "Performance_TestProfile", + "targetResourceConfigurations": { + "configurations": { + "config1": {"httpConcurrency": 16, "instanceMemoryMB": 2048}, + "config2": {"httpConcurrency": 16, "instanceMemoryMB": 4096}, + }, + "kind": "FunctionsFlexConsumption", + }, + "targetResourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/samplerg/providers/Microsoft.Web/sites/myfunctionapp", + "testId": "12346-abcd-6789", + }, + ) + print(response) + + +# x-ms-original-file: 2025-11-01-preview/CreateOrUpdateTestProfile.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/create_or_update_test_run_app_components.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/create_or_update_test_run_app_components.py new file mode 100644 index 000000000000..eacc8ba11d06 --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/create_or_update_test_run_app_components.py @@ -0,0 +1,52 @@ +# pylint: disable=line-too-long,useless-suppression +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestRunClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python create_or_update_test_run_app_components.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestRunClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.create_or_update_app_components( + test_run_id="ee843bd9-a6d4-4364-a45c-427a03c39fa7", + body={ + "components": { + "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/samplerg/providers/microsoft.insights/components/appcomponentresource": { + "displayName": "Performance_LoadTest_Insights", + "kind": "web", + "resourceName": "appcomponentresource", + "resourceType": "microsoft.insights/components", + } + } + }, + ) + print(response) + + +# x-ms-original-file: 2025-11-01-preview/CreateOrUpdateTestRunAppComponents.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/create_or_update_test_run_server_metrics_config.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/create_or_update_test_run_server_metrics_config.py new file mode 100644 index 000000000000..12d1c77e3a7f --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/create_or_update_test_run_server_metrics_config.py @@ -0,0 +1,55 @@ +# pylint: disable=line-too-long,useless-suppression +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestRunClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python create_or_update_test_run_server_metrics_config.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestRunClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.create_or_update_server_metrics_config( + test_run_id="edc6e529-d009-4b99-b763-ca492e3a2823", + body={ + "metrics": { + "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/samplerg/providers/microsoft.insights/components/appcomponentresource/providers/microsoft.insights/metricdefinitions/requests/duration": { + "aggregation": "Average", + "displayDescription": "sample description", + "metricNamespace": "microsoft.insights/components", + "name": "requests/duration", + "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/samplerg/providers/microsoft.insights/components/appcomponentresource", + "resourceType": "microsoft.insights/components", + "unit": None, + } + } + }, + ) + print(response) + + +# x-ms-original-file: 2025-11-01-preview/CreateOrUpdateTestRunServerMetricsConfig.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/create_or_update_test_server_metrics_config.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/create_or_update_test_server_metrics_config.py new file mode 100644 index 000000000000..934215fd32ad --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/create_or_update_test_server_metrics_config.py @@ -0,0 +1,55 @@ +# pylint: disable=line-too-long,useless-suppression +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestAdministrationClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python create_or_update_test_server_metrics_config.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestAdministrationClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.create_or_update_server_metrics_config( + test_id="edc6e529-d009-4b99-b763-ca492e3a2823", + body={ + "metrics": { + "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/samplerg/providers/microsoft.insights/components/appcomponentresource/providers/microsoft.insights/metricdefinitions/requests/duration": { + "aggregation": "Average", + "displayDescription": "sample description", + "metricNamespace": "microsoft.insights/components", + "name": "requests/duration", + "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/samplerg/providers/microsoft.insights/components/appcomponentresource", + "resourceType": "microsoft.insights/components", + "unit": None, + } + } + }, + ) + print(response) + + +# x-ms-original-file: 2025-11-01-preview/CreateOrUpdateTestServerMetricsConfig.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/create_or_update_trigger.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/create_or_update_trigger.py new file mode 100644 index 000000000000..e91c5eaee016 --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/create_or_update_trigger.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestAdministrationClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python create_or_update_trigger.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestAdministrationClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.create_or_update_trigger( + trigger_id="12345678-1234-1234-1234-123456789012", + body={ + "description": "sample description", + "displayName": "Sample_Trigger", + "kind": "ScheduleTestsTrigger", + "recurrence": {"frequency": "Daily", "interval": 1, "recurrenceEnd": {"numberOfOccurrences": 10}}, + "startDateTime": "2024-05-01T00:00:00Z", + "state": "Active", + "testIds": ["12345678-1234-1234-1234-123456789012"], + }, + ) + print(response) + + +# x-ms-original-file: 2025-11-01-preview/CreateOrUpdateTrigger.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/delete_notification_rule.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/delete_notification_rule.py new file mode 100644 index 000000000000..10ae91611587 --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/delete_notification_rule.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestAdministrationClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python delete_notification_rule.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestAdministrationClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.delete_notification_rule( + notification_rule_id="12345678-1234-1234-1234-123456789012", + ) + + +# x-ms-original-file: 2025-11-01-preview/DeleteNotificationRule.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/delete_test.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/delete_test.py new file mode 100644 index 000000000000..73c9c3d2ec23 --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/delete_test.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestAdministrationClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python delete_test.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestAdministrationClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.delete_test( + test_id="12345678-1234-1234-1234-123456789012", + ) + + +# x-ms-original-file: 2025-11-01-preview/DeleteTest.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/delete_test_file.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/delete_test_file.py new file mode 100644 index 000000000000..eaff10b2d9a4 --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/delete_test_file.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestAdministrationClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python delete_test_file.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestAdministrationClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.delete_test_file( + test_id="12345678-1234-1234-1234-123456789012", + file_name="app.jmx", + ) + + +# x-ms-original-file: 2025-11-01-preview/DeleteTestFile.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/delete_test_profile.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/delete_test_profile.py new file mode 100644 index 000000000000..6f5630580f48 --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/delete_test_profile.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestAdministrationClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python delete_test_profile.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestAdministrationClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.delete_test_profile( + test_profile_id="12345678-1234-1234-1234-123456789012", + ) + + +# x-ms-original-file: 2025-11-01-preview/DeleteTestProfile.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/delete_test_profile_run.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/delete_test_profile_run.py new file mode 100644 index 000000000000..7a4a01b37dfe --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/delete_test_profile_run.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestRunClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python delete_test_profile_run.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestRunClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.delete_test_profile_run( + test_profile_run_id="12316678-1234-1234-1234-122451189012", + ) + + +# x-ms-original-file: 2025-11-01-preview/DeleteTestProfileRun.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/delete_test_run.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/delete_test_run.py new file mode 100644 index 000000000000..7d0789381df2 --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/delete_test_run.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestRunClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python delete_test_run.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestRunClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.delete_test_run( + test_run_id="12316678-1234-1234-1234-122451189012", + ) + + +# x-ms-original-file: 2025-11-01-preview/DeleteTestRun.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/delete_trigger.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/delete_trigger.py new file mode 100644 index 000000000000..72193f15fe1d --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/delete_trigger.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestAdministrationClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python delete_trigger.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestAdministrationClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.delete_trigger( + trigger_id="12345678-1234-1234-1234-123456789012", + ) + + +# x-ms-original-file: 2025-11-01-preview/DeleteTrigger.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/generate_test_plan_recommendations.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/generate_test_plan_recommendations.py new file mode 100644 index 000000000000..3b92a4545866 --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/generate_test_plan_recommendations.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestAdministrationClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python generate_test_plan_recommendations.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestAdministrationClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.begin_generate_test_plan_recommendations( + test_id="12316678-1234-1234-1234-122451189012", + ).result() + print(response) + + +# x-ms-original-file: 2025-11-01-preview/GenerateTestPlanRecommendations.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/generate_test_run_insights.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/generate_test_run_insights.py new file mode 100644 index 000000000000..5a3f9892ae40 --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/generate_test_run_insights.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestRunClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python generate_test_run_insights.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestRunClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + client.begin_generate_test_run_insights( + test_run_id="12316678-1234-1234-1234-122451189012", + ).result() + + +# x-ms-original-file: 2025-11-01-preview/GenerateTestRunInsights.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_notification_rule.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_notification_rule.py new file mode 100644 index 000000000000..2b1c27b82746 --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_notification_rule.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestAdministrationClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python get_notification_rule.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestAdministrationClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.get_notification_rule( + notification_rule_id="12345678-1234-1234-1234-123456789012", + ) + print(response) + + +# x-ms-original-file: 2025-11-01-preview/GetNotificationRule.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_status.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_status.py new file mode 100644 index 000000000000..58df8087c4d0 --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_status.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestAdministrationClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python get_status.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestAdministrationClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.get_operation_status( + operation_id="12345678-1234-1234-1234-123456789012", + ) + print(response) + + +# x-ms-original-file: 2025-11-01-preview/GetStatus.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_test.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_test.py new file mode 100644 index 000000000000..314466a8827a --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_test.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestAdministrationClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python get_test.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestAdministrationClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.get_test( + test_id="12345678-1234-1234-1234-123456789012", + ) + print(response) + + +# x-ms-original-file: 2025-11-01-preview/GetTest.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_test_file.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_test_file.py new file mode 100644 index 000000000000..affe92001c1f --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_test_file.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestAdministrationClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python get_test_file.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestAdministrationClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.get_test_file( + test_id="12345678-1234-1234-1234-123456789012", + file_name="sample.jmx", + ) + print(response) + + +# x-ms-original-file: 2025-11-01-preview/GetTestFile.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_test_profile.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_test_profile.py new file mode 100644 index 000000000000..c9ecec8c71aa --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_test_profile.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestAdministrationClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python get_test_profile.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestAdministrationClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.get_test_profile( + test_profile_id="12345678-1234-1234-1234-123456789012", + ) + print(response) + + +# x-ms-original-file: 2025-11-01-preview/GetTestProfile.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_test_profile_run_executed.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_test_profile_run_executed.py new file mode 100644 index 000000000000..ae2fd4db826f --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_test_profile_run_executed.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestRunClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python get_test_profile_run_executed.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestRunClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.get_test_profile_run( + test_profile_run_id="12316678-1234-1234-1234-122451189012", + ) + print(response) + + +# x-ms-original-file: 2025-11-01-preview/GetTestProfileRun_Executed.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_test_profile_run_executing.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_test_profile_run_executing.py new file mode 100644 index 000000000000..44b0d94bc0f6 --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_test_profile_run_executing.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. 
All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestRunClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python get_test_profile_run_executing.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestRunClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.get_test_profile_run( + test_profile_run_id="12316678-1234-1234-1234-122451189012", + ) + print(response) + + +# x-ms-original-file: 2025-11-01-preview/GetTestProfileRun_Executing.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_test_run.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_test_run.py new file mode 100644 index 000000000000..756725c2022c --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_test_run.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestRunClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python get_test_run.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestRunClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.get_test_run( + test_run_id="12316678-1234-1234-1234-122451189012", + ) + print(response) + + +# x-ms-original-file: 2025-11-01-preview/GetTestRun.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_test_run_executing.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_test_run_executing.py new file mode 100644 index 000000000000..e78c072b34c0 --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_test_run_executing.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestRunClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python get_test_run_executing.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestRunClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.get_test_run( + test_run_id="12316678-1234-1234-1234-122451189012", + ) + print(response) + + +# x-ms-original-file: 2025-11-01-preview/GetTestRun_Executing.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_test_run_file.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_test_run_file.py new file mode 100644 index 000000000000..4cf5a9b016f1 --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_test_run_file.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestRunClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python get_test_run_file.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestRunClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.get_test_run_file( + test_run_id="12316678-1234-1234-1234-122451189012", + file_name="sample.jmx", + ) + print(response) + + +# x-ms-original-file: 2025-11-01-preview/GetTestRunFile.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_test_run_latest_insights.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_test_run_latest_insights.py new file mode 100644 index 000000000000..d39a234d762b --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_test_run_latest_insights.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestRunClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python get_test_run_latest_insights.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestRunClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.get_latest_test_run_insights( + test_run_id="12316678-1234-1234-1234-122451189012", + ) + print(response) + + +# x-ms-original-file: 2025-11-01-preview/GetTestRunLatestInsights.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_trigger.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_trigger.py new file mode 100644 index 000000000000..f5d597e92569 --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/get_trigger.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestAdministrationClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python get_trigger.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestAdministrationClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.get_trigger( + trigger_id="12345678-1234-1234-1234-123456789012", + ) + print(response) + + +# x-ms-original-file: 2025-11-01-preview/GetTrigger.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_metric_dimension_values.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_metric_dimension_values.py new file mode 100644 index 000000000000..0cabb644198b --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_metric_dimension_values.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestRunClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python list_metric_dimension_values.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestRunClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.list_metric_dimension_values( + test_run_id="12316678-1234-1234-1234-122451189012", + name="SamplerName", + metric_name="ActiveThreads", + metric_namespace="LoadTestRunMetrics", + time_interval="2022-09-24T19:00:40Z/2022-09-25T19:28:40Z", + ) + for item in response: + print(item) + + +# x-ms-original-file: 2025-11-01-preview/ListMetricDimensionValues.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_notification_rules.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_notification_rules.py new file mode 100644 index 000000000000..ddb5ffc101be --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_notification_rules.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. 
All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestAdministrationClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python list_notification_rules.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestAdministrationClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.list_notification_rules() + for item in response: + print(item) + + +# x-ms-original-file: 2025-11-01-preview/ListNotificationRules.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_app_components.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_app_components.py new file mode 100644 index 000000000000..f7ed9e0b82e7 --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_app_components.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestAdministrationClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python list_test_app_components.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestAdministrationClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.get_app_components( + test_id="ee843bd9-a6d4-4364-a45c-427a03c39fa7", + ) + print(response) + + +# x-ms-original-file: 2025-11-01-preview/ListTestAppComponents.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_files.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_files.py new file mode 100644 index 000000000000..66b7e0cd905d --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_files.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestAdministrationClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python list_test_files.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestAdministrationClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.list_test_files( + test_id="12345678-1234-1234-1234-123456789012", + ) + for item in response: + print(item) + + +# x-ms-original-file: 2025-11-01-preview/ListTestFiles.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_profile_runs.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_profile_runs.py new file mode 100644 index 000000000000..3502920c5eb3 --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_profile_runs.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestRunClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python list_test_profile_runs.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestRunClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.list_test_profile_runs() + for item in response: + print(item) + + +# x-ms-original-file: 2025-11-01-preview/ListTestProfileRuns.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_profiles.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_profiles.py new file mode 100644 index 000000000000..2fbd32a1d007 --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_profiles.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestAdministrationClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python list_test_profiles.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestAdministrationClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.list_test_profiles() + for item in response: + print(item) + + +# x-ms-original-file: 2025-11-01-preview/ListTestProfiles.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_run_app_components.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_run_app_components.py new file mode 100644 index 000000000000..f47b71532ca6 --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_run_app_components.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestRunClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python list_test_run_app_components.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestRunClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.get_app_components( + test_run_id="ee843bd9-a6d4-4364-a45c-427a03c39fa7", + ) + print(response) + + +# x-ms-original-file: 2025-11-01-preview/ListTestRunAppComponents.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_run_metrics.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_run_metrics.py new file mode 100644 index 000000000000..ef4a71a45c10 --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_run_metrics.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestRunClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python list_test_run_metrics.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestRunClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.list_metrics( + test_run_id="12316678-1234-1234-1234-122451189012", + metric_name="ActiveThreads", + metric_namespace="LoadTestRunMetrics", + time_interval="2022-09-24T19:00:40Z/2022-09-25T19:28:40Z", + ) + for item in response: + print(item) + + +# x-ms-original-file: 2025-11-01-preview/ListTestRunMetrics.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_run_metrics_definitions.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_run_metrics_definitions.py new file mode 100644 index 000000000000..733c72baab1d --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_run_metrics_definitions.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestRunClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python list_test_run_metrics_definitions.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestRunClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.get_metric_definitions( + test_run_id="12316678-1234-1234-1234-122451189012", + metric_namespace="LoadTestRunMetrics", + ) + print(response) + + +# x-ms-original-file: 2025-11-01-preview/ListTestRunMetricsDefinitions.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_run_metrics_namespaces.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_run_metrics_namespaces.py new file mode 100644 index 000000000000..63f73c01282e --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_run_metrics_namespaces.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestRunClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python list_test_run_metrics_namespaces.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestRunClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.get_metric_namespaces( + test_run_id="12316678-1234-1234-1234-122451189012", + ) + print(response) + + +# x-ms-original-file: 2025-11-01-preview/ListTestRunMetricsNamespaces.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_run_server_metrics_config.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_run_server_metrics_config.py new file mode 100644 index 000000000000..c7268e4a7b6b --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_run_server_metrics_config.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestRunClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python list_test_run_server_metrics_config.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestRunClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.get_server_metrics_config( + test_run_id="12345678-1234-1234-1234-123456789012", + ) + print(response) + + +# x-ms-original-file: 2025-11-01-preview/ListTestRunServerMetricsConfig.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_runs.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_runs.py new file mode 100644 index 000000000000..205c2be9dc49 --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_runs.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestRunClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python list_test_runs.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestRunClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.list_test_runs() + for item in response: + print(item) + + +# x-ms-original-file: 2025-11-01-preview/ListTestRuns.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_runs_test_ids.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_runs_test_ids.py new file mode 100644 index 000000000000..aeb2a3d26c62 --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_runs_test_ids.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestRunClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python list_test_runs_test_ids.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestRunClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.list_test_runs() + for item in response: + print(item) + + +# x-ms-original-file: 2025-11-01-preview/ListTestRuns_TestIds.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_server_metrics_config.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_server_metrics_config.py new file mode 100644 index 000000000000..bb7aad4e5e72 --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_test_server_metrics_config.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestAdministrationClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python list_test_server_metrics_config.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestAdministrationClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.get_server_metrics_config( + test_id="12345678-1234-1234-1234-123456789012", + ) + print(response) + + +# x-ms-original-file: 2025-11-01-preview/ListTestServerMetricsConfig.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_tests.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_tests.py new file mode 100644 index 000000000000..8636c79b7368 --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_tests.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestAdministrationClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python list_tests.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestAdministrationClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.list_tests() + for item in response: + print(item) + + +# x-ms-original-file: 2025-11-01-preview/ListTests.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_triggers.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_triggers.py new file mode 100644 index 000000000000..15df0da3295f --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/list_triggers.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestAdministrationClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python list_triggers.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestAdministrationClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.list_triggers() + for item in response: + print(item) + + +# x-ms-original-file: 2025-11-01-preview/ListTriggers.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/patch_test_run_latest_insights.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/patch_test_run_latest_insights.py new file mode 100644 index 000000000000..cd9f418b1e4f --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/patch_test_run_latest_insights.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestRunClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python patch_test_run_latest_insights.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestRunClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.patch_latest_test_run_insights( + test_run_id="12316678-1234-1234-1234-122451189012", + body={"rows": {"1": {"WorkItemUrl": "https://devdiv/workitems/1234"}}}, + ) + print(response) + + +# x-ms-original-file: 2025-11-01-preview/PatchTestRunLatestInsights.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/stop_test_profile_run.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/stop_test_profile_run.py new file mode 100644 index 000000000000..00779fe88db7 --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/stop_test_profile_run.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestRunClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python stop_test_profile_run.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestRunClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.stop_test_profile_run( + test_profile_run_id="12316678-1234-1234-1234-122451189012", + ) + print(response) + + +# x-ms-original-file: 2025-11-01-preview/StopTestProfileRun.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/stop_test_run.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/stop_test_run.py new file mode 100644 index 000000000000..c6f29c830194 --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/stop_test_run.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestRunClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python stop_test_run.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestRunClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.stop_test_run( + test_run_id="12316678-1234-1234-1234-122451189012", + ) + print(response) + + +# x-ms-original-file: 2025-11-01-preview/StopTestRun.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_tests/conftest.py b/sdk/loadtesting/azure-developer-loadtesting/generated_tests/conftest.py new file mode 100644 index 000000000000..4350841161b5 --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_tests/conftest.py @@ -0,0 +1,56 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import os +import pytest +from dotenv import load_dotenv +from devtools_testutils import ( + test_proxy, + add_general_regex_sanitizer, + add_body_key_sanitizer, + add_header_regex_sanitizer, +) + +load_dotenv() + + +# For security, please avoid record sensitive identity information in recordings +@pytest.fixture(scope="session", autouse=True) +def add_sanitizers(test_proxy): + loadtestadministration_subscription_id = os.environ.get( + "LOADTESTADMINISTRATION_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000" + ) + loadtestadministration_tenant_id = os.environ.get( + "LOADTESTADMINISTRATION_TENANT_ID", "00000000-0000-0000-0000-000000000000" + ) + loadtestadministration_client_id = os.environ.get( + "LOADTESTADMINISTRATION_CLIENT_ID", "00000000-0000-0000-0000-000000000000" + ) + loadtestadministration_client_secret = os.environ.get( + "LOADTESTADMINISTRATION_CLIENT_SECRET", "00000000-0000-0000-0000-000000000000" + ) + add_general_regex_sanitizer( + regex=loadtestadministration_subscription_id, value="00000000-0000-0000-0000-000000000000" + ) + add_general_regex_sanitizer(regex=loadtestadministration_tenant_id, value="00000000-0000-0000-0000-000000000000") + add_general_regex_sanitizer(regex=loadtestadministration_client_id, value="00000000-0000-0000-0000-000000000000") + add_general_regex_sanitizer( + regex=loadtestadministration_client_secret, value="00000000-0000-0000-0000-000000000000" + ) + + loadtestrun_subscription_id = os.environ.get("LOADTESTRUN_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + loadtestrun_tenant_id = os.environ.get("LOADTESTRUN_TENANT_ID", "00000000-0000-0000-0000-000000000000") + loadtestrun_client_id = os.environ.get("LOADTESTRUN_CLIENT_ID", "00000000-0000-0000-0000-000000000000") + loadtestrun_client_secret = os.environ.get("LOADTESTRUN_CLIENT_SECRET", "00000000-0000-0000-0000-000000000000") + add_general_regex_sanitizer(regex=loadtestrun_subscription_id, value="00000000-0000-0000-0000-000000000000") + add_general_regex_sanitizer(regex=loadtestrun_tenant_id, value="00000000-0000-0000-0000-000000000000") + add_general_regex_sanitizer(regex=loadtestrun_client_id, value="00000000-0000-0000-0000-000000000000") + add_general_regex_sanitizer(regex=loadtestrun_client_secret, value="00000000-0000-0000-0000-000000000000") + + add_header_regex_sanitizer(key="Set-Cookie", value="[set-cookie;]") + add_header_regex_sanitizer(key="Cookie", value="cookie;") + add_body_key_sanitizer(json_path="$..access_token", value="access_token") diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_tests/test_load_test_administration.py b/sdk/loadtesting/azure-developer-loadtesting/generated_tests/test_load_test_administration.py new file mode 100644 index 000000000000..00556f66f017 --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_tests/test_load_test_administration.py @@ -0,0 +1,495 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import pytest +from devtools_testutils import recorded_by_proxy +from testpreparer import LoadTestAdministrationClientTestBase, LoadTestAdministrationPreparer + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestLoadTestAdministration(LoadTestAdministrationClientTestBase): + @LoadTestAdministrationPreparer() + @recorded_by_proxy + def test_create_or_update_test(self, loadtestadministration_endpoint): + client = self.create_client(endpoint=loadtestadministration_endpoint) + response = client.create_or_update_test( + test_id="str", + body={ + "testId": "str", + "autoStopCriteria": { + "autoStopDisabled": bool, + "errorRate": 0.0, + "errorRateTimeWindowInSeconds": 0, + "maximumVirtualUsersPerEngine": 0, + }, + "baselineTestRunId": "str", + "certificate": {"name": "str", "type": "str", "value": "str"}, + "createdBy": "str", + "createdDateTime": "2020-02-20 00:00:00", + "description": "str", + "displayName": "str", + "engineBuiltInIdentityIds": ["str"], + "engineBuiltInIdentityType": "str", + "environmentVariables": {"str": "str"}, + "estimatedVirtualUserHours": 0.0, + "inputArtifacts": { + "additionalFileInfo": [ + { + "fileName": "str", + "expireDateTime": "2020-02-20 00:00:00", + "fileType": "str", + "url": "str", + "validationFailureDetails": "str", + "validationStatus": "str", + } + ], + "configFileInfo": { + "fileName": "str", + "expireDateTime": "2020-02-20 00:00:00", + "fileType": "str", + "url": "str", + "validationFailureDetails": "str", + "validationStatus": "str", + }, + "inputArtifactsZipFileInfo": { + "fileName": "str", + "expireDateTime": "2020-02-20 00:00:00", + "fileType": "str", + "url": "str", + "validationFailureDetails": "str", + "validationStatus": "str", + }, + "testScriptFileInfo": { + "fileName": "str", + "expireDateTime": "2020-02-20 00:00:00", + "fileType": "str", + "url": "str", + "validationFailureDetails": "str", + "validationStatus": "str", + }, + "urlTestConfigFileInfo": { + "fileName": "str", + "expireDateTime": "2020-02-20 00:00:00", + "fileType": "str", + "url": "str", + "validationFailureDetails": "str", + "validationStatus": "str", + }, + "userPropFileInfo": { + "fileName": "str", + "expireDateTime": "2020-02-20 00:00:00", + "fileType": "str", + "url": "str", + "validationFailureDetails": "str", + "validationStatus": "str", + }, + }, + "keyvaultReferenceIdentityId": "str", + "keyvaultReferenceIdentityType": "str", + "kind": "str", + "lastModifiedBy": "str", + "lastModifiedDateTime": "2020-02-20 00:00:00", + "loadTestConfiguration": { + "engineInstances": 0, + "optionalLoadTestConfig": { + "duration": 0, + "endpointUrl": "str", + "maxResponseTimeInMs": 0, + "rampUpTime": 0, + "requestsPerSecond": 0, + "virtualUsers": 0, + }, + "quickStartTest": bool, + "regionalLoadTestConfig": [{"engineInstances": 0, "region": "str"}], + "splitAllCSVs": bool, + }, + "metricsReferenceIdentityId": "str", + "metricsReferenceIdentityType": "str", + "passFailCriteria": { + "passFailMetrics": { + "str": { + "action": "str", + "actualValue": 0.0, + "aggregate": "str", + "clientMetric": "str", + "condition": "str", + "requestName": "str", + "result": "str", + "value": 0.0, + } + }, + "passFailServerMetrics": { + "str": { + "aggregation": "str", + "condition": "str", + "metricName": "str", + "metricNamespace": "str", + "resourceId": "str", + "value": 0.0, + "action": "str", + "actualValue": 0.0, + "result": "str", + } + }, + }, + "preferences": 
{"enableAIErrorInsights": bool}, + "publicIPDisabled": bool, + "secrets": {"str": {"type": "str", "value": "str"}}, + "subnetId": "str", + }, + ) + + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy + def test_create_or_update_app_components(self, loadtestadministration_endpoint): + client = self.create_client(endpoint=loadtestadministration_endpoint) + response = client.create_or_update_app_components( + test_id="str", + body={ + "components": { + "str": { + "resourceId": "str", + "resourceName": "str", + "resourceType": "str", + "displayName": "str", + "kind": "str", + "resourceGroup": "str", + "subscriptionId": "str", + } + }, + "createdBy": "str", + "createdDateTime": "2020-02-20 00:00:00", + "lastModifiedBy": "str", + "lastModifiedDateTime": "2020-02-20 00:00:00", + "testId": "str", + }, + ) + + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy + def test_create_or_update_server_metrics_config(self, loadtestadministration_endpoint): + client = self.create_client(endpoint=loadtestadministration_endpoint) + response = client.create_or_update_server_metrics_config( + test_id="str", + body={ + "metrics": { + "str": { + "aggregation": "str", + "metricNamespace": "str", + "name": "str", + "resourceId": "str", + "resourceType": "str", + "displayDescription": "str", + "id": "str", + "unit": "str", + } + }, + "createdBy": "str", + "createdDateTime": "2020-02-20 00:00:00", + "lastModifiedBy": "str", + "lastModifiedDateTime": "2020-02-20 00:00:00", + "testId": "str", + }, + ) + + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy + def test_get_app_components(self, loadtestadministration_endpoint): + client = self.create_client(endpoint=loadtestadministration_endpoint) + response = client.get_app_components( + test_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy + def test_get_server_metrics_config(self, loadtestadministration_endpoint): + client = self.create_client(endpoint=loadtestadministration_endpoint) + response = client.get_server_metrics_config( + test_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy + def test_get_test(self, loadtestadministration_endpoint): + client = self.create_client(endpoint=loadtestadministration_endpoint) + response = client.get_test( + test_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy + def test_get_test_file(self, loadtestadministration_endpoint): + client = self.create_client(endpoint=loadtestadministration_endpoint) + response = client.get_test_file( + test_id="str", + file_name="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy + def test_list_test_files(self, loadtestadministration_endpoint): + client = self.create_client(endpoint=loadtestadministration_endpoint) + response = client.list_test_files( + test_id="str", + ) + result = [r for r in response] + # please add some check logic here by yourself + # ... 
+ + @LoadTestAdministrationPreparer() + @recorded_by_proxy + def test_list_tests(self, loadtestadministration_endpoint): + client = self.create_client(endpoint=loadtestadministration_endpoint) + response = client.list_tests() + result = [r for r in response] + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy + def test_delete_test_file(self, loadtestadministration_endpoint): + client = self.create_client(endpoint=loadtestadministration_endpoint) + response = client.delete_test_file( + test_id="str", + file_name="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy + def test_delete_test(self, loadtestadministration_endpoint): + client = self.create_client(endpoint=loadtestadministration_endpoint) + response = client.delete_test( + test_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy + def test_create_or_update_test_profile(self, loadtestadministration_endpoint): + client = self.create_client(endpoint=loadtestadministration_endpoint) + response = client.create_or_update_test_profile( + test_profile_id="str", + body={ + "testProfileId": "str", + "createdBy": "str", + "createdDateTime": "2020-02-20 00:00:00", + "description": "str", + "displayName": "str", + "lastModifiedBy": "str", + "lastModifiedDateTime": "2020-02-20 00:00:00", + "targetResourceConfigurations": "target_resource_configurations", + "targetResourceId": "str", + "testId": "str", + }, + ) + + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy + def test_delete_test_profile(self, loadtestadministration_endpoint): + client = self.create_client(endpoint=loadtestadministration_endpoint) + response = client.delete_test_profile( + test_profile_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy + def test_get_test_profile(self, loadtestadministration_endpoint): + client = self.create_client(endpoint=loadtestadministration_endpoint) + response = client.get_test_profile( + test_profile_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy + def test_list_test_profiles(self, loadtestadministration_endpoint): + client = self.create_client(endpoint=loadtestadministration_endpoint) + response = client.list_test_profiles() + result = [r for r in response] + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy + def test_create_or_update_trigger(self, loadtestadministration_endpoint): + client = self.create_client(endpoint=loadtestadministration_endpoint) + response = client.create_or_update_trigger( + trigger_id="str", + body={ + "displayName": "str", + "kind": "ScheduleTestsTrigger", + "testIds": ["str"], + "triggerId": "str", + "createdBy": "str", + "createdDateTime": "2020-02-20 00:00:00", + "description": "str", + "lastModifiedBy": "str", + "lastModifiedDateTime": "2020-02-20 00:00:00", + "recurrence": "recurrence", + "recurrenceStatus": {"nextScheduledDateTimes": ["2020-02-20 00:00:00"], "remainingOccurrences": 0}, + "startDateTime": "2020-02-20 00:00:00", + "state": "str", + "stateDetails": {"message": "str"}, + }, + ) + + # please add some check logic here by yourself + # ... 
+ + @LoadTestAdministrationPreparer() + @recorded_by_proxy + def test_delete_trigger(self, loadtestadministration_endpoint): + client = self.create_client(endpoint=loadtestadministration_endpoint) + response = client.delete_trigger( + trigger_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy + def test_get_trigger(self, loadtestadministration_endpoint): + client = self.create_client(endpoint=loadtestadministration_endpoint) + response = client.get_trigger( + trigger_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy + def test_list_triggers(self, loadtestadministration_endpoint): + client = self.create_client(endpoint=loadtestadministration_endpoint) + response = client.list_triggers() + result = [r for r in response] + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy + def test_create_or_update_notification_rule(self, loadtestadministration_endpoint): + client = self.create_client(endpoint=loadtestadministration_endpoint) + response = client.create_or_update_notification_rule( + notification_rule_id="str", + body={ + "actionGroupIds": ["str"], + "displayName": "str", + "eventFilters": {"str": "tests_notification_event_filter"}, + "notificationRuleId": "str", + "scope": "Tests", + "createdBy": "str", + "createdDateTime": "2020-02-20 00:00:00", + "lastModifiedBy": "str", + "lastModifiedDateTime": "2020-02-20 00:00:00", + "testIds": ["str"], + }, + ) + + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy + def test_delete_notification_rule(self, loadtestadministration_endpoint): + client = self.create_client(endpoint=loadtestadministration_endpoint) + response = client.delete_notification_rule( + notification_rule_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy + def test_get_notification_rule(self, loadtestadministration_endpoint): + client = self.create_client(endpoint=loadtestadministration_endpoint) + response = client.get_notification_rule( + notification_rule_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy + def test_list_notification_rules(self, loadtestadministration_endpoint): + client = self.create_client(endpoint=loadtestadministration_endpoint) + response = client.list_notification_rules() + result = [r for r in response] + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy + def test_begin_clone_test(self, loadtestadministration_endpoint): + client = self.create_client(endpoint=loadtestadministration_endpoint) + response = client.begin_clone_test( + test_id="str", + body={"newTestId": "str", "description": "str", "displayName": "str"}, + new_test_id="str", + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... 
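# Editorial note (not part of the generated file): begin_clone_test returns an
# azure-core LROPoller, which is why the generated test above chains ".result()".
# A minimal standalone sketch of polling it explicitly follows; "ENDPOINT" and the
# ids are placeholders modelled on the generated code, not values from this patch.
from azure.identity import DefaultAzureCredential
from azure.developer.loadtesting import LoadTestAdministrationClient

client = LoadTestAdministrationClient(endpoint="ENDPOINT", credential=DefaultAzureCredential())
poller = client.begin_clone_test(
    test_id="existing-test-id",
    body={"newTestId": "cloned-test-id", "description": "clone", "displayName": "Cloned test"},
    new_test_id="cloned-test-id",
)
poller.wait()                  # block until the service reports a terminal state
print(poller.status())         # e.g. "Succeeded"
cloned_test = poller.result()  # the cloned test resource returned by the service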
+ + @LoadTestAdministrationPreparer() + @recorded_by_proxy + def test_begin_generate_test_plan_recommendations(self, loadtestadministration_endpoint): + client = self.create_client(endpoint=loadtestadministration_endpoint) + response = client.begin_generate_test_plan_recommendations( + test_id="str", + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy + def test_get_operation_status(self, loadtestadministration_endpoint): + client = self.create_client(endpoint=loadtestadministration_endpoint) + response = client.get_operation_status( + operation_id="str", + ) + + # please add some check logic here by yourself + # ... diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_tests/test_load_test_administration_async.py b/sdk/loadtesting/azure-developer-loadtesting/generated_tests/test_load_test_administration_async.py new file mode 100644 index 000000000000..0a57f1730907 --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_tests/test_load_test_administration_async.py @@ -0,0 +1,500 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import pytest +from devtools_testutils.aio import recorded_by_proxy_async +from testpreparer import LoadTestAdministrationPreparer +from testpreparer_async import LoadTestAdministrationClientTestBaseAsync + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestLoadTestAdministrationAsync(LoadTestAdministrationClientTestBaseAsync): + @LoadTestAdministrationPreparer() + @recorded_by_proxy_async + async def test_create_or_update_test(self, loadtestadministration_endpoint): + client = self.create_async_client(endpoint=loadtestadministration_endpoint) + response = await client.create_or_update_test( + test_id="str", + body={ + "testId": "str", + "autoStopCriteria": { + "autoStopDisabled": bool, + "errorRate": 0.0, + "errorRateTimeWindowInSeconds": 0, + "maximumVirtualUsersPerEngine": 0, + }, + "baselineTestRunId": "str", + "certificate": {"name": "str", "type": "str", "value": "str"}, + "createdBy": "str", + "createdDateTime": "2020-02-20 00:00:00", + "description": "str", + "displayName": "str", + "engineBuiltInIdentityIds": ["str"], + "engineBuiltInIdentityType": "str", + "environmentVariables": {"str": "str"}, + "estimatedVirtualUserHours": 0.0, + "inputArtifacts": { + "additionalFileInfo": [ + { + "fileName": "str", + "expireDateTime": "2020-02-20 00:00:00", + "fileType": "str", + "url": "str", + "validationFailureDetails": "str", + "validationStatus": "str", + } + ], + "configFileInfo": { + "fileName": "str", + "expireDateTime": "2020-02-20 00:00:00", + "fileType": "str", + "url": "str", + "validationFailureDetails": "str", + "validationStatus": "str", + }, + "inputArtifactsZipFileInfo": { + "fileName": "str", + "expireDateTime": "2020-02-20 00:00:00", + "fileType": "str", + "url": "str", + "validationFailureDetails": "str", + "validationStatus": "str", + }, + "testScriptFileInfo": { + "fileName": "str", + "expireDateTime": 
"2020-02-20 00:00:00", + "fileType": "str", + "url": "str", + "validationFailureDetails": "str", + "validationStatus": "str", + }, + "urlTestConfigFileInfo": { + "fileName": "str", + "expireDateTime": "2020-02-20 00:00:00", + "fileType": "str", + "url": "str", + "validationFailureDetails": "str", + "validationStatus": "str", + }, + "userPropFileInfo": { + "fileName": "str", + "expireDateTime": "2020-02-20 00:00:00", + "fileType": "str", + "url": "str", + "validationFailureDetails": "str", + "validationStatus": "str", + }, + }, + "keyvaultReferenceIdentityId": "str", + "keyvaultReferenceIdentityType": "str", + "kind": "str", + "lastModifiedBy": "str", + "lastModifiedDateTime": "2020-02-20 00:00:00", + "loadTestConfiguration": { + "engineInstances": 0, + "optionalLoadTestConfig": { + "duration": 0, + "endpointUrl": "str", + "maxResponseTimeInMs": 0, + "rampUpTime": 0, + "requestsPerSecond": 0, + "virtualUsers": 0, + }, + "quickStartTest": bool, + "regionalLoadTestConfig": [{"engineInstances": 0, "region": "str"}], + "splitAllCSVs": bool, + }, + "metricsReferenceIdentityId": "str", + "metricsReferenceIdentityType": "str", + "passFailCriteria": { + "passFailMetrics": { + "str": { + "action": "str", + "actualValue": 0.0, + "aggregate": "str", + "clientMetric": "str", + "condition": "str", + "requestName": "str", + "result": "str", + "value": 0.0, + } + }, + "passFailServerMetrics": { + "str": { + "aggregation": "str", + "condition": "str", + "metricName": "str", + "metricNamespace": "str", + "resourceId": "str", + "value": 0.0, + "action": "str", + "actualValue": 0.0, + "result": "str", + } + }, + }, + "preferences": {"enableAIErrorInsights": bool}, + "publicIPDisabled": bool, + "secrets": {"str": {"type": "str", "value": "str"}}, + "subnetId": "str", + }, + ) + + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy_async + async def test_create_or_update_app_components(self, loadtestadministration_endpoint): + client = self.create_async_client(endpoint=loadtestadministration_endpoint) + response = await client.create_or_update_app_components( + test_id="str", + body={ + "components": { + "str": { + "resourceId": "str", + "resourceName": "str", + "resourceType": "str", + "displayName": "str", + "kind": "str", + "resourceGroup": "str", + "subscriptionId": "str", + } + }, + "createdBy": "str", + "createdDateTime": "2020-02-20 00:00:00", + "lastModifiedBy": "str", + "lastModifiedDateTime": "2020-02-20 00:00:00", + "testId": "str", + }, + ) + + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy_async + async def test_create_or_update_server_metrics_config(self, loadtestadministration_endpoint): + client = self.create_async_client(endpoint=loadtestadministration_endpoint) + response = await client.create_or_update_server_metrics_config( + test_id="str", + body={ + "metrics": { + "str": { + "aggregation": "str", + "metricNamespace": "str", + "name": "str", + "resourceId": "str", + "resourceType": "str", + "displayDescription": "str", + "id": "str", + "unit": "str", + } + }, + "createdBy": "str", + "createdDateTime": "2020-02-20 00:00:00", + "lastModifiedBy": "str", + "lastModifiedDateTime": "2020-02-20 00:00:00", + "testId": "str", + }, + ) + + # please add some check logic here by yourself + # ... 
+ + @LoadTestAdministrationPreparer() + @recorded_by_proxy_async + async def test_get_app_components(self, loadtestadministration_endpoint): + client = self.create_async_client(endpoint=loadtestadministration_endpoint) + response = await client.get_app_components( + test_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy_async + async def test_get_server_metrics_config(self, loadtestadministration_endpoint): + client = self.create_async_client(endpoint=loadtestadministration_endpoint) + response = await client.get_server_metrics_config( + test_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy_async + async def test_get_test(self, loadtestadministration_endpoint): + client = self.create_async_client(endpoint=loadtestadministration_endpoint) + response = await client.get_test( + test_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy_async + async def test_get_test_file(self, loadtestadministration_endpoint): + client = self.create_async_client(endpoint=loadtestadministration_endpoint) + response = await client.get_test_file( + test_id="str", + file_name="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy_async + async def test_list_test_files(self, loadtestadministration_endpoint): + client = self.create_async_client(endpoint=loadtestadministration_endpoint) + response = client.list_test_files( + test_id="str", + ) + result = [r async for r in response] + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy_async + async def test_list_tests(self, loadtestadministration_endpoint): + client = self.create_async_client(endpoint=loadtestadministration_endpoint) + response = client.list_tests() + result = [r async for r in response] + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy_async + async def test_delete_test_file(self, loadtestadministration_endpoint): + client = self.create_async_client(endpoint=loadtestadministration_endpoint) + response = await client.delete_test_file( + test_id="str", + file_name="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy_async + async def test_delete_test(self, loadtestadministration_endpoint): + client = self.create_async_client(endpoint=loadtestadministration_endpoint) + response = await client.delete_test( + test_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy_async + async def test_create_or_update_test_profile(self, loadtestadministration_endpoint): + client = self.create_async_client(endpoint=loadtestadministration_endpoint) + response = await client.create_or_update_test_profile( + test_profile_id="str", + body={ + "testProfileId": "str", + "createdBy": "str", + "createdDateTime": "2020-02-20 00:00:00", + "description": "str", + "displayName": "str", + "lastModifiedBy": "str", + "lastModifiedDateTime": "2020-02-20 00:00:00", + "targetResourceConfigurations": "target_resource_configurations", + "targetResourceId": "str", + "testId": "str", + }, + ) + + # please add some check logic here by yourself + # ... 
+ + @LoadTestAdministrationPreparer() + @recorded_by_proxy_async + async def test_delete_test_profile(self, loadtestadministration_endpoint): + client = self.create_async_client(endpoint=loadtestadministration_endpoint) + response = await client.delete_test_profile( + test_profile_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy_async + async def test_get_test_profile(self, loadtestadministration_endpoint): + client = self.create_async_client(endpoint=loadtestadministration_endpoint) + response = await client.get_test_profile( + test_profile_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy_async + async def test_list_test_profiles(self, loadtestadministration_endpoint): + client = self.create_async_client(endpoint=loadtestadministration_endpoint) + response = client.list_test_profiles() + result = [r async for r in response] + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy_async + async def test_create_or_update_trigger(self, loadtestadministration_endpoint): + client = self.create_async_client(endpoint=loadtestadministration_endpoint) + response = await client.create_or_update_trigger( + trigger_id="str", + body={ + "displayName": "str", + "kind": "ScheduleTestsTrigger", + "testIds": ["str"], + "triggerId": "str", + "createdBy": "str", + "createdDateTime": "2020-02-20 00:00:00", + "description": "str", + "lastModifiedBy": "str", + "lastModifiedDateTime": "2020-02-20 00:00:00", + "recurrence": "recurrence", + "recurrenceStatus": {"nextScheduledDateTimes": ["2020-02-20 00:00:00"], "remainingOccurrences": 0}, + "startDateTime": "2020-02-20 00:00:00", + "state": "str", + "stateDetails": {"message": "str"}, + }, + ) + + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy_async + async def test_delete_trigger(self, loadtestadministration_endpoint): + client = self.create_async_client(endpoint=loadtestadministration_endpoint) + response = await client.delete_trigger( + trigger_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy_async + async def test_get_trigger(self, loadtestadministration_endpoint): + client = self.create_async_client(endpoint=loadtestadministration_endpoint) + response = await client.get_trigger( + trigger_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy_async + async def test_list_triggers(self, loadtestadministration_endpoint): + client = self.create_async_client(endpoint=loadtestadministration_endpoint) + response = client.list_triggers() + result = [r async for r in response] + # please add some check logic here by yourself + # ... 
+ + @LoadTestAdministrationPreparer() + @recorded_by_proxy_async + async def test_create_or_update_notification_rule(self, loadtestadministration_endpoint): + client = self.create_async_client(endpoint=loadtestadministration_endpoint) + response = await client.create_or_update_notification_rule( + notification_rule_id="str", + body={ + "actionGroupIds": ["str"], + "displayName": "str", + "eventFilters": {"str": "tests_notification_event_filter"}, + "notificationRuleId": "str", + "scope": "Tests", + "createdBy": "str", + "createdDateTime": "2020-02-20 00:00:00", + "lastModifiedBy": "str", + "lastModifiedDateTime": "2020-02-20 00:00:00", + "testIds": ["str"], + }, + ) + + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy_async + async def test_delete_notification_rule(self, loadtestadministration_endpoint): + client = self.create_async_client(endpoint=loadtestadministration_endpoint) + response = await client.delete_notification_rule( + notification_rule_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy_async + async def test_get_notification_rule(self, loadtestadministration_endpoint): + client = self.create_async_client(endpoint=loadtestadministration_endpoint) + response = await client.get_notification_rule( + notification_rule_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy_async + async def test_list_notification_rules(self, loadtestadministration_endpoint): + client = self.create_async_client(endpoint=loadtestadministration_endpoint) + response = client.list_notification_rules() + result = [r async for r in response] + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy_async + async def test_begin_clone_test(self, loadtestadministration_endpoint): + client = self.create_async_client(endpoint=loadtestadministration_endpoint) + response = await ( + await client.begin_clone_test( + test_id="str", + body={"newTestId": "str", "description": "str", "displayName": "str"}, + new_test_id="str", + ) + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy_async + async def test_begin_generate_test_plan_recommendations(self, loadtestadministration_endpoint): + client = self.create_async_client(endpoint=loadtestadministration_endpoint) + response = await ( + await client.begin_generate_test_plan_recommendations( + test_id="str", + ) + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... + + @LoadTestAdministrationPreparer() + @recorded_by_proxy_async + async def test_get_operation_status(self, loadtestadministration_endpoint): + client = self.create_async_client(endpoint=loadtestadministration_endpoint) + response = await client.get_operation_status( + operation_id="str", + ) + + # please add some check logic here by yourself + # ... 
diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_tests/test_load_test_run.py b/sdk/loadtesting/azure-developer-loadtesting/generated_tests/test_load_test_run.py new file mode 100644 index 000000000000..bd43cd3fcc28 --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_tests/test_load_test_run.py @@ -0,0 +1,281 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import pytest +from devtools_testutils import recorded_by_proxy +from testpreparer import LoadTestRunClientTestBase, LoadTestRunPreparer + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestLoadTestRun(LoadTestRunClientTestBase): + @LoadTestRunPreparer() + @recorded_by_proxy + def test_create_or_update_app_components(self, loadtestrun_endpoint): + client = self.create_client(endpoint=loadtestrun_endpoint) + response = client.create_or_update_app_components( + test_run_id="str", + body={ + "components": { + "str": { + "resourceId": "str", + "resourceName": "str", + "resourceType": "str", + "displayName": "str", + "kind": "str", + "resourceGroup": "str", + "subscriptionId": "str", + } + }, + "createdBy": "str", + "createdDateTime": "2020-02-20 00:00:00", + "lastModifiedBy": "str", + "lastModifiedDateTime": "2020-02-20 00:00:00", + "testRunId": "str", + }, + ) + + # please add some check logic here by yourself + # ... + + @LoadTestRunPreparer() + @recorded_by_proxy + def test_create_or_update_server_metrics_config(self, loadtestrun_endpoint): + client = self.create_client(endpoint=loadtestrun_endpoint) + response = client.create_or_update_server_metrics_config( + test_run_id="str", + body={ + "createdBy": "str", + "createdDateTime": "2020-02-20 00:00:00", + "lastModifiedBy": "str", + "lastModifiedDateTime": "2020-02-20 00:00:00", + "metrics": { + "str": { + "aggregation": "str", + "metricNamespace": "str", + "name": "str", + "resourceId": "str", + "resourceType": "str", + "displayDescription": "str", + "id": "str", + "unit": "str", + } + }, + "testRunId": "str", + }, + ) + + # please add some check logic here by yourself + # ... + + @LoadTestRunPreparer() + @recorded_by_proxy + def test_delete_test_run(self, loadtestrun_endpoint): + client = self.create_client(endpoint=loadtestrun_endpoint) + response = client.delete_test_run( + test_run_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestRunPreparer() + @recorded_by_proxy + def test_get_app_components(self, loadtestrun_endpoint): + client = self.create_client(endpoint=loadtestrun_endpoint) + response = client.get_app_components( + test_run_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestRunPreparer() + @recorded_by_proxy + def test_get_server_metrics_config(self, loadtestrun_endpoint): + client = self.create_client(endpoint=loadtestrun_endpoint) + response = client.get_server_metrics_config( + test_run_id="str", + ) + + # please add some check logic here by yourself + # ... 
+ + @LoadTestRunPreparer() + @recorded_by_proxy + def test_get_test_run(self, loadtestrun_endpoint): + client = self.create_client(endpoint=loadtestrun_endpoint) + response = client.get_test_run( + test_run_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestRunPreparer() + @recorded_by_proxy + def test_get_test_run_file(self, loadtestrun_endpoint): + client = self.create_client(endpoint=loadtestrun_endpoint) + response = client.get_test_run_file( + test_run_id="str", + file_name="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestRunPreparer() + @recorded_by_proxy + def test_list_metric_dimension_values(self, loadtestrun_endpoint): + client = self.create_client(endpoint=loadtestrun_endpoint) + response = client.list_metric_dimension_values( + test_run_id="str", + name="str", + metric_name="str", + metric_namespace="str", + time_interval="str", + ) + result = [r for r in response] + # please add some check logic here by yourself + # ... + + @LoadTestRunPreparer() + @recorded_by_proxy + def test_get_metric_definitions(self, loadtestrun_endpoint): + client = self.create_client(endpoint=loadtestrun_endpoint) + response = client.get_metric_definitions( + test_run_id="str", + metric_namespace="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestRunPreparer() + @recorded_by_proxy + def test_get_metric_namespaces(self, loadtestrun_endpoint): + client = self.create_client(endpoint=loadtestrun_endpoint) + response = client.get_metric_namespaces( + test_run_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestRunPreparer() + @recorded_by_proxy + def test_list_metrics(self, loadtestrun_endpoint): + client = self.create_client(endpoint=loadtestrun_endpoint) + response = client.list_metrics( + test_run_id="str", + metric_name="str", + metric_namespace="str", + time_interval="str", + ) + result = [r for r in response] + # please add some check logic here by yourself + # ... + + @LoadTestRunPreparer() + @recorded_by_proxy + def test_list_test_runs(self, loadtestrun_endpoint): + client = self.create_client(endpoint=loadtestrun_endpoint) + response = client.list_test_runs() + result = [r for r in response] + # please add some check logic here by yourself + # ... + + @LoadTestRunPreparer() + @recorded_by_proxy + def test_stop_test_run(self, loadtestrun_endpoint): + client = self.create_client(endpoint=loadtestrun_endpoint) + response = client.stop_test_run( + test_run_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestRunPreparer() + @recorded_by_proxy + def test_delete_test_profile_run(self, loadtestrun_endpoint): + client = self.create_client(endpoint=loadtestrun_endpoint) + response = client.delete_test_profile_run( + test_profile_run_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestRunPreparer() + @recorded_by_proxy + def test_get_test_profile_run(self, loadtestrun_endpoint): + client = self.create_client(endpoint=loadtestrun_endpoint) + response = client.get_test_profile_run( + test_profile_run_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestRunPreparer() + @recorded_by_proxy + def test_list_test_profile_runs(self, loadtestrun_endpoint): + client = self.create_client(endpoint=loadtestrun_endpoint) + response = client.list_test_profile_runs() + result = [r for r in response] + # please add some check logic here by yourself + # ... 
+ + @LoadTestRunPreparer() + @recorded_by_proxy + def test_stop_test_profile_run(self, loadtestrun_endpoint): + client = self.create_client(endpoint=loadtestrun_endpoint) + response = client.stop_test_profile_run( + test_profile_run_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestRunPreparer() + @recorded_by_proxy + def test_get_latest_test_run_insights(self, loadtestrun_endpoint): + client = self.create_client(endpoint=loadtestrun_endpoint) + response = client.get_latest_test_run_insights( + test_run_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestRunPreparer() + @recorded_by_proxy + def test_patch_latest_test_run_insights(self, loadtestrun_endpoint): + client = self.create_client(endpoint=loadtestrun_endpoint) + response = client.patch_latest_test_run_insights( + test_run_id="str", + body={ + "columns": [{"dataType": "str", "name": "str"}], + "rows": {"str": {"str": "str"}}, + "status": "str", + "version": 0, + }, + ) + + # please add some check logic here by yourself + # ... + + @LoadTestRunPreparer() + @recorded_by_proxy + def test_begin_generate_test_run_insights(self, loadtestrun_endpoint): + client = self.create_client(endpoint=loadtestrun_endpoint) + response = client.begin_generate_test_run_insights( + test_run_id="str", + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_tests/test_load_test_run_async.py b/sdk/loadtesting/azure-developer-loadtesting/generated_tests/test_load_test_run_async.py new file mode 100644 index 000000000000..6175fed44928 --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_tests/test_load_test_run_async.py @@ -0,0 +1,284 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import pytest +from devtools_testutils.aio import recorded_by_proxy_async +from testpreparer import LoadTestRunPreparer +from testpreparer_async import LoadTestRunClientTestBaseAsync + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestLoadTestRunAsync(LoadTestRunClientTestBaseAsync): + @LoadTestRunPreparer() + @recorded_by_proxy_async + async def test_create_or_update_app_components(self, loadtestrun_endpoint): + client = self.create_async_client(endpoint=loadtestrun_endpoint) + response = await client.create_or_update_app_components( + test_run_id="str", + body={ + "components": { + "str": { + "resourceId": "str", + "resourceName": "str", + "resourceType": "str", + "displayName": "str", + "kind": "str", + "resourceGroup": "str", + "subscriptionId": "str", + } + }, + "createdBy": "str", + "createdDateTime": "2020-02-20 00:00:00", + "lastModifiedBy": "str", + "lastModifiedDateTime": "2020-02-20 00:00:00", + "testRunId": "str", + }, + ) + + # please add some check logic here by yourself + # ... 
+ + @LoadTestRunPreparer() + @recorded_by_proxy_async + async def test_create_or_update_server_metrics_config(self, loadtestrun_endpoint): + client = self.create_async_client(endpoint=loadtestrun_endpoint) + response = await client.create_or_update_server_metrics_config( + test_run_id="str", + body={ + "createdBy": "str", + "createdDateTime": "2020-02-20 00:00:00", + "lastModifiedBy": "str", + "lastModifiedDateTime": "2020-02-20 00:00:00", + "metrics": { + "str": { + "aggregation": "str", + "metricNamespace": "str", + "name": "str", + "resourceId": "str", + "resourceType": "str", + "displayDescription": "str", + "id": "str", + "unit": "str", + } + }, + "testRunId": "str", + }, + ) + + # please add some check logic here by yourself + # ... + + @LoadTestRunPreparer() + @recorded_by_proxy_async + async def test_delete_test_run(self, loadtestrun_endpoint): + client = self.create_async_client(endpoint=loadtestrun_endpoint) + response = await client.delete_test_run( + test_run_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestRunPreparer() + @recorded_by_proxy_async + async def test_get_app_components(self, loadtestrun_endpoint): + client = self.create_async_client(endpoint=loadtestrun_endpoint) + response = await client.get_app_components( + test_run_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestRunPreparer() + @recorded_by_proxy_async + async def test_get_server_metrics_config(self, loadtestrun_endpoint): + client = self.create_async_client(endpoint=loadtestrun_endpoint) + response = await client.get_server_metrics_config( + test_run_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestRunPreparer() + @recorded_by_proxy_async + async def test_get_test_run(self, loadtestrun_endpoint): + client = self.create_async_client(endpoint=loadtestrun_endpoint) + response = await client.get_test_run( + test_run_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestRunPreparer() + @recorded_by_proxy_async + async def test_get_test_run_file(self, loadtestrun_endpoint): + client = self.create_async_client(endpoint=loadtestrun_endpoint) + response = await client.get_test_run_file( + test_run_id="str", + file_name="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestRunPreparer() + @recorded_by_proxy_async + async def test_list_metric_dimension_values(self, loadtestrun_endpoint): + client = self.create_async_client(endpoint=loadtestrun_endpoint) + response = client.list_metric_dimension_values( + test_run_id="str", + name="str", + metric_name="str", + metric_namespace="str", + time_interval="str", + ) + result = [r async for r in response] + # please add some check logic here by yourself + # ... + + @LoadTestRunPreparer() + @recorded_by_proxy_async + async def test_get_metric_definitions(self, loadtestrun_endpoint): + client = self.create_async_client(endpoint=loadtestrun_endpoint) + response = await client.get_metric_definitions( + test_run_id="str", + metric_namespace="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestRunPreparer() + @recorded_by_proxy_async + async def test_get_metric_namespaces(self, loadtestrun_endpoint): + client = self.create_async_client(endpoint=loadtestrun_endpoint) + response = await client.get_metric_namespaces( + test_run_id="str", + ) + + # please add some check logic here by yourself + # ... 
+ + @LoadTestRunPreparer() + @recorded_by_proxy_async + async def test_list_metrics(self, loadtestrun_endpoint): + client = self.create_async_client(endpoint=loadtestrun_endpoint) + response = client.list_metrics( + test_run_id="str", + metric_name="str", + metric_namespace="str", + time_interval="str", + ) + result = [r async for r in response] + # please add some check logic here by yourself + # ... + + @LoadTestRunPreparer() + @recorded_by_proxy_async + async def test_list_test_runs(self, loadtestrun_endpoint): + client = self.create_async_client(endpoint=loadtestrun_endpoint) + response = client.list_test_runs() + result = [r async for r in response] + # please add some check logic here by yourself + # ... + + @LoadTestRunPreparer() + @recorded_by_proxy_async + async def test_stop_test_run(self, loadtestrun_endpoint): + client = self.create_async_client(endpoint=loadtestrun_endpoint) + response = await client.stop_test_run( + test_run_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestRunPreparer() + @recorded_by_proxy_async + async def test_delete_test_profile_run(self, loadtestrun_endpoint): + client = self.create_async_client(endpoint=loadtestrun_endpoint) + response = await client.delete_test_profile_run( + test_profile_run_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestRunPreparer() + @recorded_by_proxy_async + async def test_get_test_profile_run(self, loadtestrun_endpoint): + client = self.create_async_client(endpoint=loadtestrun_endpoint) + response = await client.get_test_profile_run( + test_profile_run_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestRunPreparer() + @recorded_by_proxy_async + async def test_list_test_profile_runs(self, loadtestrun_endpoint): + client = self.create_async_client(endpoint=loadtestrun_endpoint) + response = client.list_test_profile_runs() + result = [r async for r in response] + # please add some check logic here by yourself + # ... + + @LoadTestRunPreparer() + @recorded_by_proxy_async + async def test_stop_test_profile_run(self, loadtestrun_endpoint): + client = self.create_async_client(endpoint=loadtestrun_endpoint) + response = await client.stop_test_profile_run( + test_profile_run_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestRunPreparer() + @recorded_by_proxy_async + async def test_get_latest_test_run_insights(self, loadtestrun_endpoint): + client = self.create_async_client(endpoint=loadtestrun_endpoint) + response = await client.get_latest_test_run_insights( + test_run_id="str", + ) + + # please add some check logic here by yourself + # ... + + @LoadTestRunPreparer() + @recorded_by_proxy_async + async def test_patch_latest_test_run_insights(self, loadtestrun_endpoint): + client = self.create_async_client(endpoint=loadtestrun_endpoint) + response = await client.patch_latest_test_run_insights( + test_run_id="str", + body={ + "columns": [{"dataType": "str", "name": "str"}], + "rows": {"str": {"str": "str"}}, + "status": "str", + "version": 0, + }, + ) + + # please add some check logic here by yourself + # ... 
+ + @LoadTestRunPreparer() + @recorded_by_proxy_async + async def test_begin_generate_test_run_insights(self, loadtestrun_endpoint): + client = self.create_async_client(endpoint=loadtestrun_endpoint) + response = await ( + await client.begin_generate_test_run_insights( + test_run_id="str", + ) + ).result() # call '.result()' to poll until service return final result + + # please add some check logic here by yourself + # ... diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_tests/testpreparer.py b/sdk/loadtesting/azure-developer-loadtesting/generated_tests/testpreparer.py new file mode 100644 index 000000000000..d2a65a724bdf --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_tests/testpreparer.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from azure.developer.loadtesting import LoadTestAdministrationClient, LoadTestRunClient +from devtools_testutils import AzureRecordedTestCase, PowerShellPreparer +import functools + + +class LoadTestAdministrationClientTestBase(AzureRecordedTestCase): + + def create_client(self, endpoint): + credential = self.get_credential(LoadTestAdministrationClient) + return self.create_client_from_credential( + LoadTestAdministrationClient, + credential=credential, + endpoint=endpoint, + ) + + +LoadTestAdministrationPreparer = functools.partial( + PowerShellPreparer, + "loadtestadministration", + loadtestadministration_endpoint="https://fake_loadtestadministration_endpoint.com", +) + + +class LoadTestRunClientTestBase(AzureRecordedTestCase): + + def create_client(self, endpoint): + credential = self.get_credential(LoadTestRunClient) + return self.create_client_from_credential( + LoadTestRunClient, + credential=credential, + endpoint=endpoint, + ) + + +LoadTestRunPreparer = functools.partial( + PowerShellPreparer, "loadtestrun", loadtestrun_endpoint="https://fake_loadtestrun_endpoint.com" +) diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_tests/testpreparer_async.py b/sdk/loadtesting/azure-developer-loadtesting/generated_tests/testpreparer_async.py new file mode 100644 index 000000000000..a45d09fcf9b4 --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_tests/testpreparer_async.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +from azure.developer.loadtesting.aio import LoadTestAdministrationClient, LoadTestRunClient +from devtools_testutils import AzureRecordedTestCase + + +class LoadTestAdministrationClientTestBaseAsync(AzureRecordedTestCase): + + def create_async_client(self, endpoint): + credential = self.get_credential(LoadTestAdministrationClient, is_async=True) + return self.create_client_from_credential( + LoadTestAdministrationClient, + credential=credential, + endpoint=endpoint, + ) + + +class LoadTestRunClientTestBaseAsync(AzureRecordedTestCase): + + def create_async_client(self, endpoint): + credential = self.get_credential(LoadTestRunClient, is_async=True) + return self.create_client_from_credential( + LoadTestRunClient, + credential=credential, + endpoint=endpoint, + ) diff --git a/sdk/loadtesting/azure-developer-loadtesting/pyproject.toml b/sdk/loadtesting/azure-developer-loadtesting/pyproject.toml index 9cb578ae2491..36cfbcf34d51 100644 --- a/sdk/loadtesting/azure-developer-loadtesting/pyproject.toml +++ b/sdk/loadtesting/azure-developer-loadtesting/pyproject.toml @@ -1,3 +1,65 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +[build-system] +requires = ["setuptools>=77.0.3", "wheel"] +build-backend = "setuptools.build_meta" + +[project] +name = "azure-developer-loadtesting" +authors = [ + { name = "Microsoft Corporation", email = "azpysdkhelp@microsoft.com" }, +] +description = "Microsoft Corporation Azure Developer Loadtesting Client Library for Python" +license = "MIT" +classifiers = [ + "Development Status :: 4 - Beta", + "Programming Language :: Python", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", +] +requires-python = ">=3.9" +keywords = ["azure", "azure sdk"] + +dependencies = [ + "isodate>=0.6.1", + "azure-core>=1.36.0", + "typing-extensions>=4.6.0", +] +dynamic = [ +"version", "readme" +] + +[project.urls] +repository = "https://github.com/Azure/azure-sdk-for-python" + +[tool.setuptools.dynamic] +version = {attr = "azure.developer.loadtesting._version.VERSION"} +readme = {file = ["README.md", "CHANGELOG.md"], content-type = "text/markdown"} + +[tool.setuptools.packages.find] +exclude = [ + "tests*", + "generated_tests*", + "samples*", + "generated_samples*", + "doc*", + "azure", + "azure.developer", +] + +[tool.setuptools.package-data] +pytyped = ["py.typed"] + [tool.azure-sdk-build] pylint = false pyright = false diff --git a/sdk/loadtesting/azure-developer-loadtesting/samples/create_or_update_app_components_test.py b/sdk/loadtesting/azure-developer-loadtesting/samples/create_or_update_app_components_test.py index 79adb48c202b..8cfc28a1de29 100644 --- a/sdk/loadtesting/azure-developer-loadtesting/samples/create_or_update_app_components_test.py +++ b/sdk/loadtesting/azure-developer-loadtesting/samples/create_or_update_app_components_test.py @@ 
-1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for diff --git a/sdk/loadtesting/azure-developer-loadtesting/samples/create_or_update_test.py b/sdk/loadtesting/azure-developer-loadtesting/samples/create_or_update_test.py index 36b793be030d..402294d5b89d 100644 --- a/sdk/loadtesting/azure-developer-loadtesting/samples/create_or_update_test.py +++ b/sdk/loadtesting/azure-developer-loadtesting/samples/create_or_update_test.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for diff --git a/sdk/loadtesting/azure-developer-loadtesting/samples/create_or_update_test_profile.py b/sdk/loadtesting/azure-developer-loadtesting/samples/create_or_update_test_profile.py index 6d19b26a3aca..b49976147e7c 100644 --- a/sdk/loadtesting/azure-developer-loadtesting/samples/create_or_update_test_profile.py +++ b/sdk/loadtesting/azure-developer-loadtesting/samples/create_or_update_test_profile.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for @@ -59,9 +60,9 @@ "config2": { "instanceMemoryMB": 4096, "httpConcurrency": 100, - } - } - } + }, + }, + }, }, ) diff --git a/sdk/loadtesting/azure-developer-loadtesting/samples/upload_test_file.py b/sdk/loadtesting/azure-developer-loadtesting/samples/upload_test_file.py index ed8380e6ca39..1c8c7fbe5d0e 100644 --- a/sdk/loadtesting/azure-developer-loadtesting/samples/upload_test_file.py +++ b/sdk/loadtesting/azure-developer-loadtesting/samples/upload_test_file.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for diff --git a/sdk/loadtesting/azure-developer-loadtesting/setup.py b/sdk/loadtesting/azure-developer-loadtesting/setup.py deleted file mode 100644 index 0d0752f9040b..000000000000 --- a/sdk/loadtesting/azure-developer-loadtesting/setup.py +++ /dev/null @@ -1,71 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -# coding: utf-8 - -import os -import re -from setuptools import setup, find_packages - - -PACKAGE_NAME = "azure-developer-loadtesting" -PACKAGE_PPRINT_NAME = "Azure Developer Loadtesting" - -# a-b-c => a/b/c -package_folder_path = PACKAGE_NAME.replace("-", "/") - -# Version extraction inspired from 'requests' -with open(os.path.join(package_folder_path, "_version.py"), "r") as fd: - version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE).group(1) - -if not version: - raise RuntimeError("Cannot find version information") - - -setup( - name=PACKAGE_NAME, - version=version, - description="Microsoft {} Client Library for Python".format(PACKAGE_PPRINT_NAME), - long_description=open("README.md", "r").read(), - long_description_content_type="text/markdown", - license="MIT License", - author="Microsoft Corporation", - author_email="azpysdkhelp@microsoft.com", - url="https://github.com/Azure/azure-sdk-for-python/tree/main/sdk", - keywords="azure, azure sdk", - classifiers=[ - "Development Status :: 4 - Beta", - "Programming Language :: Python", - "Programming Language :: Python :: 3 :: Only", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.8", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Programming Language :: Python :: 3.12", - "License :: OSI Approved :: MIT License", - ], - zip_safe=False, - packages=find_packages( - exclude=[ - "tests", - # Exclude packages that will be covered by PEP420 or nspkg - "azure", - "azure.developer", - ] - ), - include_package_data=True, - package_data={ - "azure.developer.loadtesting": ["py.typed"], - }, - install_requires=[ - "isodate>=0.6.1", - "azure-core>=1.30.0", - "typing-extensions>=4.6.0", - ], - python_requires=">=3.8", -) diff --git a/sdk/loadtesting/azure-developer-loadtesting/tests/test_async_load_test_administration_ops.py b/sdk/loadtesting/azure-developer-loadtesting/tests/test_async_load_test_administration_ops.py index 655cfce51f17..69232f7a0183 100644 --- a/sdk/loadtesting/azure-developer-loadtesting/tests/test_async_load_test_administration_ops.py +++ b/sdk/loadtesting/azure-developer-loadtesting/tests/test_async_load_test_administration_ops.py @@ -16,6 +16,7 @@ DISPLAY_NAME = "TestingResourcePyTest" + class TestLoadTestAdministrationOperations(LoadTestingAsyncTest): @LoadTestingPreparer() @@ -36,8 +37,18 @@ async def test_create_or_update_load_test(self, loadtesting_endpoint, loadtestin }, "passFailCriteria": { "passFailMetrics": { - "condition1": {"clientmetric": "response_time_ms", "aggregate": "avg", "condition": ">", "value": 300}, - "condition2": {"clientmetric": "error", "aggregate": "percentage", "condition": ">", "value": 50}, + "condition1": { + "clientmetric": "response_time_ms", + "aggregate": "avg", + "condition": ">", + "value": 300, + }, + "condition2": { + "clientmetric": "error", + "aggregate": "percentage", + "condition": ">", + "value": 50, + }, "condition3": { "clientmetric": "latency", "aggregate": "avg", @@ -49,7 +60,8 @@ async def test_create_or_update_load_test(self, loadtesting_endpoint, loadtestin }, "secrets": {}, "environmentVariables": {"my-variable": "value"}, - }) + }, + ) assert result is not None await self.close_admin_client() @@ -137,7 +149,7 @@ async def test_delete_test_file(self, loadtesting_endpoint, loadtesting_test_id) assert result is None await self.close_admin_client() - + 
@LoadTestingPreparer() @recorded_by_proxy_async @pytest.mark.asyncio @@ -230,6 +242,7 @@ async def test_delete_load_test(self, loadtesting_endpoint, loadtesting_test_id) await self.close_admin_client() + class TestTestProfileAdministrationOperations(LoadTestingAsyncTest): @LoadTestingPreparer() @@ -250,8 +263,18 @@ async def test_create_or_update_load_test(self, loadtesting_endpoint, loadtestin }, "passFailCriteria": { "passFailMetrics": { - "condition1": {"clientmetric": "response_time_ms", "aggregate": "avg", "condition": ">", "value": 300}, - "condition2": {"clientmetric": "error", "aggregate": "percentage", "condition": ">", "value": 50}, + "condition1": { + "clientmetric": "response_time_ms", + "aggregate": "avg", + "condition": ">", + "value": 300, + }, + "condition2": { + "clientmetric": "error", + "aggregate": "percentage", + "condition": ">", + "value": 50, + }, "condition3": { "clientmetric": "latency", "aggregate": "avg", @@ -263,7 +286,8 @@ async def test_create_or_update_load_test(self, loadtesting_endpoint, loadtestin }, "secrets": {}, "environmentVariables": {"my-variable": "value"}, - }) + }, + ) assert result is not None await self.close_admin_client() @@ -283,7 +307,9 @@ async def test_get_load_test(self, loadtesting_endpoint, loadtesting_test_id): @LoadTestingPreparer() @recorded_by_proxy_async @pytest.mark.asyncio - async def test_create_or_update_test_profile(self, loadtesting_endpoint, loadtesting_test_id, loadtesting_test_profile_id, loadtesting_target_resource_id): + async def test_create_or_update_test_profile( + self, loadtesting_endpoint, loadtesting_test_id, loadtesting_test_profile_id, loadtesting_target_resource_id + ): set_bodiless_matcher() client = self.create_administration_client(loadtesting_endpoint) @@ -297,16 +323,10 @@ async def test_create_or_update_test_profile(self, loadtesting_endpoint, loadtes "targetResourceConfigurations": { "kind": "FunctionsFlexConsumption", "configurations": { - "config1": { - "instanceMemoryMB": 2048, - "httpConcurrency": 20 - }, - "config2": { - "instanceMemoryMB": 4096, - "httpConcurrency": 100 - }, - } - } + "config1": {"instanceMemoryMB": 2048, "httpConcurrency": 20}, + "config2": {"instanceMemoryMB": 4096, "httpConcurrency": 100}, + }, + }, }, ) @@ -318,7 +338,7 @@ async def test_create_or_update_test_profile(self, loadtesting_endpoint, loadtes @pytest.mark.asyncio async def test_get_test_profile(self, loadtesting_endpoint, loadtesting_test_profile_id): set_bodiless_matcher() - + client = self.create_administration_client(loadtesting_endpoint) result = await client.get_test_profile(loadtesting_test_profile_id) assert result is not None @@ -330,7 +350,7 @@ async def test_get_test_profile(self, loadtesting_endpoint, loadtesting_test_pro @pytest.mark.asyncio async def test_list_test_profiles(self, loadtesting_endpoint): set_bodiless_matcher() - + client = self.create_administration_client(loadtesting_endpoint) result = client.list_test_profiles() assert result is not None diff --git a/sdk/loadtesting/azure-developer-loadtesting/tests/test_async_load_test_run_ops.py b/sdk/loadtesting/azure-developer-loadtesting/tests/test_async_load_test_run_ops.py index a35c670296df..c00da04555ee 100644 --- a/sdk/loadtesting/azure-developer-loadtesting/tests/test_async_load_test_run_ops.py +++ b/sdk/loadtesting/azure-developer-loadtesting/tests/test_async_load_test_run_ops.py @@ -17,6 +17,7 @@ DISPLAY_NAME = "TestingResourcePyTest" NON_EXISTING_RESOURCE = "nonexistingresource" + class TestLoadTestRunOperations(LoadTestingAsyncTest): # 
Pre-requisite: Test creation is needed for test run related tests @@ -37,8 +38,18 @@ async def test_create_or_update_load_test(self, loadtesting_endpoint, loadtestin }, "passFailCriteria": { "passFailMetrics": { - "condition1": {"clientmetric": "response_time_ms", "aggregate": "avg", "condition": ">", "value": 300}, - "condition2": {"clientmetric": "error", "aggregate": "percentage", "condition": ">", "value": 50}, + "condition1": { + "clientmetric": "response_time_ms", + "aggregate": "avg", + "condition": ">", + "value": 300, + }, + "condition2": { + "clientmetric": "error", + "aggregate": "percentage", + "condition": ">", + "value": 50, + }, "condition3": { "clientmetric": "latency", "aggregate": "avg", @@ -50,7 +61,8 @@ async def test_create_or_update_load_test(self, loadtesting_endpoint, loadtestin }, "secrets": {}, "environmentVariables": {"my-variable": "value"}, - }) + }, + ) assert result is not None @@ -66,7 +78,7 @@ async def test_get_load_test(self, loadtesting_endpoint, loadtesting_test_id): assert result is not None await self.close_admin_client() - + @LoadTestingPreparer() @recorded_by_proxy_async async def test_upload_test_file(self, loadtesting_endpoint, loadtesting_test_id): @@ -154,7 +166,7 @@ async def test_list_test_runs(self, loadtesting_endpoint): result = run_client.list_test_runs() assert result is not None items = [item async for item in result] - assert len(items) > 0 # Atleast one item in the page + assert len(items) > 0 # Atleast one item in the page await self.close_run_client() @@ -217,9 +229,7 @@ async def test_create_or_update_app_component( @LoadTestingPreparer() @recorded_by_proxy_async - async def test_get_app_component( - self, loadtesting_endpoint, loadtesting_test_run_id - ): + async def test_get_app_component(self, loadtesting_endpoint, loadtesting_test_run_id): set_bodiless_matcher() run_client = self.create_run_client(loadtesting_endpoint) @@ -260,9 +270,7 @@ async def test_create_or_update_server_metrics_config( @LoadTestingPreparer() @recorded_by_proxy_async - async def test_get_server_metrics_config( - self, loadtesting_endpoint, loadtesting_test_run_id - ): + async def test_get_server_metrics_config(self, loadtesting_endpoint, loadtesting_test_run_id): set_bodiless_matcher() run_client = self.create_run_client(loadtesting_endpoint) @@ -302,7 +310,7 @@ async def test_delete_test_run(self, loadtesting_endpoint, loadtesting_test_run_ assert result is None await self.close_run_client() - + @LoadTestingPreparer() @recorded_by_proxy_async async def test_delete_test(self, loadtesting_endpoint, loadtesting_test_id): @@ -315,6 +323,7 @@ async def test_delete_test(self, loadtesting_endpoint, loadtesting_test_id): await self.close_admin_client() + class TestTestProfileRunOperations(LoadTestingAsyncTest): # Pre-requisite: Test & Test Profile creation is needed for test profile run related tests @@ -335,8 +344,18 @@ async def test_create_or_update_load_test(self, loadtesting_endpoint, loadtestin }, "passFailCriteria": { "passFailMetrics": { - "condition1": {"clientmetric": "response_time_ms", "aggregate": "avg", "condition": ">", "value": 300}, - "condition2": {"clientmetric": "error", "aggregate": "percentage", "condition": ">", "value": 50}, + "condition1": { + "clientmetric": "response_time_ms", + "aggregate": "avg", + "condition": ">", + "value": 300, + }, + "condition2": { + "clientmetric": "error", + "aggregate": "percentage", + "condition": ">", + "value": 50, + }, "condition3": { "clientmetric": "latency", "aggregate": "avg", @@ -398,7 +417,9 @@ 
async def test_get_test_file(self, loadtesting_endpoint, loadtesting_test_id): @LoadTestingPreparer() @recorded_by_proxy_async - async def test_create_or_update_test_profile(self, loadtesting_endpoint, loadtesting_test_id, loadtesting_test_profile_id, loadtesting_target_resource_id): + async def test_create_or_update_test_profile( + self, loadtesting_endpoint, loadtesting_test_id, loadtesting_test_profile_id, loadtesting_target_resource_id + ): set_bodiless_matcher() client = self.create_administration_client(loadtesting_endpoint) @@ -412,16 +433,10 @@ async def test_create_or_update_test_profile(self, loadtesting_endpoint, loadtes "targetResourceConfigurations": { "kind": "FunctionsFlexConsumption", "configurations": { - "config1": { - "instanceMemoryMB": 2048, - "httpConcurrency": 20 - }, - "config2": { - "instanceMemoryMB": 4096, - "httpConcurrency": 100 - }, - } - } + "config1": {"instanceMemoryMB": 2048, "httpConcurrency": 20}, + "config2": {"instanceMemoryMB": 4096, "httpConcurrency": 100}, + }, + }, }, ) assert result is not None @@ -441,7 +456,9 @@ async def test_get_test_profile(self, loadtesting_endpoint, loadtesting_test_pro @LoadTestingPreparer() @recorded_by_proxy_async - async def test_begin_test_profile_run(self, loadtesting_endpoint, loadtesting_test_profile_id, loadtesting_test_profile_run_id): + async def test_begin_test_profile_run( + self, loadtesting_endpoint, loadtesting_test_profile_id, loadtesting_test_profile_run_id + ): set_bodiless_matcher() run_client = self.create_run_client(loadtesting_endpoint) diff --git a/sdk/loadtesting/azure-developer-loadtesting/tests/test_load_test_administration_ops.py b/sdk/loadtesting/azure-developer-loadtesting/tests/test_load_test_administration_ops.py index 22b217d8200e..25ee309e1019 100644 --- a/sdk/loadtesting/azure-developer-loadtesting/tests/test_load_test_administration_ops.py +++ b/sdk/loadtesting/azure-developer-loadtesting/tests/test_load_test_administration_ops.py @@ -14,6 +14,7 @@ DISPLAY_NAME = "TestingResourcePyTest" + class TestLoadTestAdministrationOperations(LoadTestingTest): @LoadTestingPreparer() @@ -33,8 +34,18 @@ def test_create_or_update_load_test(self, loadtesting_endpoint, loadtesting_test }, "passFailCriteria": { "passFailMetrics": { - "condition1": {"clientmetric": "response_time_ms", "aggregate": "avg", "condition": ">", "value": 300}, - "condition2": {"clientmetric": "error", "aggregate": "percentage", "condition": ">", "value": 50}, + "condition1": { + "clientmetric": "response_time_ms", + "aggregate": "avg", + "condition": ">", + "value": 300, + }, + "condition2": { + "clientmetric": "error", + "aggregate": "percentage", + "condition": ">", + "value": 50, + }, "condition3": { "clientmetric": "latency", "aggregate": "avg", @@ -46,7 +57,8 @@ def test_create_or_update_load_test(self, loadtesting_endpoint, loadtesting_test }, "secrets": {}, "environmentVariables": {"my-variable": "value"}, - }) + }, + ) assert result is not None @@ -110,7 +122,8 @@ def list_test_files(self, loadtesting_endpoint, loadtesting_test_id): @LoadTestingPreparer() @recorded_by_proxy def test_create_or_update_app_components( - self, loadtesting_endpoint, loadtesting_test_id, loadtesting_app_component_id): + self, loadtesting_endpoint, loadtesting_test_id, loadtesting_app_component_id + ): set_bodiless_matcher() client = self.create_administration_client(loadtesting_endpoint) @@ -181,7 +194,7 @@ def test_delete_test_file(self, loadtesting_endpoint, loadtesting_test_id): client = self.create_administration_client(loadtesting_endpoint) 
result = client.delete_test_file(loadtesting_test_id, "sample.jmx") assert result is None - + @LoadTestingPreparer() @recorded_by_proxy def test_delete_load_test(self, loadtesting_endpoint, loadtesting_test_id): @@ -191,6 +204,7 @@ def test_delete_load_test(self, loadtesting_endpoint, loadtesting_test_id): result = client.delete_test(loadtesting_test_id) assert result is None + class TestTestProfileAdministrationOperations(LoadTestingTest): # Pre-requisite: Test creation is needed for test profile related tests @@ -211,8 +225,18 @@ def test_create_or_update_load_test(self, loadtesting_endpoint, loadtesting_test }, "passFailCriteria": { "passFailMetrics": { - "condition1": {"clientmetric": "response_time_ms", "aggregate": "avg", "condition": ">", "value": 300}, - "condition2": {"clientmetric": "error", "aggregate": "percentage", "condition": ">", "value": 50}, + "condition1": { + "clientmetric": "response_time_ms", + "aggregate": "avg", + "condition": ">", + "value": 300, + }, + "condition2": { + "clientmetric": "error", + "aggregate": "percentage", + "condition": ">", + "value": 50, + }, "condition3": { "clientmetric": "latency", "aggregate": "avg", @@ -224,7 +248,8 @@ def test_create_or_update_load_test(self, loadtesting_endpoint, loadtesting_test }, "secrets": {}, "environmentVariables": {"my-variable": "value"}, - }) + }, + ) assert result is not None @@ -239,7 +264,9 @@ def test_get_load_test(self, loadtesting_endpoint, loadtesting_test_id): @LoadTestingPreparer() @recorded_by_proxy - def test_create_or_update_test_profile(self, loadtesting_endpoint, loadtesting_test_id, loadtesting_test_profile_id, loadtesting_target_resource_id): + def test_create_or_update_test_profile( + self, loadtesting_endpoint, loadtesting_test_id, loadtesting_test_profile_id, loadtesting_target_resource_id + ): set_bodiless_matcher() client = self.create_administration_client(loadtesting_endpoint) @@ -253,20 +280,14 @@ def test_create_or_update_test_profile(self, loadtesting_endpoint, loadtesting_t "targetResourceConfigurations": { "kind": "FunctionsFlexConsumption", "configurations": { - "config1": { - "instanceMemoryMB": 2048, - "httpConcurrency": 20 - }, - "config2": { - "instanceMemoryMB": 4096, - "httpConcurrency": 100 - }, - } - } + "config1": {"instanceMemoryMB": 2048, "httpConcurrency": 20}, + "config2": {"instanceMemoryMB": 4096, "httpConcurrency": 100}, + }, + }, }, ) assert result is not None - + @LoadTestingPreparer() @recorded_by_proxy def test_get_test_profile(self, loadtesting_endpoint, loadtesting_test_profile_id): @@ -286,7 +307,7 @@ def test_list_test_profiles(self, loadtesting_endpoint): assert result is not None items = [r for r in result] assert len(items) > 0 # page has atleast one item - + @LoadTestingPreparer() @recorded_by_proxy def test_delete_test_profile(self, loadtesting_endpoint, loadtesting_test_profile_id): diff --git a/sdk/loadtesting/azure-developer-loadtesting/tests/test_load_test_run_ops.py b/sdk/loadtesting/azure-developer-loadtesting/tests/test_load_test_run_ops.py index be5b5d5efa29..ab60ea9c712b 100644 --- a/sdk/loadtesting/azure-developer-loadtesting/tests/test_load_test_run_ops.py +++ b/sdk/loadtesting/azure-developer-loadtesting/tests/test_load_test_run_ops.py @@ -35,8 +35,18 @@ def test_create_or_update_load_test(self, loadtesting_endpoint, loadtesting_test }, "passFailCriteria": { "passFailMetrics": { - "condition1": {"clientmetric": "response_time_ms", "aggregate": "avg", "condition": ">", "value": 300}, - "condition2": {"clientmetric": "error", "aggregate": 
"percentage", "condition": ">", "value": 50}, + "condition1": { + "clientmetric": "response_time_ms", + "aggregate": "avg", + "condition": ">", + "value": 300, + }, + "condition2": { + "clientmetric": "error", + "aggregate": "percentage", + "condition": ">", + "value": 50, + }, "condition3": { "clientmetric": "latency", "aggregate": "avg", @@ -48,7 +58,8 @@ def test_create_or_update_load_test(self, loadtesting_endpoint, loadtesting_test }, "secrets": {}, "environmentVariables": {"my-variable": "value"}, - }) + }, + ) assert result is not None @@ -60,7 +71,7 @@ def test_get_load_test(self, loadtesting_endpoint, loadtesting_test_id): client = self.create_administration_client(loadtesting_endpoint) result = client.get_test(loadtesting_test_id) assert result is not None - + @LoadTestingPreparer() @recorded_by_proxy def test_upload_test_file(self, loadtesting_endpoint, loadtesting_test_id): @@ -138,7 +149,7 @@ def test_list_test_runs(self, loadtesting_endpoint, loadtesting_test_id, loadtes result = run_client.list_test_runs() assert result is not None items = [item for item in result] - assert len(items) > 0 # Atleast one item in the page + assert len(items) > 0 # Atleast one item in the page @LoadTestingPreparer() @recorded_by_proxy @@ -195,9 +206,7 @@ def test_create_or_update_app_component( @LoadTestingPreparer() @recorded_by_proxy - def test_get_app_component( - self, loadtesting_endpoint, loadtesting_test_run_id - ): + def test_get_app_component(self, loadtesting_endpoint, loadtesting_test_run_id): set_bodiless_matcher() run_client = self.create_run_client(loadtesting_endpoint) @@ -234,9 +243,7 @@ def test_create_or_update_server_metrics_config( @LoadTestingPreparer() @recorded_by_proxy - def test_get_server_metrics_config( - self, loadtesting_endpoint, loadtesting_test_run_id - ): + def test_get_server_metrics_config(self, loadtesting_endpoint, loadtesting_test_run_id): set_bodiless_matcher() run_client = self.create_run_client(loadtesting_endpoint) @@ -275,7 +282,7 @@ def test_delete_test_run(self, loadtesting_endpoint, loadtesting_test_run_id): result = run_client.delete_test_run(loadtesting_test_run_id) assert result is None - + @LoadTestingPreparer() @recorded_by_proxy def test_delete_test(self, loadtesting_endpoint, loadtesting_test_id): @@ -286,6 +293,7 @@ def test_delete_test(self, loadtesting_endpoint, loadtesting_test_id): result = client.delete_test(loadtesting_test_id) assert result is None + class TestTestProfileRunOperations(LoadTestingTest): # Pre-requisite: Test & Test Profile creation is needed for test profile run related tests @@ -306,8 +314,18 @@ def test_create_or_update_load_test(self, loadtesting_endpoint, loadtesting_test }, "passFailCriteria": { "passFailMetrics": { - "condition1": {"clientmetric": "response_time_ms", "aggregate": "avg", "condition": ">", "value": 300}, - "condition2": {"clientmetric": "error", "aggregate": "percentage", "condition": ">", "value": 50}, + "condition1": { + "clientmetric": "response_time_ms", + "aggregate": "avg", + "condition": ">", + "value": 300, + }, + "condition2": { + "clientmetric": "error", + "aggregate": "percentage", + "condition": ">", + "value": 50, + }, "condition3": { "clientmetric": "latency", "aggregate": "avg", @@ -319,7 +337,8 @@ def test_create_or_update_load_test(self, loadtesting_endpoint, loadtesting_test }, "secrets": {}, "environmentVariables": {"my-variable": "value"}, - }) + }, + ) assert result is not None @@ -331,7 +350,7 @@ def test_get_load_test(self, loadtesting_endpoint, loadtesting_test_id): client = 
self.create_administration_client(loadtesting_endpoint) result = client.get_test(loadtesting_test_id) assert result is not None - + @LoadTestingPreparer() @recorded_by_proxy def test_upload_test_file(self, loadtesting_endpoint, loadtesting_test_id): @@ -360,7 +379,9 @@ def test_get_test_file(self, loadtesting_endpoint, loadtesting_test_id): @LoadTestingPreparer() @recorded_by_proxy - def test_create_or_update_test_profile(self, loadtesting_endpoint, loadtesting_test_id, loadtesting_test_profile_id, loadtesting_target_resource_id): + def test_create_or_update_test_profile( + self, loadtesting_endpoint, loadtesting_test_id, loadtesting_test_profile_id, loadtesting_target_resource_id + ): set_bodiless_matcher() client = self.create_administration_client(loadtesting_endpoint) @@ -374,20 +395,14 @@ def test_create_or_update_test_profile(self, loadtesting_endpoint, loadtesting_t "targetResourceConfigurations": { "kind": "FunctionsFlexConsumption", "configurations": { - "config1": { - "instanceMemoryMB": 2048, - "httpConcurrency": 20 - }, - "config2": { - "instanceMemoryMB": 4096, - "httpConcurrency": 100 - }, - } - } + "config1": {"instanceMemoryMB": 2048, "httpConcurrency": 20}, + "config2": {"instanceMemoryMB": 4096, "httpConcurrency": 100}, + }, + }, }, ) assert result is not None - + @LoadTestingPreparer() @recorded_by_proxy def test_get_test_profile(self, loadtesting_endpoint, loadtesting_test_profile_id): @@ -399,7 +414,9 @@ def test_get_test_profile(self, loadtesting_endpoint, loadtesting_test_profile_i @LoadTestingPreparer() @recorded_by_proxy - def test_begin_test_profile_run(self, loadtesting_endpoint, loadtesting_test_profile_id, loadtesting_test_profile_run_id): + def test_begin_test_profile_run( + self, loadtesting_endpoint, loadtesting_test_profile_id, loadtesting_test_profile_run_id + ): set_bodiless_matcher() run_client = self.create_run_client(loadtesting_endpoint) @@ -428,7 +445,7 @@ def test_get_test_profile_run(self, loadtesting_endpoint, loadtesting_test_profi result = run_client.get_test_profile_run(loadtesting_test_profile_run_id) assert result is not None assert len(result["recommendations"]) > 0 - + @LoadTestingPreparer() @recorded_by_proxy def test_stop_test_profile_run(self, loadtesting_endpoint, loadtesting_test_profile_id): @@ -460,7 +477,7 @@ def test_delete_test_profile_run(self, loadtesting_endpoint, loadtesting_test_pr result = run_client.delete_test_profile_run(loadtesting_test_profile_run_id) assert result is None - + @LoadTestingPreparer() @recorded_by_proxy def test_delete_test_profile(self, loadtesting_endpoint, loadtesting_test_profile_id): diff --git a/sdk/loadtesting/azure-developer-loadtesting/tests/testcase.py b/sdk/loadtesting/azure-developer-loadtesting/tests/testcase.py index 08c5ecb92644..df4dc143934d 100644 --- a/sdk/loadtesting/azure-developer-loadtesting/tests/testcase.py +++ b/sdk/loadtesting/azure-developer-loadtesting/tests/testcase.py @@ -38,5 +38,5 @@ def create_run_client(self, endpoint) -> LoadTestRunClient: loadtesting_app_component_id="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRG/providers/Microsoft.Web/sites/contoso-sampleapp", loadtesting_test_profile_id="some-test-profile-id", loadtesting_target_resource_id="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRG/providers/Microsoft.Web/sites/myFlexFunction", - loadtesting_test_profile_run_id="some-test-profile-run-id" + loadtesting_test_profile_run_id="some-test-profile-run-id", ) diff --git 
a/sdk/loadtesting/azure-developer-loadtesting/tests/testcase_async.py b/sdk/loadtesting/azure-developer-loadtesting/tests/testcase_async.py index e7ad3bfcd12c..7e256725c64a 100644 --- a/sdk/loadtesting/azure-developer-loadtesting/tests/testcase_async.py +++ b/sdk/loadtesting/azure-developer-loadtesting/tests/testcase_async.py @@ -16,7 +16,7 @@ def create_administration_client(self, endpoint) -> LoadTestAdministrationClient credential=self.admin_credential, endpoint=endpoint, ) - + return self.admin_client def create_run_client(self, endpoint) -> LoadTestRunClient: @@ -28,11 +28,11 @@ def create_run_client(self, endpoint) -> LoadTestRunClient: ) return self.run_client - + async def close_admin_client(self): await self.admin_credential.close() await self.admin_client.close() - + async def close_run_client(self): await self.run_credential.close() await self.run_client.close()
diff --git a/sdk/loadtesting/azure-developer-loadtesting/tsp-location.yaml b/sdk/loadtesting/azure-developer-loadtesting/tsp-location.yaml index f862b18befa1..dd6f2a2e02ea 100644 --- a/sdk/loadtesting/azure-developer-loadtesting/tsp-location.yaml +++ b/sdk/loadtesting/azure-developer-loadtesting/tsp-location.yaml @@ -1,4 +1,4 @@ -directory: specification/loadtestservice/LoadTestService -commit: 3582fd3aac7f024efda7e439acd84a83fc5ad3df +directory: specification/loadtestservice/data-plane/loadtesting +commit: f1e9f3d40922dc193783b10ee22794d82b917fc1 repo: Azure/azure-rest-api-specs additionalDirectories:
From 7834ce45a7f80f60f686127caf8e49cb7b22bc77 Mon Sep 17 00:00:00 2001 From: Himanshu Bisht Date: Tue, 3 Feb 2026 21:21:53 +0000 Subject: [PATCH 2/2] Generated sdk with latest commit --- .../apiview-properties.json | 4 +- .../loadtesting/_operations/_operations.py | 17 ++++---- .../aio/_operations/_operations.py | 17 ++++---- .../developer/loadtesting/models/_models.py | 5 ++- .../update_test_run_latest_insights.py | 42 +++++++++++++++++++ .../generated_tests/test_load_test_run.py | 4 +- .../test_load_test_run_async.py | 4 +- .../tsp-location.yaml | 2 +- 8 files changed, 70 insertions(+), 25 deletions(-) create mode 100644 sdk/loadtesting/azure-developer-loadtesting/generated_samples/update_test_run_latest_insights.py
diff --git a/sdk/loadtesting/azure-developer-loadtesting/apiview-properties.json b/sdk/loadtesting/azure-developer-loadtesting/apiview-properties.json index 52d2ded487e5..d0282e85ec54 100644 --- a/sdk/loadtesting/azure-developer-loadtesting/apiview-properties.json +++ b/sdk/loadtesting/azure-developer-loadtesting/apiview-properties.json @@ -185,8 +185,8 @@ "azure.developer.loadtesting.aio.LoadTestRunClient.stop_test_profile_run": "Customizations.TestRunOperations.stopTestProfileRun", "azure.developer.loadtesting.LoadTestRunClient.get_latest_test_run_insights": "Customizations.TestRunOperations.getLatestTestRunInsights", "azure.developer.loadtesting.aio.LoadTestRunClient.get_latest_test_run_insights": "Customizations.TestRunOperations.getLatestTestRunInsights", - "azure.developer.loadtesting.LoadTestRunClient.patch_latest_test_run_insights": "Customizations.TestRunOperations.patchLatestTestRunInsights", - "azure.developer.loadtesting.aio.LoadTestRunClient.patch_latest_test_run_insights": "Customizations.TestRunOperations.patchLatestTestRunInsights", + "azure.developer.loadtesting.LoadTestRunClient.update_latest_test_run_insights": "Customizations.TestRunOperations.updateLatestTestRunInsights", + "azure.developer.loadtesting.aio.LoadTestRunClient.update_latest_test_run_insights":
"Customizations.TestRunOperations.updateLatestTestRunInsights", "azure.developer.loadtesting.LoadTestRunClient.begin_generate_test_run_insights": "Customizations.TestRunOperations.generateTestRunInsights", "azure.developer.loadtesting.aio.LoadTestRunClient.begin_generate_test_run_insights": "Customizations.TestRunOperations.generateTestRunInsights" } diff --git a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_operations/_operations.py b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_operations/_operations.py index 0783f86f0d3a..dc11efcb5f45 100644 --- a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_operations/_operations.py +++ b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/_operations/_operations.py @@ -1403,7 +1403,7 @@ def build_load_test_run_get_latest_test_run_insights_request( # pylint: disable return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_load_test_run_patch_latest_test_run_insights_request( # pylint: disable=name-too-long +def build_load_test_run_update_latest_test_run_insights_request( # pylint: disable=name-too-long test_run_id: str, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) @@ -5412,8 +5412,9 @@ def list_test_runs( Get all test runs for the given filters. - :keyword orderby: Sort on the supported fields in (field asc/desc) format. eg: executedDateTime - asc. Supported fields - executedDateTime. Default value is None. + :keyword orderby: Sort on the supported fields in (field asc/desc) format. eg: createdDateTime + asc. + Supported fields - createdDateTime, executedDateTime (legacy). Default value is None. :paramtype orderby: str :keyword search: Prefix based, case sensitive search on searchable fields - description, executedUser. For example, to search for a test run, with description 500 VUs, @@ -6132,7 +6133,7 @@ def get_latest_test_run_insights(self, test_run_id: str, **kwargs: Any) -> _mode return deserialized # type: ignore @overload - def patch_latest_test_run_insights( + def update_latest_test_run_insights( self, test_run_id: str, body: _models.TestRunInsights, @@ -6158,7 +6159,7 @@ def patch_latest_test_run_insights( """ @overload - def patch_latest_test_run_insights( + def update_latest_test_run_insights( self, test_run_id: str, body: JSON, *, content_type: str = "application/merge-patch+json", **kwargs: Any ) -> _models.TestRunInsights: """Update the latest insights for the test run. @@ -6179,7 +6180,7 @@ def patch_latest_test_run_insights( """ @overload - def patch_latest_test_run_insights( + def update_latest_test_run_insights( self, test_run_id: str, body: IO[bytes], *, content_type: str = "application/merge-patch+json", **kwargs: Any ) -> _models.TestRunInsights: """Update the latest insights for the test run. @@ -6205,7 +6206,7 @@ def patch_latest_test_run_insights( params_added_on={"2025-11-01-preview": ["api_version", "content_type", "test_run_id", "accept"]}, api_versions_list=["2025-11-01-preview"], ) - def patch_latest_test_run_insights( + def update_latest_test_run_insights( self, test_run_id: str, body: Union[_models.TestRunInsights, JSON, IO[bytes]], **kwargs: Any ) -> _models.TestRunInsights: """Update the latest insights for the test run. 
@@ -6243,7 +6244,7 @@ def patch_latest_test_run_insights( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_load_test_run_patch_latest_test_run_insights_request( + _request = build_load_test_run_update_latest_test_run_insights_request( test_run_id=test_run_id, content_type=content_type, api_version=self._config.api_version, diff --git a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/aio/_operations/_operations.py b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/aio/_operations/_operations.py index 2ce0a6b39361..40a4f9e1d406 100644 --- a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/aio/_operations/_operations.py +++ b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/aio/_operations/_operations.py @@ -81,9 +81,9 @@ build_load_test_run_list_metrics_request, build_load_test_run_list_test_profile_runs_request, build_load_test_run_list_test_runs_request, - build_load_test_run_patch_latest_test_run_insights_request, build_load_test_run_stop_test_profile_run_request, build_load_test_run_stop_test_run_request, + build_load_test_run_update_latest_test_run_insights_request, ) from ..._utils.model_base import SdkJSONEncoder, _deserialize from ..._utils.utils import ClientMixinABC @@ -4060,8 +4060,9 @@ def list_test_runs( Get all test runs for the given filters. - :keyword orderby: Sort on the supported fields in (field asc/desc) format. eg: executedDateTime - asc. Supported fields - executedDateTime. Default value is None. + :keyword orderby: Sort on the supported fields in (field asc/desc) format. eg: createdDateTime + asc. + Supported fields - createdDateTime, executedDateTime (legacy). Default value is None. :paramtype orderby: str :keyword search: Prefix based, case sensitive search on searchable fields - description, executedUser. For example, to search for a test run, with description 500 VUs, @@ -4779,7 +4780,7 @@ async def get_latest_test_run_insights(self, test_run_id: str, **kwargs: Any) -> return deserialized # type: ignore @overload - async def patch_latest_test_run_insights( + async def update_latest_test_run_insights( self, test_run_id: str, body: _models.TestRunInsights, @@ -4805,7 +4806,7 @@ async def patch_latest_test_run_insights( """ @overload - async def patch_latest_test_run_insights( + async def update_latest_test_run_insights( self, test_run_id: str, body: JSON, *, content_type: str = "application/merge-patch+json", **kwargs: Any ) -> _models.TestRunInsights: """Update the latest insights for the test run. @@ -4826,7 +4827,7 @@ async def patch_latest_test_run_insights( """ @overload - async def patch_latest_test_run_insights( + async def update_latest_test_run_insights( self, test_run_id: str, body: IO[bytes], *, content_type: str = "application/merge-patch+json", **kwargs: Any ) -> _models.TestRunInsights: """Update the latest insights for the test run. @@ -4852,7 +4853,7 @@ async def patch_latest_test_run_insights( params_added_on={"2025-11-01-preview": ["api_version", "content_type", "test_run_id", "accept"]}, api_versions_list=["2025-11-01-preview"], ) - async def patch_latest_test_run_insights( + async def update_latest_test_run_insights( self, test_run_id: str, body: Union[_models.TestRunInsights, JSON, IO[bytes]], **kwargs: Any ) -> _models.TestRunInsights: """Update the latest insights for the test run. 
@@ -4890,7 +4891,7 @@ async def patch_latest_test_run_insights( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_load_test_run_patch_latest_test_run_insights_request( + _request = build_load_test_run_update_latest_test_run_insights_request( test_run_id=test_run_id, content_type=content_type, api_version=self._config.api_version, diff --git a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/models/_models.py b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/models/_models.py index 97b989d69e25..79833f9ddbd7 100644 --- a/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/models/_models.py +++ b/sdk/loadtesting/azure-developer-loadtesting/azure/developer/loadtesting/models/_models.py @@ -2614,7 +2614,8 @@ class TestRun(_Model): :vartype start_date_time: ~datetime.datetime :ivar end_date_time: The test run end DateTime(RFC 3339 literal format). :vartype end_date_time: ~datetime.datetime - :ivar executed_date_time: Test run initiated time. + :ivar executed_date_time: Test run initiated time. This is legacy, new developments should use + createdDateTime. :vartype executed_date_time: ~datetime.datetime :ivar portal_url: Portal url. :vartype portal_url: str @@ -2736,7 +2737,7 @@ class TestRun(_Model): executed_date_time: Optional[datetime.datetime] = rest_field( name="executedDateTime", visibility=["read"], format="rfc3339" ) - """Test run initiated time.""" + """Test run initiated time. This is legacy, new developments should use createdDateTime.""" portal_url: Optional[str] = rest_field(name="portalUrl", visibility=["read"]) """Portal url.""" duration: Optional[int] = rest_field(visibility=["read"]) diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_samples/update_test_run_latest_insights.py b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/update_test_run_latest_insights.py new file mode 100644 index 000000000000..735101053f1c --- /dev/null +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_samples/update_test_run_latest_insights.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.developer.loadtesting import LoadTestRunClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-developer-loadtesting +# USAGE + python update_test_run_latest_insights.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = LoadTestRunClient( + endpoint="ENDPOINT", + credential=DefaultAzureCredential(), + ) + + response = client.update_latest_test_run_insights( + test_run_id="12316678-1234-1234-1234-122451189012", + body={"rows": {"1": {"WorkItemUrl": "https://devdiv/workitems/1234"}}}, + ) + print(response) + + +# x-ms-original-file: 2025-11-01-preview/UpdateTestRunLatestInsights.json +if __name__ == "__main__": + main() diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_tests/test_load_test_run.py b/sdk/loadtesting/azure-developer-loadtesting/generated_tests/test_load_test_run.py index bd43cd3fcc28..677e6009c2e5 100644 --- a/sdk/loadtesting/azure-developer-loadtesting/generated_tests/test_load_test_run.py +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_tests/test_load_test_run.py @@ -254,9 +254,9 @@ def test_get_latest_test_run_insights(self, loadtestrun_endpoint): @LoadTestRunPreparer() @recorded_by_proxy - def test_patch_latest_test_run_insights(self, loadtestrun_endpoint): + def test_update_latest_test_run_insights(self, loadtestrun_endpoint): client = self.create_client(endpoint=loadtestrun_endpoint) - response = client.patch_latest_test_run_insights( + response = client.update_latest_test_run_insights( test_run_id="str", body={ "columns": [{"dataType": "str", "name": "str"}], diff --git a/sdk/loadtesting/azure-developer-loadtesting/generated_tests/test_load_test_run_async.py b/sdk/loadtesting/azure-developer-loadtesting/generated_tests/test_load_test_run_async.py index 6175fed44928..5621ebfd5ee7 100644 --- a/sdk/loadtesting/azure-developer-loadtesting/generated_tests/test_load_test_run_async.py +++ b/sdk/loadtesting/azure-developer-loadtesting/generated_tests/test_load_test_run_async.py @@ -255,9 +255,9 @@ async def test_get_latest_test_run_insights(self, loadtestrun_endpoint): @LoadTestRunPreparer() @recorded_by_proxy_async - async def test_patch_latest_test_run_insights(self, loadtestrun_endpoint): + async def test_update_latest_test_run_insights(self, loadtestrun_endpoint): client = self.create_async_client(endpoint=loadtestrun_endpoint) - response = await client.patch_latest_test_run_insights( + response = await client.update_latest_test_run_insights( test_run_id="str", body={ "columns": [{"dataType": "str", "name": "str"}], diff --git a/sdk/loadtesting/azure-developer-loadtesting/tsp-location.yaml b/sdk/loadtesting/azure-developer-loadtesting/tsp-location.yaml index dd6f2a2e02ea..85ff464bc0a2 100644 --- a/sdk/loadtesting/azure-developer-loadtesting/tsp-location.yaml +++ b/sdk/loadtesting/azure-developer-loadtesting/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/loadtestservice/data-plane/loadtesting -commit: f1e9f3d40922dc193783b10ee22794d82b917fc1 +commit: 0435ba6a2a8416ab7c623dd3ca88f43526c5e2e4 repo: Azure/azure-rest-api-specs additionalDirectories:
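Illustrative usage sketch (not part of the generated code in this patch): it combines the renamed update_latest_test_run_insights operation with the createdDateTime ordering now documented on list_test_runs. The "ENDPOINT" placeholder, the test run id, and the insights body are assumptions carried over from generated_samples/update_test_run_latest_insights.py, not values defined by the SDK itself.

from azure.identity import DefaultAzureCredential
from azure.developer.loadtesting import LoadTestRunClient

client = LoadTestRunClient(
    endpoint="ENDPOINT",  # placeholder, as in the generated samples
    credential=DefaultAzureCredential(),
)

# List test runs newest-first using the createdDateTime sort field documented in
# 2025-11-01-preview (executedDateTime is still accepted as a legacy field).
for test_run in client.list_test_runs(orderby="createdDateTime desc"):
    print(test_run["testRunId"])

# Merge-patch update of the latest insights for one run; the body shape mirrors
# the generated sample and is sent as application/merge-patch+json by default.
insights = client.update_latest_test_run_insights(
    test_run_id="12316678-1234-1234-1234-122451189012",  # sample run id
    body={"rows": {"1": {"WorkItemUrl": "https://devdiv/workitems/1234"}}},
)
print(insights)

The async client in azure.developer.loadtesting.aio exposes the same update_latest_test_run_insights method, mirroring the mapping recorded in apiview-properties.json.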