diff --git a/apis/evidently/2021-02-01/api-2.json b/apis/evidently/2021-02-01/api-2.json
deleted file mode 100644
index 080e808c0ac..00000000000
--- a/apis/evidently/2021-02-01/api-2.json
+++ /dev/null
@@ -1,2995 +0,0 @@
-{
- "version":"2.0",
- "metadata":{
- "apiVersion":"2021-02-01",
- "endpointPrefix":"evidently",
- "jsonVersion":"1.1",
- "protocol":"rest-json",
- "protocols":["rest-json"],
- "serviceFullName":"Amazon CloudWatch Evidently",
- "serviceId":"Evidently",
- "signatureVersion":"v4",
- "signingName":"evidently",
- "uid":"evidently-2021-02-01",
- "auth":["aws.auth#sigv4"]
- },
- "operations":{
- "BatchEvaluateFeature":{
- "name":"BatchEvaluateFeature",
- "http":{
- "method":"POST",
- "requestUri":"/projects/{project}/evaluations",
- "responseCode":200
- },
- "input":{"shape":"BatchEvaluateFeatureRequest"},
- "output":{"shape":"BatchEvaluateFeatureResponse"},
- "errors":[
- {"shape":"ThrottlingException"},
- {"shape":"ValidationException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"AccessDeniedException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025.",
- "endpoint":{"hostPrefix":"dataplane."}
- },
- "CreateExperiment":{
- "name":"CreateExperiment",
- "http":{
- "method":"POST",
- "requestUri":"/projects/{project}/experiments",
- "responseCode":200
- },
- "input":{"shape":"CreateExperimentRequest"},
- "output":{"shape":"CreateExperimentResponse"},
- "errors":[
- {"shape":"ValidationException"},
- {"shape":"ConflictException"},
- {"shape":"ServiceQuotaExceededException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"AccessDeniedException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025.",
- "idempotent":true
- },
- "CreateFeature":{
- "name":"CreateFeature",
- "http":{
- "method":"POST",
- "requestUri":"/projects/{project}/features",
- "responseCode":200
- },
- "input":{"shape":"CreateFeatureRequest"},
- "output":{"shape":"CreateFeatureResponse"},
- "errors":[
- {"shape":"ValidationException"},
- {"shape":"ConflictException"},
- {"shape":"ServiceQuotaExceededException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"AccessDeniedException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025."
- },
- "CreateLaunch":{
- "name":"CreateLaunch",
- "http":{
- "method":"POST",
- "requestUri":"/projects/{project}/launches",
- "responseCode":200
- },
- "input":{"shape":"CreateLaunchRequest"},
- "output":{"shape":"CreateLaunchResponse"},
- "errors":[
- {"shape":"ValidationException"},
- {"shape":"ConflictException"},
- {"shape":"ServiceQuotaExceededException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"AccessDeniedException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025."
- },
- "CreateProject":{
- "name":"CreateProject",
- "http":{
- "method":"POST",
- "requestUri":"/projects",
- "responseCode":200
- },
- "input":{"shape":"CreateProjectRequest"},
- "output":{"shape":"CreateProjectResponse"},
- "errors":[
- {"shape":"ValidationException"},
- {"shape":"ConflictException"},
- {"shape":"ServiceQuotaExceededException"},
- {"shape":"AccessDeniedException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025."
- },
- "CreateSegment":{
- "name":"CreateSegment",
- "http":{
- "method":"POST",
- "requestUri":"/segments",
- "responseCode":200
- },
- "input":{"shape":"CreateSegmentRequest"},
- "output":{"shape":"CreateSegmentResponse"},
- "errors":[
- {"shape":"ValidationException"},
- {"shape":"ConflictException"},
- {"shape":"ServiceQuotaExceededException"},
- {"shape":"AccessDeniedException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025."
- },
- "DeleteExperiment":{
- "name":"DeleteExperiment",
- "http":{
- "method":"DELETE",
- "requestUri":"/projects/{project}/experiments/{experiment}",
- "responseCode":200
- },
- "input":{"shape":"DeleteExperimentRequest"},
- "output":{"shape":"DeleteExperimentResponse"},
- "errors":[
- {"shape":"ValidationException"},
- {"shape":"InternalServerException"},
- {"shape":"ConflictException"},
- {"shape":"ServiceUnavailableException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"AccessDeniedException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025.",
- "idempotent":true
- },
- "DeleteFeature":{
- "name":"DeleteFeature",
- "http":{
- "method":"DELETE",
- "requestUri":"/projects/{project}/features/{feature}",
- "responseCode":200
- },
- "input":{"shape":"DeleteFeatureRequest"},
- "output":{"shape":"DeleteFeatureResponse"},
- "errors":[
- {"shape":"ThrottlingException"},
- {"shape":"ValidationException"},
- {"shape":"ConflictException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"AccessDeniedException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025.",
- "idempotent":true
- },
- "DeleteLaunch":{
- "name":"DeleteLaunch",
- "http":{
- "method":"DELETE",
- "requestUri":"/projects/{project}/launches/{launch}",
- "responseCode":200
- },
- "input":{"shape":"DeleteLaunchRequest"},
- "output":{"shape":"DeleteLaunchResponse"},
- "errors":[
- {"shape":"ThrottlingException"},
- {"shape":"ValidationException"},
- {"shape":"ConflictException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"AccessDeniedException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025.",
- "idempotent":true
- },
- "DeleteProject":{
- "name":"DeleteProject",
- "http":{
- "method":"DELETE",
- "requestUri":"/projects/{project}",
- "responseCode":200
- },
- "input":{"shape":"DeleteProjectRequest"},
- "output":{"shape":"DeleteProjectResponse"},
- "errors":[
- {"shape":"ThrottlingException"},
- {"shape":"ValidationException"},
- {"shape":"ConflictException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"AccessDeniedException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025.",
- "idempotent":true
- },
- "DeleteSegment":{
- "name":"DeleteSegment",
- "http":{
- "method":"DELETE",
- "requestUri":"/segments/{segment}",
- "responseCode":200
- },
- "input":{"shape":"DeleteSegmentRequest"},
- "output":{"shape":"DeleteSegmentResponse"},
- "errors":[
- {"shape":"ThrottlingException"},
- {"shape":"ValidationException"},
- {"shape":"ConflictException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"AccessDeniedException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025.",
- "idempotent":true
- },
- "EvaluateFeature":{
- "name":"EvaluateFeature",
- "http":{
- "method":"POST",
- "requestUri":"/projects/{project}/evaluations/{feature}",
- "responseCode":200
- },
- "input":{"shape":"EvaluateFeatureRequest"},
- "output":{"shape":"EvaluateFeatureResponse"},
- "errors":[
- {"shape":"ThrottlingException"},
- {"shape":"ValidationException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"AccessDeniedException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025.",
- "endpoint":{"hostPrefix":"dataplane."}
- },
- "GetExperiment":{
- "name":"GetExperiment",
- "http":{
- "method":"GET",
- "requestUri":"/projects/{project}/experiments/{experiment}",
- "responseCode":200
- },
- "input":{"shape":"GetExperimentRequest"},
- "output":{"shape":"GetExperimentResponse"},
- "errors":[
- {"shape":"ThrottlingException"},
- {"shape":"ValidationException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"AccessDeniedException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025."
- },
- "GetExperimentResults":{
- "name":"GetExperimentResults",
- "http":{
- "method":"POST",
- "requestUri":"/projects/{project}/experiments/{experiment}/results",
- "responseCode":200
- },
- "input":{"shape":"GetExperimentResultsRequest"},
- "output":{"shape":"GetExperimentResultsResponse"},
- "errors":[
- {"shape":"ThrottlingException"},
- {"shape":"ValidationException"},
- {"shape":"ConflictException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"AccessDeniedException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025."
- },
- "GetFeature":{
- "name":"GetFeature",
- "http":{
- "method":"GET",
- "requestUri":"/projects/{project}/features/{feature}",
- "responseCode":200
- },
- "input":{"shape":"GetFeatureRequest"},
- "output":{"shape":"GetFeatureResponse"},
- "errors":[
- {"shape":"ThrottlingException"},
- {"shape":"ValidationException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"AccessDeniedException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025."
- },
- "GetLaunch":{
- "name":"GetLaunch",
- "http":{
- "method":"GET",
- "requestUri":"/projects/{project}/launches/{launch}",
- "responseCode":200
- },
- "input":{"shape":"GetLaunchRequest"},
- "output":{"shape":"GetLaunchResponse"},
- "errors":[
- {"shape":"ThrottlingException"},
- {"shape":"ValidationException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"AccessDeniedException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025."
- },
- "GetProject":{
- "name":"GetProject",
- "http":{
- "method":"GET",
- "requestUri":"/projects/{project}",
- "responseCode":200
- },
- "input":{"shape":"GetProjectRequest"},
- "output":{"shape":"GetProjectResponse"},
- "errors":[
- {"shape":"ThrottlingException"},
- {"shape":"ValidationException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"AccessDeniedException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025."
- },
- "GetSegment":{
- "name":"GetSegment",
- "http":{
- "method":"GET",
- "requestUri":"/segments/{segment}",
- "responseCode":200
- },
- "input":{"shape":"GetSegmentRequest"},
- "output":{"shape":"GetSegmentResponse"},
- "errors":[
- {"shape":"ThrottlingException"},
- {"shape":"ValidationException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"AccessDeniedException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025."
- },
- "ListExperiments":{
- "name":"ListExperiments",
- "http":{
- "method":"GET",
- "requestUri":"/projects/{project}/experiments",
- "responseCode":200
- },
- "input":{"shape":"ListExperimentsRequest"},
- "output":{"shape":"ListExperimentsResponse"},
- "errors":[
- {"shape":"ValidationException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"AccessDeniedException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025."
- },
- "ListFeatures":{
- "name":"ListFeatures",
- "http":{
- "method":"GET",
- "requestUri":"/projects/{project}/features",
- "responseCode":200
- },
- "input":{"shape":"ListFeaturesRequest"},
- "output":{"shape":"ListFeaturesResponse"},
- "errors":[
- {"shape":"ThrottlingException"},
- {"shape":"ValidationException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"AccessDeniedException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025."
- },
- "ListLaunches":{
- "name":"ListLaunches",
- "http":{
- "method":"GET",
- "requestUri":"/projects/{project}/launches",
- "responseCode":200
- },
- "input":{"shape":"ListLaunchesRequest"},
- "output":{"shape":"ListLaunchesResponse"},
- "errors":[
- {"shape":"ThrottlingException"},
- {"shape":"ValidationException"},
- {"shape":"AccessDeniedException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025."
- },
- "ListProjects":{
- "name":"ListProjects",
- "http":{
- "method":"GET",
- "requestUri":"/projects",
- "responseCode":200
- },
- "input":{"shape":"ListProjectsRequest"},
- "output":{"shape":"ListProjectsResponse"},
- "errors":[
- {"shape":"ThrottlingException"},
- {"shape":"ValidationException"},
- {"shape":"AccessDeniedException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025."
- },
- "ListSegmentReferences":{
- "name":"ListSegmentReferences",
- "http":{
- "method":"GET",
- "requestUri":"/segments/{segment}/references",
- "responseCode":200
- },
- "input":{"shape":"ListSegmentReferencesRequest"},
- "output":{"shape":"ListSegmentReferencesResponse"},
- "errors":[
- {"shape":"ThrottlingException"},
- {"shape":"ValidationException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"AccessDeniedException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025."
- },
- "ListSegments":{
- "name":"ListSegments",
- "http":{
- "method":"GET",
- "requestUri":"/segments",
- "responseCode":200
- },
- "input":{"shape":"ListSegmentsRequest"},
- "output":{"shape":"ListSegmentsResponse"},
- "errors":[
- {"shape":"ThrottlingException"},
- {"shape":"ValidationException"},
- {"shape":"AccessDeniedException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025."
- },
- "ListTagsForResource":{
- "name":"ListTagsForResource",
- "http":{
- "method":"GET",
- "requestUri":"/tags/{resourceArn}",
- "responseCode":200
- },
- "input":{"shape":"ListTagsForResourceRequest"},
- "output":{"shape":"ListTagsForResourceResponse"},
- "errors":[
- {"shape":"ValidationException"},
- {"shape":"ConflictException"},
- {"shape":"ResourceNotFoundException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025."
- },
- "PutProjectEvents":{
- "name":"PutProjectEvents",
- "http":{
- "method":"POST",
- "requestUri":"/events/projects/{project}",
- "responseCode":200
- },
- "input":{"shape":"PutProjectEventsRequest"},
- "output":{"shape":"PutProjectEventsResponse"},
- "errors":[
- {"shape":"ThrottlingException"},
- {"shape":"ValidationException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"AccessDeniedException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025.",
- "endpoint":{"hostPrefix":"dataplane."}
- },
- "StartExperiment":{
- "name":"StartExperiment",
- "http":{
- "method":"POST",
- "requestUri":"/projects/{project}/experiments/{experiment}/start",
- "responseCode":200
- },
- "input":{"shape":"StartExperimentRequest"},
- "output":{"shape":"StartExperimentResponse"},
- "errors":[
- {"shape":"ThrottlingException"},
- {"shape":"ValidationException"},
- {"shape":"ConflictException"},
- {"shape":"ServiceQuotaExceededException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"AccessDeniedException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025."
- },
- "StartLaunch":{
- "name":"StartLaunch",
- "http":{
- "method":"POST",
- "requestUri":"/projects/{project}/launches/{launch}/start",
- "responseCode":200
- },
- "input":{"shape":"StartLaunchRequest"},
- "output":{"shape":"StartLaunchResponse"},
- "errors":[
- {"shape":"ThrottlingException"},
- {"shape":"ValidationException"},
- {"shape":"ConflictException"},
- {"shape":"ServiceQuotaExceededException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"AccessDeniedException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025."
- },
- "StopExperiment":{
- "name":"StopExperiment",
- "http":{
- "method":"POST",
- "requestUri":"/projects/{project}/experiments/{experiment}/cancel",
- "responseCode":200
- },
- "input":{"shape":"StopExperimentRequest"},
- "output":{"shape":"StopExperimentResponse"},
- "errors":[
- {"shape":"ThrottlingException"},
- {"shape":"ValidationException"},
- {"shape":"ConflictException"},
- {"shape":"ServiceQuotaExceededException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"AccessDeniedException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025."
- },
- "StopLaunch":{
- "name":"StopLaunch",
- "http":{
- "method":"POST",
- "requestUri":"/projects/{project}/launches/{launch}/cancel",
- "responseCode":200
- },
- "input":{"shape":"StopLaunchRequest"},
- "output":{"shape":"StopLaunchResponse"},
- "errors":[
- {"shape":"ThrottlingException"},
- {"shape":"ValidationException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"AccessDeniedException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025."
- },
- "TagResource":{
- "name":"TagResource",
- "http":{
- "method":"POST",
- "requestUri":"/tags/{resourceArn}",
- "responseCode":200
- },
- "input":{"shape":"TagResourceRequest"},
- "output":{"shape":"TagResourceResponse"},
- "errors":[
- {"shape":"ValidationException"},
- {"shape":"ConflictException"},
- {"shape":"ResourceNotFoundException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025.",
- "idempotent":true
- },
- "TestSegmentPattern":{
- "name":"TestSegmentPattern",
- "http":{
- "method":"POST",
- "requestUri":"/test-segment-pattern",
- "responseCode":200
- },
- "input":{"shape":"TestSegmentPatternRequest"},
- "output":{"shape":"TestSegmentPatternResponse"},
- "errors":[
- {"shape":"ThrottlingException"},
- {"shape":"ValidationException"},
- {"shape":"AccessDeniedException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025."
- },
- "UntagResource":{
- "name":"UntagResource",
- "http":{
- "method":"DELETE",
- "requestUri":"/tags/{resourceArn}",
- "responseCode":200
- },
- "input":{"shape":"UntagResourceRequest"},
- "output":{"shape":"UntagResourceResponse"},
- "errors":[
- {"shape":"ValidationException"},
- {"shape":"ConflictException"},
- {"shape":"ResourceNotFoundException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025.",
- "idempotent":true
- },
- "UpdateExperiment":{
- "name":"UpdateExperiment",
- "http":{
- "method":"PATCH",
- "requestUri":"/projects/{project}/experiments/{experiment}",
- "responseCode":200
- },
- "input":{"shape":"UpdateExperimentRequest"},
- "output":{"shape":"UpdateExperimentResponse"},
- "errors":[
- {"shape":"ValidationException"},
- {"shape":"ConflictException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"AccessDeniedException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025."
- },
- "UpdateFeature":{
- "name":"UpdateFeature",
- "http":{
- "method":"PATCH",
- "requestUri":"/projects/{project}/features/{feature}",
- "responseCode":200
- },
- "input":{"shape":"UpdateFeatureRequest"},
- "output":{"shape":"UpdateFeatureResponse"},
- "errors":[
- {"shape":"ValidationException"},
- {"shape":"ConflictException"},
- {"shape":"ServiceQuotaExceededException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"AccessDeniedException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025."
- },
- "UpdateLaunch":{
- "name":"UpdateLaunch",
- "http":{
- "method":"PATCH",
- "requestUri":"/projects/{project}/launches/{launch}",
- "responseCode":200
- },
- "input":{"shape":"UpdateLaunchRequest"},
- "output":{"shape":"UpdateLaunchResponse"},
- "errors":[
- {"shape":"ValidationException"},
- {"shape":"ConflictException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"AccessDeniedException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025."
- },
- "UpdateProject":{
- "name":"UpdateProject",
- "http":{
- "method":"PATCH",
- "requestUri":"/projects/{project}",
- "responseCode":200
- },
- "input":{"shape":"UpdateProjectRequest"},
- "output":{"shape":"UpdateProjectResponse"},
- "errors":[
- {"shape":"ValidationException"},
- {"shape":"ConflictException"},
- {"shape":"ServiceQuotaExceededException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"AccessDeniedException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025."
- },
- "UpdateProjectDataDelivery":{
- "name":"UpdateProjectDataDelivery",
- "http":{
- "method":"PATCH",
- "requestUri":"/projects/{project}/data-delivery",
- "responseCode":200
- },
- "input":{"shape":"UpdateProjectDataDeliveryRequest"},
- "output":{"shape":"UpdateProjectDataDeliveryResponse"},
- "errors":[
- {"shape":"ValidationException"},
- {"shape":"ConflictException"},
- {"shape":"ServiceQuotaExceededException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"AccessDeniedException"}
- ],
- "deprecated":true,
- "deprecatedMessage":"AWS CloudWatch Evidently has been deprecated since 11/17/2025."
- }
- },
- "shapes":{
- "AccessDeniedException":{
- "type":"structure",
- "members":{
- "message":{"shape":"String"}
- },
- "error":{
- "httpStatusCode":403,
- "senderFault":true
- },
- "exception":true
- },
- "AppConfigResourceId":{
- "type":"string",
- "pattern":"[a-z0-9]{4,7}"
- },
- "Arn":{
- "type":"string",
- "max":2048,
- "min":0,
- "pattern":"arn:[^:]*:[^:]*:[^:]*:[^:]*:.*"
- },
- "BatchEvaluateFeatureRequest":{
- "type":"structure",
- "required":[
- "project",
- "requests"
- ],
- "members":{
- "project":{
- "shape":"ProjectRef",
- "location":"uri",
- "locationName":"project"
- },
- "requests":{"shape":"EvaluationRequestsList"}
- }
- },
- "BatchEvaluateFeatureResponse":{
- "type":"structure",
- "members":{
- "results":{"shape":"EvaluationResultsList"}
- }
- },
- "Boolean":{
- "type":"boolean",
- "box":true
- },
- "ChangeDirectionEnum":{
- "type":"string",
- "enum":[
- "INCREASE",
- "DECREASE"
- ]
- },
- "CloudWatchLogsDestination":{
- "type":"structure",
- "members":{
- "logGroup":{"shape":"CwLogGroupSafeName"}
- }
- },
- "CloudWatchLogsDestinationConfig":{
- "type":"structure",
- "members":{
- "logGroup":{"shape":"CwLogGroupSafeName"}
- }
- },
- "ConflictException":{
- "type":"structure",
- "members":{
- "message":{"shape":"String"},
- "resourceId":{"shape":"String"},
- "resourceType":{"shape":"String"}
- },
- "error":{
- "httpStatusCode":409,
- "senderFault":true
- },
- "exception":true
- },
- "CreateExperimentRequest":{
- "type":"structure",
- "required":[
- "metricGoals",
- "name",
- "project",
- "treatments"
- ],
- "members":{
- "description":{"shape":"Description"},
- "metricGoals":{"shape":"MetricGoalConfigList"},
- "name":{"shape":"ExperimentName"},
- "onlineAbConfig":{"shape":"OnlineAbConfig"},
- "project":{
- "shape":"ProjectRef",
- "location":"uri",
- "locationName":"project"
- },
- "randomizationSalt":{"shape":"RandomizationSalt"},
- "samplingRate":{
- "shape":"SplitWeight",
- "box":true
- },
- "segment":{"shape":"SegmentRef"},
- "tags":{"shape":"TagMap"},
- "treatments":{"shape":"TreatmentConfigList"}
- }
- },
- "CreateExperimentResponse":{
- "type":"structure",
- "required":["experiment"],
- "members":{
- "experiment":{"shape":"Experiment"}
- }
- },
- "CreateFeatureRequest":{
- "type":"structure",
- "required":[
- "name",
- "project",
- "variations"
- ],
- "members":{
- "defaultVariation":{"shape":"VariationName"},
- "description":{"shape":"Description"},
- "entityOverrides":{"shape":"EntityOverrideMap"},
- "evaluationStrategy":{"shape":"FeatureEvaluationStrategy"},
- "name":{"shape":"FeatureName"},
- "project":{
- "shape":"ProjectRef",
- "location":"uri",
- "locationName":"project"
- },
- "tags":{"shape":"TagMap"},
- "variations":{"shape":"VariationConfigsList"}
- }
- },
- "CreateFeatureResponse":{
- "type":"structure",
- "members":{
- "feature":{"shape":"Feature"}
- }
- },
- "CreateLaunchRequest":{
- "type":"structure",
- "required":[
- "groups",
- "name",
- "project"
- ],
- "members":{
- "description":{"shape":"Description"},
- "groups":{"shape":"LaunchGroupConfigList"},
- "metricMonitors":{"shape":"MetricMonitorConfigList"},
- "name":{"shape":"LaunchName"},
- "project":{
- "shape":"ProjectRef",
- "location":"uri",
- "locationName":"project"
- },
- "randomizationSalt":{"shape":"RandomizationSalt"},
- "scheduledSplitsConfig":{"shape":"ScheduledSplitsLaunchConfig"},
- "tags":{"shape":"TagMap"}
- }
- },
- "CreateLaunchResponse":{
- "type":"structure",
- "required":["launch"],
- "members":{
- "launch":{"shape":"Launch"}
- }
- },
- "CreateProjectRequest":{
- "type":"structure",
- "required":["name"],
- "members":{
- "appConfigResource":{"shape":"ProjectAppConfigResourceConfig"},
- "dataDelivery":{"shape":"ProjectDataDeliveryConfig"},
- "description":{"shape":"Description"},
- "name":{"shape":"ProjectName"},
- "tags":{"shape":"TagMap"}
- }
- },
- "CreateProjectResponse":{
- "type":"structure",
- "required":["project"],
- "members":{
- "project":{"shape":"Project"}
- }
- },
- "CreateSegmentRequest":{
- "type":"structure",
- "required":[
- "name",
- "pattern"
- ],
- "members":{
- "description":{"shape":"Description"},
- "name":{"shape":"SegmentName"},
- "pattern":{
- "shape":"SegmentPattern",
- "jsonvalue":true
- },
- "tags":{"shape":"TagMap"}
- }
- },
- "CreateSegmentResponse":{
- "type":"structure",
- "required":["segment"],
- "members":{
- "segment":{"shape":"Segment"}
- }
- },
- "CwDimensionSafeName":{
- "type":"string",
- "max":255,
- "min":1,
- "pattern":"^[\\S]+$"
- },
- "CwLogGroupSafeName":{
- "type":"string",
- "max":512,
- "min":1,
- "pattern":"^[-a-zA-Z0-9._/]+$"
- },
- "DeleteExperimentRequest":{
- "type":"structure",
- "required":[
- "experiment",
- "project"
- ],
- "members":{
- "experiment":{
- "shape":"ExperimentName",
- "location":"uri",
- "locationName":"experiment"
- },
- "project":{
- "shape":"ProjectRef",
- "location":"uri",
- "locationName":"project"
- }
- }
- },
- "DeleteExperimentResponse":{
- "type":"structure",
- "members":{}
- },
- "DeleteFeatureRequest":{
- "type":"structure",
- "required":[
- "feature",
- "project"
- ],
- "members":{
- "feature":{
- "shape":"FeatureName",
- "location":"uri",
- "locationName":"feature"
- },
- "project":{
- "shape":"ProjectRef",
- "location":"uri",
- "locationName":"project"
- }
- }
- },
- "DeleteFeatureResponse":{
- "type":"structure",
- "members":{}
- },
- "DeleteLaunchRequest":{
- "type":"structure",
- "required":[
- "launch",
- "project"
- ],
- "members":{
- "launch":{
- "shape":"LaunchName",
- "location":"uri",
- "locationName":"launch"
- },
- "project":{
- "shape":"ProjectRef",
- "location":"uri",
- "locationName":"project"
- }
- }
- },
- "DeleteLaunchResponse":{
- "type":"structure",
- "members":{}
- },
- "DeleteProjectRequest":{
- "type":"structure",
- "required":["project"],
- "members":{
- "project":{
- "shape":"ProjectRef",
- "location":"uri",
- "locationName":"project"
- }
- }
- },
- "DeleteProjectResponse":{
- "type":"structure",
- "members":{}
- },
- "DeleteSegmentRequest":{
- "type":"structure",
- "required":["segment"],
- "members":{
- "segment":{
- "shape":"SegmentRef",
- "location":"uri",
- "locationName":"segment"
- }
- }
- },
- "DeleteSegmentResponse":{
- "type":"structure",
- "members":{}
- },
- "Description":{
- "type":"string",
- "max":160,
- "min":0,
- "pattern":".*"
- },
- "Double":{
- "type":"double",
- "box":true
- },
- "DoubleValueList":{
- "type":"list",
- "member":{"shape":"Double"},
- "max":100800,
- "min":0
- },
- "EntityId":{
- "type":"string",
- "max":512,
- "min":1,
- "pattern":".*"
- },
- "EntityOverrideMap":{
- "type":"map",
- "key":{"shape":"EntityId"},
- "value":{"shape":"VariationName"},
- "max":2500,
- "min":0
- },
- "ErrorCodeEnum":{
- "type":"string",
- "max":64,
- "min":1
- },
- "ErrorMessage":{
- "type":"string",
- "max":1024,
- "min":1,
- "pattern":".*"
- },
- "EvaluateFeatureRequest":{
- "type":"structure",
- "required":[
- "entityId",
- "feature",
- "project"
- ],
- "members":{
- "entityId":{"shape":"EntityId"},
- "evaluationContext":{
- "shape":"JsonValue",
- "jsonvalue":true
- },
- "feature":{
- "shape":"FeatureName",
- "location":"uri",
- "locationName":"feature"
- },
- "project":{
- "shape":"ProjectRef",
- "location":"uri",
- "locationName":"project"
- }
- }
- },
- "EvaluateFeatureResponse":{
- "type":"structure",
- "members":{
- "details":{
- "shape":"JsonValue",
- "jsonvalue":true
- },
- "reason":{"shape":"String"},
- "value":{"shape":"VariableValue"},
- "variation":{"shape":"String"}
- }
- },
- "EvaluationRequest":{
- "type":"structure",
- "required":[
- "entityId",
- "feature"
- ],
- "members":{
- "entityId":{"shape":"EntityId"},
- "evaluationContext":{
- "shape":"JsonValue",
- "jsonvalue":true
- },
- "feature":{"shape":"FeatureName"}
- }
- },
- "EvaluationRequestsList":{
- "type":"list",
- "member":{"shape":"EvaluationRequest"},
- "max":20,
- "min":1
- },
- "EvaluationResult":{
- "type":"structure",
- "required":[
- "entityId",
- "feature"
- ],
- "members":{
- "details":{
- "shape":"JsonValue",
- "jsonvalue":true
- },
- "entityId":{"shape":"EntityId"},
- "feature":{"shape":"FeatureName"},
- "project":{"shape":"Arn"},
- "reason":{"shape":"String"},
- "value":{"shape":"VariableValue"},
- "variation":{"shape":"String"}
- }
- },
- "EvaluationResultsList":{
- "type":"list",
- "member":{"shape":"EvaluationResult"}
- },
- "EvaluationRule":{
- "type":"structure",
- "required":["type"],
- "members":{
- "name":{"shape":"RuleName"},
- "type":{"shape":"RuleType"}
- }
- },
- "EvaluationRulesList":{
- "type":"list",
- "member":{"shape":"EvaluationRule"}
- },
- "Event":{
- "type":"structure",
- "required":[
- "data",
- "timestamp",
- "type"
- ],
- "members":{
- "data":{
- "shape":"JsonValue",
- "jsonvalue":true
- },
- "timestamp":{"shape":"Timestamp"},
- "type":{"shape":"EventType"}
- }
- },
- "EventList":{
- "type":"list",
- "member":{"shape":"Event"},
- "max":50,
- "min":0
- },
- "EventType":{
- "type":"string",
- "enum":[
- "aws.evidently.evaluation",
- "aws.evidently.custom"
- ]
- },
- "Experiment":{
- "type":"structure",
- "required":[
- "arn",
- "createdTime",
- "lastUpdatedTime",
- "name",
- "status",
- "type"
- ],
- "members":{
- "arn":{"shape":"ExperimentArn"},
- "createdTime":{"shape":"Timestamp"},
- "description":{"shape":"Description"},
- "execution":{"shape":"ExperimentExecution"},
- "lastUpdatedTime":{"shape":"Timestamp"},
- "metricGoals":{"shape":"MetricGoalsList"},
- "name":{"shape":"ExperimentName"},
- "onlineAbDefinition":{"shape":"OnlineAbDefinition"},
- "project":{"shape":"ProjectArn"},
- "randomizationSalt":{"shape":"RandomizationSalt"},
- "samplingRate":{"shape":"SplitWeight"},
- "schedule":{"shape":"ExperimentSchedule"},
- "segment":{"shape":"SegmentArn"},
- "status":{"shape":"ExperimentStatus"},
- "statusReason":{"shape":"Description"},
- "tags":{"shape":"TagMap"},
- "treatments":{"shape":"TreatmentList"},
- "type":{"shape":"ExperimentType"}
- }
- },
- "ExperimentArn":{
- "type":"string",
- "max":2048,
- "min":0,
- "pattern":"arn:[^:]*:[^:]*:[^:]*:[^:]*:project/[-a-zA-Z0-9._]*/experiment/[-a-zA-Z0-9._]*"
- },
- "ExperimentBaseStat":{
- "type":"string",
- "enum":["Mean"]
- },
- "ExperimentExecution":{
- "type":"structure",
- "members":{
- "endedTime":{"shape":"Timestamp"},
- "startedTime":{"shape":"Timestamp"}
- }
- },
- "ExperimentList":{
- "type":"list",
- "member":{"shape":"Experiment"}
- },
- "ExperimentName":{
- "type":"string",
- "max":127,
- "min":1,
- "pattern":"^[-a-zA-Z0-9._]*$"
- },
- "ExperimentReport":{
- "type":"structure",
- "members":{
- "content":{
- "shape":"JsonValue",
- "jsonvalue":true
- },
- "metricName":{"shape":"CwDimensionSafeName"},
- "reportName":{"shape":"ExperimentReportName"},
- "treatmentName":{"shape":"TreatmentName"}
- }
- },
- "ExperimentReportList":{
- "type":"list",
- "member":{"shape":"ExperimentReport"},
- "max":1000,
- "min":0
- },
- "ExperimentReportName":{
- "type":"string",
- "enum":["BayesianInference"]
- },
- "ExperimentReportNameList":{
- "type":"list",
- "member":{"shape":"ExperimentReportName"},
- "max":5,
- "min":0
- },
- "ExperimentResultRequestType":{
- "type":"string",
- "enum":[
- "BaseStat",
- "TreatmentEffect",
- "ConfidenceInterval",
- "PValue"
- ]
- },
- "ExperimentResultRequestTypeList":{
- "type":"list",
- "member":{"shape":"ExperimentResultRequestType"},
- "max":5,
- "min":0
- },
- "ExperimentResultResponseType":{
- "type":"string",
- "enum":[
- "Mean",
- "TreatmentEffect",
- "ConfidenceIntervalUpperBound",
- "ConfidenceIntervalLowerBound",
- "PValue"
- ]
- },
- "ExperimentResultsData":{
- "type":"structure",
- "members":{
- "metricName":{"shape":"CwDimensionSafeName"},
- "resultStat":{"shape":"ExperimentResultResponseType"},
- "treatmentName":{"shape":"TreatmentName"},
- "values":{"shape":"DoubleValueList"}
- }
- },
- "ExperimentResultsDataList":{
- "type":"list",
- "member":{"shape":"ExperimentResultsData"},
- "max":1000,
- "min":0
- },
- "ExperimentSchedule":{
- "type":"structure",
- "members":{
- "analysisCompleteTime":{"shape":"Timestamp"}
- }
- },
- "ExperimentStatus":{
- "type":"string",
- "enum":[
- "CREATED",
- "UPDATING",
- "RUNNING",
- "COMPLETED",
- "CANCELLED"
- ]
- },
- "ExperimentStopDesiredState":{
- "type":"string",
- "enum":[
- "COMPLETED",
- "CANCELLED"
- ]
- },
- "ExperimentType":{
- "type":"string",
- "enum":["aws.evidently.onlineab"]
- },
- "Feature":{
- "type":"structure",
- "required":[
- "arn",
- "createdTime",
- "evaluationStrategy",
- "lastUpdatedTime",
- "name",
- "status",
- "valueType",
- "variations"
- ],
- "members":{
- "arn":{"shape":"FeatureArn"},
- "createdTime":{"shape":"Timestamp"},
- "defaultVariation":{"shape":"VariationName"},
- "description":{"shape":"Description"},
- "entityOverrides":{"shape":"EntityOverrideMap"},
- "evaluationRules":{"shape":"EvaluationRulesList"},
- "evaluationStrategy":{"shape":"FeatureEvaluationStrategy"},
- "lastUpdatedTime":{"shape":"Timestamp"},
- "name":{"shape":"FeatureName"},
- "project":{"shape":"ProjectArn"},
- "status":{"shape":"FeatureStatus"},
- "tags":{"shape":"TagMap"},
- "valueType":{"shape":"VariationValueType"},
- "variations":{"shape":"VariationsList"}
- }
- },
- "FeatureArn":{
- "type":"string",
- "max":2048,
- "min":0,
- "pattern":"arn:[^:]*:[^:]*:[^:]*:[^:]*:project/[-a-zA-Z0-9._]*/feature/[-a-zA-Z0-9._]*"
- },
- "FeatureEvaluationStrategy":{
- "type":"string",
- "enum":[
- "ALL_RULES",
- "DEFAULT_VARIATION"
- ]
- },
- "FeatureName":{
- "type":"string",
- "max":127,
- "min":1,
- "pattern":"^[-a-zA-Z0-9._]*$"
- },
- "FeatureStatus":{
- "type":"string",
- "enum":[
- "AVAILABLE",
- "UPDATING"
- ]
- },
- "FeatureSummariesList":{
- "type":"list",
- "member":{"shape":"FeatureSummary"}
- },
- "FeatureSummary":{
- "type":"structure",
- "required":[
- "arn",
- "createdTime",
- "evaluationStrategy",
- "lastUpdatedTime",
- "name",
- "status"
- ],
- "members":{
- "arn":{"shape":"Arn"},
- "createdTime":{"shape":"Timestamp"},
- "defaultVariation":{"shape":"VariationName"},
- "evaluationRules":{"shape":"EvaluationRulesList"},
- "evaluationStrategy":{"shape":"FeatureEvaluationStrategy"},
- "lastUpdatedTime":{"shape":"Timestamp"},
- "name":{"shape":"FeatureName"},
- "project":{"shape":"ProjectRef"},
- "status":{"shape":"FeatureStatus"},
- "tags":{"shape":"TagMap"}
- }
- },
- "FeatureToVariationMap":{
- "type":"map",
- "key":{"shape":"FeatureName"},
- "value":{"shape":"VariationName"}
- },
- "GetExperimentRequest":{
- "type":"structure",
- "required":[
- "experiment",
- "project"
- ],
- "members":{
- "experiment":{
- "shape":"ExperimentName",
- "location":"uri",
- "locationName":"experiment"
- },
- "project":{
- "shape":"ProjectRef",
- "location":"uri",
- "locationName":"project"
- }
- }
- },
- "GetExperimentResponse":{
- "type":"structure",
- "members":{
- "experiment":{"shape":"Experiment"}
- }
- },
- "GetExperimentResultsRequest":{
- "type":"structure",
- "required":[
- "experiment",
- "metricNames",
- "project",
- "treatmentNames"
- ],
- "members":{
- "baseStat":{"shape":"ExperimentBaseStat"},
- "endTime":{"shape":"Timestamp"},
- "experiment":{
- "shape":"ExperimentName",
- "location":"uri",
- "locationName":"experiment"
- },
- "metricNames":{"shape":"MetricNameList"},
- "period":{"shape":"ResultsPeriod"},
- "project":{
- "shape":"ProjectRef",
- "location":"uri",
- "locationName":"project"
- },
- "reportNames":{"shape":"ExperimentReportNameList"},
- "resultStats":{"shape":"ExperimentResultRequestTypeList"},
- "startTime":{"shape":"Timestamp"},
- "treatmentNames":{"shape":"TreatmentNameList"}
- }
- },
- "GetExperimentResultsResponse":{
- "type":"structure",
- "members":{
- "details":{"shape":"String"},
- "reports":{"shape":"ExperimentReportList"},
- "resultsData":{"shape":"ExperimentResultsDataList"},
- "timestamps":{"shape":"TimestampList"}
- }
- },
- "GetFeatureRequest":{
- "type":"structure",
- "required":[
- "feature",
- "project"
- ],
- "members":{
- "feature":{
- "shape":"FeatureName",
- "location":"uri",
- "locationName":"feature"
- },
- "project":{
- "shape":"ProjectRef",
- "location":"uri",
- "locationName":"project"
- }
- }
- },
- "GetFeatureResponse":{
- "type":"structure",
- "required":["feature"],
- "members":{
- "feature":{"shape":"Feature"}
- }
- },
- "GetLaunchRequest":{
- "type":"structure",
- "required":[
- "launch",
- "project"
- ],
- "members":{
- "launch":{
- "shape":"LaunchName",
- "location":"uri",
- "locationName":"launch"
- },
- "project":{
- "shape":"ProjectRef",
- "location":"uri",
- "locationName":"project"
- }
- }
- },
- "GetLaunchResponse":{
- "type":"structure",
- "members":{
- "launch":{"shape":"Launch"}
- }
- },
- "GetProjectRequest":{
- "type":"structure",
- "required":["project"],
- "members":{
- "project":{
- "shape":"ProjectRef",
- "location":"uri",
- "locationName":"project"
- }
- }
- },
- "GetProjectResponse":{
- "type":"structure",
- "required":["project"],
- "members":{
- "project":{"shape":"Project"}
- }
- },
- "GetSegmentRequest":{
- "type":"structure",
- "required":["segment"],
- "members":{
- "segment":{
- "shape":"SegmentRef",
- "location":"uri",
- "locationName":"segment"
- }
- }
- },
- "GetSegmentResponse":{
- "type":"structure",
- "required":["segment"],
- "members":{
- "segment":{"shape":"Segment"}
- }
- },
- "GroupName":{
- "type":"string",
- "max":127,
- "min":1,
- "pattern":"^[-a-zA-Z0-9._]*$"
- },
- "GroupToWeightMap":{
- "type":"map",
- "key":{"shape":"GroupName"},
- "value":{"shape":"SplitWeight"},
- "max":5,
- "min":0
- },
- "Integer":{
- "type":"integer",
- "box":true
- },
- "InternalServerException":{
- "type":"structure",
- "members":{
- "message":{"shape":"String"}
- },
- "error":{"httpStatusCode":500},
- "exception":true,
- "fault":true
- },
- "JsonPath":{
- "type":"string",
- "max":256,
- "min":1,
- "pattern":".*"
- },
- "JsonValue":{"type":"string"},
- "Launch":{
- "type":"structure",
- "required":[
- "arn",
- "createdTime",
- "lastUpdatedTime",
- "name",
- "status",
- "type"
- ],
- "members":{
- "arn":{"shape":"LaunchArn"},
- "createdTime":{"shape":"Timestamp"},
- "description":{"shape":"Description"},
- "execution":{"shape":"LaunchExecution"},
- "groups":{"shape":"LaunchGroupList"},
- "lastUpdatedTime":{"shape":"Timestamp"},
- "metricMonitors":{"shape":"MetricMonitorList"},
- "name":{"shape":"LaunchName"},
- "project":{"shape":"ProjectRef"},
- "randomizationSalt":{"shape":"RandomizationSalt"},
- "scheduledSplitsDefinition":{"shape":"ScheduledSplitsLaunchDefinition"},
- "status":{"shape":"LaunchStatus"},
- "statusReason":{"shape":"Description"},
- "tags":{"shape":"TagMap"},
- "type":{"shape":"LaunchType"}
- }
- },
- "LaunchArn":{
- "type":"string",
- "max":2048,
- "min":0,
- "pattern":"arn:[^:]*:[^:]*:[^:]*:[^:]*:project/[-a-zA-Z0-9._]*/launch/[-a-zA-Z0-9._]*"
- },
- "LaunchExecution":{
- "type":"structure",
- "members":{
- "endedTime":{"shape":"Timestamp"},
- "startedTime":{"shape":"Timestamp"}
- }
- },
- "LaunchGroup":{
- "type":"structure",
- "required":[
- "featureVariations",
- "name"
- ],
- "members":{
- "description":{"shape":"Description"},
- "featureVariations":{"shape":"FeatureToVariationMap"},
- "name":{"shape":"GroupName"}
- }
- },
- "LaunchGroupConfig":{
- "type":"structure",
- "required":[
- "feature",
- "name",
- "variation"
- ],
- "members":{
- "description":{"shape":"Description"},
- "feature":{"shape":"FeatureName"},
- "name":{"shape":"GroupName"},
- "variation":{"shape":"VariationName"}
- }
- },
- "LaunchGroupConfigList":{
- "type":"list",
- "member":{"shape":"LaunchGroupConfig"},
- "max":5,
- "min":1
- },
- "LaunchGroupList":{
- "type":"list",
- "member":{"shape":"LaunchGroup"}
- },
- "LaunchName":{
- "type":"string",
- "max":127,
- "min":1,
- "pattern":"^[-a-zA-Z0-9._]*$"
- },
- "LaunchStatus":{
- "type":"string",
- "enum":[
- "CREATED",
- "UPDATING",
- "RUNNING",
- "COMPLETED",
- "CANCELLED"
- ]
- },
- "LaunchStopDesiredState":{
- "type":"string",
- "enum":[
- "COMPLETED",
- "CANCELLED"
- ]
- },
- "LaunchType":{
- "type":"string",
- "enum":["aws.evidently.splits"]
- },
- "LaunchesList":{
- "type":"list",
- "member":{"shape":"Launch"}
- },
- "ListExperimentsRequest":{
- "type":"structure",
- "required":["project"],
- "members":{
- "maxResults":{
- "shape":"MaxExperiments",
- "location":"querystring",
- "locationName":"maxResults"
- },
- "nextToken":{
- "shape":"NextToken",
- "location":"querystring",
- "locationName":"nextToken"
- },
- "project":{
- "shape":"ProjectRef",
- "location":"uri",
- "locationName":"project"
- },
- "status":{
- "shape":"ExperimentStatus",
- "location":"querystring",
- "locationName":"status"
- }
- }
- },
- "ListExperimentsResponse":{
- "type":"structure",
- "members":{
- "experiments":{"shape":"ExperimentList"},
- "nextToken":{"shape":"NextToken"}
- }
- },
- "ListFeaturesRequest":{
- "type":"structure",
- "required":["project"],
- "members":{
- "maxResults":{
- "shape":"MaxFeatures",
- "location":"querystring",
- "locationName":"maxResults"
- },
- "nextToken":{
- "shape":"NextToken",
- "location":"querystring",
- "locationName":"nextToken"
- },
- "project":{
- "shape":"ProjectRef",
- "location":"uri",
- "locationName":"project"
- }
- }
- },
- "ListFeaturesResponse":{
- "type":"structure",
- "members":{
- "features":{"shape":"FeatureSummariesList"},
- "nextToken":{"shape":"NextToken"}
- }
- },
- "ListLaunchesRequest":{
- "type":"structure",
- "required":["project"],
- "members":{
- "maxResults":{
- "shape":"MaxLaunches",
- "location":"querystring",
- "locationName":"maxResults"
- },
- "nextToken":{
- "shape":"NextToken",
- "location":"querystring",
- "locationName":"nextToken"
- },
- "project":{
- "shape":"ProjectRef",
- "location":"uri",
- "locationName":"project"
- },
- "status":{
- "shape":"LaunchStatus",
- "location":"querystring",
- "locationName":"status"
- }
- }
- },
- "ListLaunchesResponse":{
- "type":"structure",
- "members":{
- "launches":{"shape":"LaunchesList"},
- "nextToken":{"shape":"NextToken"}
- }
- },
- "ListProjectsRequest":{
- "type":"structure",
- "members":{
- "maxResults":{
- "shape":"MaxProjects",
- "location":"querystring",
- "locationName":"maxResults"
- },
- "nextToken":{
- "shape":"NextToken",
- "location":"querystring",
- "locationName":"nextToken"
- }
- }
- },
- "ListProjectsResponse":{
- "type":"structure",
- "members":{
- "nextToken":{"shape":"NextToken"},
- "projects":{"shape":"ProjectSummariesList"}
- }
- },
- "ListSegmentReferencesRequest":{
- "type":"structure",
- "required":[
- "segment",
- "type"
- ],
- "members":{
- "maxResults":{
- "shape":"MaxReferences",
- "location":"querystring",
- "locationName":"maxResults"
- },
- "nextToken":{
- "shape":"NextToken",
- "location":"querystring",
- "locationName":"nextToken"
- },
- "segment":{
- "shape":"SegmentRef",
- "location":"uri",
- "locationName":"segment"
- },
- "type":{
- "shape":"SegmentReferenceResourceType",
- "location":"querystring",
- "locationName":"type"
- }
- }
- },
- "ListSegmentReferencesResponse":{
- "type":"structure",
- "members":{
- "nextToken":{"shape":"NextToken"},
- "referencedBy":{"shape":"RefResourceList"}
- }
- },
- "ListSegmentsRequest":{
- "type":"structure",
- "members":{
- "maxResults":{
- "shape":"MaxSegments",
- "location":"querystring",
- "locationName":"maxResults"
- },
- "nextToken":{
- "shape":"NextToken",
- "location":"querystring",
- "locationName":"nextToken"
- }
- }
- },
- "ListSegmentsResponse":{
- "type":"structure",
- "members":{
- "nextToken":{"shape":"NextToken"},
- "segments":{"shape":"SegmentList"}
- }
- },
- "ListTagsForResourceRequest":{
- "type":"structure",
- "required":["resourceArn"],
- "members":{
- "resourceArn":{
- "shape":"Arn",
- "location":"uri",
- "locationName":"resourceArn"
- }
- }
- },
- "ListTagsForResourceResponse":{
- "type":"structure",
- "members":{
- "tags":{"shape":"TagMap"}
- }
- },
- "Long":{
- "type":"long",
- "box":true
- },
- "MaxExperiments":{
- "type":"integer",
- "box":true,
- "max":100,
- "min":1
- },
- "MaxFeatures":{
- "type":"integer",
- "box":true,
- "max":100,
- "min":1
- },
- "MaxLaunches":{
- "type":"integer",
- "box":true,
- "max":100,
- "min":1
- },
- "MaxProjects":{
- "type":"integer",
- "box":true,
- "max":50,
- "min":1
- },
- "MaxReferences":{
- "type":"integer",
- "box":true,
- "max":100,
- "min":1
- },
- "MaxSegments":{
- "type":"integer",
- "box":true,
- "max":50,
- "min":1
- },
- "MetricDefinition":{
- "type":"structure",
- "members":{
- "entityIdKey":{"shape":"JsonPath"},
- "eventPattern":{
- "shape":"JsonValue",
- "jsonvalue":true
- },
- "name":{"shape":"CwDimensionSafeName"},
- "unitLabel":{"shape":"MetricUnitLabel"},
- "valueKey":{"shape":"JsonPath"}
- }
- },
- "MetricDefinitionConfig":{
- "type":"structure",
- "required":[
- "entityIdKey",
- "name",
- "valueKey"
- ],
- "members":{
- "entityIdKey":{"shape":"JsonPath"},
- "eventPattern":{
- "shape":"MetricDefinitionConfigEventPatternString",
- "jsonvalue":true
- },
- "name":{"shape":"CwDimensionSafeName"},
- "unitLabel":{"shape":"MetricUnitLabel"},
- "valueKey":{"shape":"JsonPath"}
- }
- },
- "MetricDefinitionConfigEventPatternString":{
- "type":"string",
- "max":1024,
- "min":0
- },
- "MetricGoal":{
- "type":"structure",
- "required":["metricDefinition"],
- "members":{
- "desiredChange":{"shape":"ChangeDirectionEnum"},
- "metricDefinition":{"shape":"MetricDefinition"}
- }
- },
- "MetricGoalConfig":{
- "type":"structure",
- "required":["metricDefinition"],
- "members":{
- "desiredChange":{"shape":"ChangeDirectionEnum"},
- "metricDefinition":{"shape":"MetricDefinitionConfig"}
- }
- },
- "MetricGoalConfigList":{
- "type":"list",
- "member":{"shape":"MetricGoalConfig"},
- "max":3,
- "min":1
- },
- "MetricGoalsList":{
- "type":"list",
- "member":{"shape":"MetricGoal"},
- "max":3,
- "min":1
- },
- "MetricMonitor":{
- "type":"structure",
- "required":["metricDefinition"],
- "members":{
- "metricDefinition":{"shape":"MetricDefinition"}
- }
- },
- "MetricMonitorConfig":{
- "type":"structure",
- "required":["metricDefinition"],
- "members":{
- "metricDefinition":{"shape":"MetricDefinitionConfig"}
- }
- },
- "MetricMonitorConfigList":{
- "type":"list",
- "member":{"shape":"MetricMonitorConfig"},
- "max":3,
- "min":0
- },
- "MetricMonitorList":{
- "type":"list",
- "member":{"shape":"MetricMonitor"},
- "max":3,
- "min":0
- },
- "MetricNameList":{
- "type":"list",
- "member":{"shape":"CwDimensionSafeName"},
- "max":1,
- "min":1
- },
- "MetricUnitLabel":{
- "type":"string",
- "max":256,
- "min":1,
- "pattern":".*"
- },
- "NextToken":{
- "type":"string",
- "max":8192,
- "min":1,
- "pattern":".*"
- },
- "OnlineAbConfig":{
- "type":"structure",
- "members":{
- "controlTreatmentName":{"shape":"TreatmentName"},
- "treatmentWeights":{"shape":"TreatmentToWeightMap"}
- }
- },
- "OnlineAbDefinition":{
- "type":"structure",
- "members":{
- "controlTreatmentName":{"shape":"TreatmentName"},
- "treatmentWeights":{"shape":"TreatmentToWeightMap"}
- }
- },
- "PrimitiveBoolean":{"type":"boolean"},
- "Project":{
- "type":"structure",
- "required":[
- "arn",
- "createdTime",
- "lastUpdatedTime",
- "name",
- "status"
- ],
- "members":{
- "activeExperimentCount":{"shape":"Long"},
- "activeLaunchCount":{"shape":"Long"},
- "appConfigResource":{"shape":"ProjectAppConfigResource"},
- "arn":{"shape":"ProjectArn"},
- "createdTime":{"shape":"Timestamp"},
- "dataDelivery":{"shape":"ProjectDataDelivery"},
- "description":{"shape":"Description"},
- "experimentCount":{"shape":"Long"},
- "featureCount":{"shape":"Long"},
- "lastUpdatedTime":{"shape":"Timestamp"},
- "launchCount":{"shape":"Long"},
- "name":{"shape":"ProjectName"},
- "status":{"shape":"ProjectStatus"},
- "tags":{"shape":"TagMap"}
- }
- },
- "ProjectAppConfigResource":{
- "type":"structure",
- "required":[
- "applicationId",
- "configurationProfileId",
- "environmentId"
- ],
- "members":{
- "applicationId":{"shape":"AppConfigResourceId"},
- "configurationProfileId":{"shape":"AppConfigResourceId"},
- "environmentId":{"shape":"AppConfigResourceId"}
- }
- },
- "ProjectAppConfigResourceConfig":{
- "type":"structure",
- "members":{
- "applicationId":{"shape":"AppConfigResourceId"},
- "environmentId":{"shape":"AppConfigResourceId"}
- }
- },
- "ProjectArn":{
- "type":"string",
- "max":2048,
- "min":0,
- "pattern":"arn:[^:]*:[^:]*:[^:]*:[^:]*:project/[-a-zA-Z0-9._]*"
- },
- "ProjectDataDelivery":{
- "type":"structure",
- "members":{
- "cloudWatchLogs":{"shape":"CloudWatchLogsDestination"},
- "s3Destination":{"shape":"S3Destination"}
- }
- },
- "ProjectDataDeliveryConfig":{
- "type":"structure",
- "members":{
- "cloudWatchLogs":{"shape":"CloudWatchLogsDestinationConfig"},
- "s3Destination":{"shape":"S3DestinationConfig"}
- }
- },
- "ProjectName":{
- "type":"string",
- "max":127,
- "min":1,
- "pattern":"^[-a-zA-Z0-9._]*$"
- },
- "ProjectRef":{
- "type":"string",
- "max":2048,
- "min":0,
- "pattern":"(^[a-zA-Z0-9._-]*$)|(arn:[^:]*:[^:]*:[^:]*:[^:]*:project/[a-zA-Z0-9._-]*)"
- },
- "ProjectStatus":{
- "type":"string",
- "enum":[
- "AVAILABLE",
- "UPDATING"
- ]
- },
- "ProjectSummariesList":{
- "type":"list",
- "member":{"shape":"ProjectSummary"}
- },
- "ProjectSummary":{
- "type":"structure",
- "required":[
- "arn",
- "createdTime",
- "lastUpdatedTime",
- "name",
- "status"
- ],
- "members":{
- "activeExperimentCount":{"shape":"Long"},
- "activeLaunchCount":{"shape":"Long"},
- "arn":{"shape":"ProjectArn"},
- "createdTime":{"shape":"Timestamp"},
- "description":{"shape":"Description"},
- "experimentCount":{"shape":"Long"},
- "featureCount":{"shape":"Long"},
- "lastUpdatedTime":{"shape":"Timestamp"},
- "launchCount":{"shape":"Long"},
- "name":{"shape":"ProjectName"},
- "status":{"shape":"ProjectStatus"},
- "tags":{"shape":"TagMap"}
- }
- },
- "PutProjectEventsRequest":{
- "type":"structure",
- "required":[
- "events",
- "project"
- ],
- "members":{
- "events":{"shape":"EventList"},
- "project":{
- "shape":"ProjectRef",
- "location":"uri",
- "locationName":"project"
- }
- }
- },
- "PutProjectEventsResponse":{
- "type":"structure",
- "members":{
- "eventResults":{"shape":"PutProjectEventsResultEntryList"},
- "failedEventCount":{"shape":"Integer"}
- }
- },
- "PutProjectEventsResultEntry":{
- "type":"structure",
- "members":{
- "errorCode":{"shape":"ErrorCodeEnum"},
- "errorMessage":{"shape":"ErrorMessage"},
- "eventId":{"shape":"Uuid"}
- }
- },
- "PutProjectEventsResultEntryList":{
- "type":"list",
- "member":{"shape":"PutProjectEventsResultEntry"}
- },
- "RandomizationSalt":{
- "type":"string",
- "max":127,
- "min":0,
- "pattern":".*"
- },
- "RefResource":{
- "type":"structure",
- "required":[
- "name",
- "type"
- ],
- "members":{
- "arn":{"shape":"String"},
- "endTime":{"shape":"String"},
- "lastUpdatedOn":{"shape":"String"},
- "name":{"shape":"String"},
- "startTime":{"shape":"String"},
- "status":{"shape":"String"},
- "type":{"shape":"String"}
- }
- },
- "RefResourceList":{
- "type":"list",
- "member":{"shape":"RefResource"}
- },
- "ResourceNotFoundException":{
- "type":"structure",
- "members":{
- "message":{"shape":"String"},
- "resourceId":{"shape":"String"},
- "resourceType":{"shape":"String"}
- },
- "error":{
- "httpStatusCode":404,
- "senderFault":true
- },
- "exception":true
- },
- "ResultsPeriod":{
- "type":"long",
- "max":90000,
- "min":300
- },
- "RuleName":{
- "type":"string",
- "max":1024,
- "min":0
- },
- "RuleType":{
- "type":"string",
- "max":1024,
- "min":0
- },
- "S3BucketSafeName":{
- "type":"string",
- "max":63,
- "min":3,
- "pattern":"^[a-z0-9][-a-z0-9]*[a-z0-9]$"
- },
- "S3Destination":{
- "type":"structure",
- "members":{
- "bucket":{"shape":"S3BucketSafeName"},
- "prefix":{"shape":"S3PrefixSafeName"}
- }
- },
- "S3DestinationConfig":{
- "type":"structure",
- "members":{
- "bucket":{"shape":"S3BucketSafeName"},
- "prefix":{"shape":"S3PrefixSafeName"}
- }
- },
- "S3PrefixSafeName":{
- "type":"string",
- "max":1024,
- "min":1,
- "pattern":"^[-a-zA-Z0-9!_.*'()/]*$"
- },
- "ScheduledSplit":{
- "type":"structure",
- "required":["startTime"],
- "members":{
- "groupWeights":{"shape":"GroupToWeightMap"},
- "segmentOverrides":{"shape":"SegmentOverridesList"},
- "startTime":{"shape":"Timestamp"}
- }
- },
- "ScheduledSplitConfig":{
- "type":"structure",
- "required":[
- "groupWeights",
- "startTime"
- ],
- "members":{
- "groupWeights":{"shape":"GroupToWeightMap"},
- "segmentOverrides":{"shape":"SegmentOverridesList"},
- "startTime":{"shape":"Timestamp"}
- }
- },
- "ScheduledSplitConfigList":{
- "type":"list",
- "member":{"shape":"ScheduledSplitConfig"},
- "max":6,
- "min":1
- },
- "ScheduledSplitsLaunchConfig":{
- "type":"structure",
- "required":["steps"],
- "members":{
- "steps":{"shape":"ScheduledSplitConfigList"}
- }
- },
- "ScheduledSplitsLaunchDefinition":{
- "type":"structure",
- "members":{
- "steps":{"shape":"ScheduledStepList"}
- }
- },
- "ScheduledStepList":{
- "type":"list",
- "member":{"shape":"ScheduledSplit"},
- "max":6,
- "min":1
- },
- "Segment":{
- "type":"structure",
- "required":[
- "arn",
- "createdTime",
- "lastUpdatedTime",
- "name",
- "pattern"
- ],
- "members":{
- "arn":{"shape":"SegmentArn"},
- "createdTime":{"shape":"Timestamp"},
- "description":{"shape":"Description"},
- "experimentCount":{"shape":"Long"},
- "lastUpdatedTime":{"shape":"Timestamp"},
- "launchCount":{"shape":"Long"},
- "name":{"shape":"SegmentName"},
- "pattern":{
- "shape":"SegmentPattern",
- "jsonvalue":true
- },
- "tags":{"shape":"TagMap"}
- }
- },
- "SegmentArn":{
- "type":"string",
- "max":2048,
- "min":0,
- "pattern":"arn:[^:]*:[^:]*:[^:]*:[^:]*:segment/[-a-zA-Z0-9._]*"
- },
- "SegmentList":{
- "type":"list",
- "member":{"shape":"Segment"}
- },
- "SegmentName":{
- "type":"string",
- "max":64,
- "min":1,
- "pattern":"^[-a-zA-Z0-9._]*$"
- },
- "SegmentOverride":{
- "type":"structure",
- "required":[
- "evaluationOrder",
- "segment",
- "weights"
- ],
- "members":{
- "evaluationOrder":{"shape":"Long"},
- "segment":{"shape":"SegmentRef"},
- "weights":{"shape":"GroupToWeightMap"}
- }
- },
- "SegmentOverridesList":{
- "type":"list",
- "member":{"shape":"SegmentOverride"},
- "max":6,
- "min":0
- },
- "SegmentPattern":{
- "type":"string",
- "max":1024,
- "min":1
- },
- "SegmentRef":{
- "type":"string",
- "max":2048,
- "min":0,
- "pattern":"(^[-a-zA-Z0-9._]*$)|(arn:[^:]*:[^:]*:[^:]*:[^:]*:segment/[-a-zA-Z0-9._]*)"
- },
- "SegmentReferenceResourceType":{
- "type":"string",
- "enum":[
- "EXPERIMENT",
- "LAUNCH"
- ]
- },
- "ServiceQuotaExceededException":{
- "type":"structure",
- "members":{
- "message":{"shape":"String"},
- "quotaCode":{"shape":"String"},
- "resourceId":{"shape":"String"},
- "resourceType":{"shape":"String"},
- "serviceCode":{"shape":"String"}
- },
- "error":{
- "httpStatusCode":402,
- "senderFault":true
- },
- "exception":true
- },
- "ServiceUnavailableException":{
- "type":"structure",
- "members":{
- "message":{"shape":"String"}
- },
- "error":{"httpStatusCode":503},
- "exception":true,
- "fault":true
- },
- "SplitWeight":{
- "type":"long",
- "max":100000,
- "min":0
- },
- "StartExperimentRequest":{
- "type":"structure",
- "required":[
- "analysisCompleteTime",
- "experiment",
- "project"
- ],
- "members":{
- "analysisCompleteTime":{"shape":"Timestamp"},
- "experiment":{
- "shape":"ExperimentName",
- "location":"uri",
- "locationName":"experiment"
- },
- "project":{
- "shape":"ProjectRef",
- "location":"uri",
- "locationName":"project"
- }
- }
- },
- "StartExperimentResponse":{
- "type":"structure",
- "members":{
- "startedTime":{"shape":"Timestamp"}
- }
- },
- "StartLaunchRequest":{
- "type":"structure",
- "required":[
- "launch",
- "project"
- ],
- "members":{
- "launch":{
- "shape":"LaunchName",
- "location":"uri",
- "locationName":"launch"
- },
- "project":{
- "shape":"ProjectRef",
- "location":"uri",
- "locationName":"project"
- }
- }
- },
- "StartLaunchResponse":{
- "type":"structure",
- "required":["launch"],
- "members":{
- "launch":{"shape":"Launch"}
- }
- },
- "StopExperimentRequest":{
- "type":"structure",
- "required":[
- "experiment",
- "project"
- ],
- "members":{
- "desiredState":{"shape":"ExperimentStopDesiredState"},
- "experiment":{
- "shape":"ExperimentName",
- "location":"uri",
- "locationName":"experiment"
- },
- "project":{
- "shape":"ProjectRef",
- "location":"uri",
- "locationName":"project"
- },
- "reason":{"shape":"Description"}
- }
- },
- "StopExperimentResponse":{
- "type":"structure",
- "members":{
- "endedTime":{"shape":"Timestamp"}
- }
- },
- "StopLaunchRequest":{
- "type":"structure",
- "required":[
- "launch",
- "project"
- ],
- "members":{
- "desiredState":{"shape":"LaunchStopDesiredState"},
- "launch":{
- "shape":"LaunchName",
- "location":"uri",
- "locationName":"launch"
- },
- "project":{
- "shape":"ProjectRef",
- "location":"uri",
- "locationName":"project"
- },
- "reason":{"shape":"Description"}
- }
- },
- "StopLaunchResponse":{
- "type":"structure",
- "members":{
- "endedTime":{"shape":"Timestamp"}
- }
- },
- "String":{"type":"string"},
- "TagKey":{
- "type":"string",
- "max":128,
- "min":1,
- "pattern":"^(?!aws:)[a-zA-Z+-=._:/]+$"
- },
- "TagKeyList":{
- "type":"list",
- "member":{"shape":"TagKey"},
- "max":50,
- "min":0
- },
- "TagMap":{
- "type":"map",
- "key":{"shape":"TagKey"},
- "value":{"shape":"TagValue"}
- },
- "TagResourceRequest":{
- "type":"structure",
- "required":[
- "resourceArn",
- "tags"
- ],
- "members":{
- "resourceArn":{
- "shape":"Arn",
- "location":"uri",
- "locationName":"resourceArn"
- },
- "tags":{"shape":"TagMap"}
- }
- },
- "TagResourceResponse":{
- "type":"structure",
- "members":{}
- },
- "TagValue":{
- "type":"string",
- "max":256,
- "min":0
- },
- "TestSegmentPatternRequest":{
- "type":"structure",
- "required":[
- "pattern",
- "payload"
- ],
- "members":{
- "pattern":{
- "shape":"SegmentPattern",
- "jsonvalue":true
- },
- "payload":{
- "shape":"JsonValue",
- "jsonvalue":true
- }
- }
- },
- "TestSegmentPatternResponse":{
- "type":"structure",
- "required":["match"],
- "members":{
- "match":{"shape":"Boolean"}
- }
- },
- "ThrottlingException":{
- "type":"structure",
- "members":{
- "message":{"shape":"String"},
- "quotaCode":{"shape":"String"},
- "serviceCode":{"shape":"String"}
- },
- "error":{
- "httpStatusCode":429,
- "senderFault":true
- },
- "exception":true
- },
- "Timestamp":{"type":"timestamp"},
- "TimestampList":{
- "type":"list",
- "member":{"shape":"Timestamp"},
- "max":100800,
- "min":0
- },
- "Treatment":{
- "type":"structure",
- "required":["name"],
- "members":{
- "description":{"shape":"Description"},
- "featureVariations":{"shape":"FeatureToVariationMap"},
- "name":{"shape":"TreatmentName"}
- }
- },
- "TreatmentConfig":{
- "type":"structure",
- "required":[
- "feature",
- "name",
- "variation"
- ],
- "members":{
- "description":{"shape":"Description"},
- "feature":{"shape":"FeatureName"},
- "name":{"shape":"TreatmentName"},
- "variation":{"shape":"VariationName"}
- }
- },
- "TreatmentConfigList":{
- "type":"list",
- "member":{"shape":"TreatmentConfig"},
- "max":5,
- "min":0
- },
- "TreatmentList":{
- "type":"list",
- "member":{"shape":"Treatment"},
- "max":5,
- "min":2
- },
- "TreatmentName":{
- "type":"string",
- "max":127,
- "min":1,
- "pattern":"^[-a-zA-Z0-9._]*$"
- },
- "TreatmentNameList":{
- "type":"list",
- "member":{"shape":"TreatmentName"},
- "max":5,
- "min":1
- },
- "TreatmentToWeightMap":{
- "type":"map",
- "key":{"shape":"TreatmentName"},
- "value":{"shape":"SplitWeight"}
- },
- "UntagResourceRequest":{
- "type":"structure",
- "required":[
- "resourceArn",
- "tagKeys"
- ],
- "members":{
- "resourceArn":{
- "shape":"Arn",
- "location":"uri",
- "locationName":"resourceArn"
- },
- "tagKeys":{
- "shape":"TagKeyList",
- "location":"querystring",
- "locationName":"tagKeys"
- }
- }
- },
- "UntagResourceResponse":{
- "type":"structure",
- "members":{}
- },
- "UpdateExperimentRequest":{
- "type":"structure",
- "required":[
- "experiment",
- "project"
- ],
- "members":{
- "description":{"shape":"Description"},
- "experiment":{
- "shape":"ExperimentName",
- "location":"uri",
- "locationName":"experiment"
- },
- "metricGoals":{"shape":"MetricGoalConfigList"},
- "onlineAbConfig":{"shape":"OnlineAbConfig"},
- "project":{
- "shape":"ProjectRef",
- "location":"uri",
- "locationName":"project"
- },
- "randomizationSalt":{"shape":"RandomizationSalt"},
- "removeSegment":{"shape":"PrimitiveBoolean"},
- "samplingRate":{
- "shape":"SplitWeight",
- "box":true
- },
- "segment":{"shape":"SegmentRef"},
- "treatments":{"shape":"TreatmentConfigList"}
- }
- },
- "UpdateExperimentResponse":{
- "type":"structure",
- "required":["experiment"],
- "members":{
- "experiment":{"shape":"Experiment"}
- }
- },
- "UpdateFeatureRequest":{
- "type":"structure",
- "required":[
- "feature",
- "project"
- ],
- "members":{
- "addOrUpdateVariations":{"shape":"VariationConfigsList"},
- "defaultVariation":{"shape":"VariationName"},
- "description":{"shape":"Description"},
- "entityOverrides":{"shape":"EntityOverrideMap"},
- "evaluationStrategy":{"shape":"FeatureEvaluationStrategy"},
- "feature":{
- "shape":"FeatureName",
- "location":"uri",
- "locationName":"feature"
- },
- "project":{
- "shape":"ProjectRef",
- "location":"uri",
- "locationName":"project"
- },
- "removeVariations":{"shape":"VariationNameList"}
- }
- },
- "UpdateFeatureResponse":{
- "type":"structure",
- "required":["feature"],
- "members":{
- "feature":{"shape":"Feature"}
- }
- },
- "UpdateLaunchRequest":{
- "type":"structure",
- "required":[
- "launch",
- "project"
- ],
- "members":{
- "description":{"shape":"Description"},
- "groups":{"shape":"LaunchGroupConfigList"},
- "launch":{
- "shape":"LaunchName",
- "location":"uri",
- "locationName":"launch"
- },
- "metricMonitors":{"shape":"MetricMonitorConfigList"},
- "project":{
- "shape":"ProjectRef",
- "location":"uri",
- "locationName":"project"
- },
- "randomizationSalt":{"shape":"RandomizationSalt"},
- "scheduledSplitsConfig":{"shape":"ScheduledSplitsLaunchConfig"}
- }
- },
- "UpdateLaunchResponse":{
- "type":"structure",
- "required":["launch"],
- "members":{
- "launch":{"shape":"Launch"}
- }
- },
- "UpdateProjectDataDeliveryRequest":{
- "type":"structure",
- "required":["project"],
- "members":{
- "cloudWatchLogs":{"shape":"CloudWatchLogsDestinationConfig"},
- "project":{
- "shape":"ProjectRef",
- "location":"uri",
- "locationName":"project"
- },
- "s3Destination":{"shape":"S3DestinationConfig"}
- }
- },
- "UpdateProjectDataDeliveryResponse":{
- "type":"structure",
- "required":["project"],
- "members":{
- "project":{"shape":"Project"}
- }
- },
- "UpdateProjectRequest":{
- "type":"structure",
- "required":["project"],
- "members":{
- "appConfigResource":{"shape":"ProjectAppConfigResourceConfig"},
- "description":{"shape":"Description"},
- "project":{
- "shape":"ProjectRef",
- "location":"uri",
- "locationName":"project"
- }
- }
- },
- "UpdateProjectResponse":{
- "type":"structure",
- "required":["project"],
- "members":{
- "project":{"shape":"Project"}
- }
- },
- "Uuid":{
- "type":"string",
- "max":36,
- "min":36,
- "pattern":"[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}"
- },
- "ValidationException":{
- "type":"structure",
- "members":{
- "fieldList":{"shape":"ValidationExceptionFieldList"},
- "message":{"shape":"String"},
- "reason":{"shape":"ValidationExceptionReason"}
- },
- "error":{
- "httpStatusCode":400,
- "senderFault":true
- },
- "exception":true
- },
- "ValidationExceptionField":{
- "type":"structure",
- "required":[
- "message",
- "name"
- ],
- "members":{
- "message":{"shape":"String"},
- "name":{"shape":"String"}
- }
- },
- "ValidationExceptionFieldList":{
- "type":"list",
- "member":{"shape":"ValidationExceptionField"}
- },
- "ValidationExceptionReason":{
- "type":"string",
- "enum":[
- "unknownOperation",
- "cannotParse",
- "fieldValidationFailed",
- "other"
- ]
- },
- "VariableValue":{
- "type":"structure",
- "members":{
- "boolValue":{"shape":"Boolean"},
- "doubleValue":{"shape":"Double"},
- "longValue":{"shape":"VariableValueLongValueLong"},
- "stringValue":{"shape":"VariableValueStringValueString"}
- },
- "union":true
- },
- "VariableValueLongValueLong":{
- "type":"long",
- "box":true,
- "max":9007199254740991,
- "min":-9007199254740991
- },
- "VariableValueStringValueString":{
- "type":"string",
- "max":512,
- "min":0
- },
- "Variation":{
- "type":"structure",
- "members":{
- "name":{"shape":"VariationName"},
- "value":{"shape":"VariableValue"}
- }
- },
- "VariationConfig":{
- "type":"structure",
- "required":[
- "name",
- "value"
- ],
- "members":{
- "name":{"shape":"VariationName"},
- "value":{"shape":"VariableValue"}
- }
- },
- "VariationConfigsList":{
- "type":"list",
- "member":{"shape":"VariationConfig"},
- "max":5,
- "min":1
- },
- "VariationName":{
- "type":"string",
- "max":127,
- "min":1,
- "pattern":"^[-a-zA-Z0-9._]*$"
- },
- "VariationNameList":{
- "type":"list",
- "member":{"shape":"VariationName"},
- "max":5,
- "min":0
- },
- "VariationValueType":{
- "type":"string",
- "enum":[
- "STRING",
- "LONG",
- "DOUBLE",
- "BOOLEAN"
- ]
- },
- "VariationsList":{
- "type":"list",
- "member":{"shape":"Variation"}
- }
- }
-}
diff --git a/apis/evidently/2021-02-01/docs-2.json b/apis/evidently/2021-02-01/docs-2.json
deleted file mode 100644
index 512c6213d23..00000000000
--- a/apis/evidently/2021-02-01/docs-2.json
+++ /dev/null
@@ -1,1625 +0,0 @@
-{
- "version": "2.0",
- "service": "
You can use Amazon CloudWatch Evidently to safely validate new features by serving them to a specified percentage of your users while you roll out the feature. You can monitor the performance of the new feature to help you decide when to ramp up traffic to your users. This helps you reduce risk and identify unintended consequences before you fully launch the feature.
You can also conduct A/B experiments to make feature design decisions based on evidence and data. An experiment can test as many as five variations at once. Evidently collects experiment data and analyzes it using statistical methods. It also provides clear recommendations about which variations perform better. You can test both user-facing features and backend features.
",
- "operations": {
- "BatchEvaluateFeature": "This operation assigns feature variation to user sessions. For each user session, you pass in an entityID that represents the user. Evidently then checks the evaluation rules and assigns the variation.
The first rules that are evaluated are the override rules. If the user's entityID matches an override rule, the user is served the variation specified by that rule.
Next, if there is a launch of the feature, the user might be assigned to a variation in the launch. The chance of this depends on the percentage of users that are allocated to that launch. If the user is enrolled in the launch, the variation they are served depends on the allocation of the various feature variations used for the launch.
If the user is not assigned to a launch, and there is an ongoing experiment for this feature, the user might be assigned to a variation in the experiment. The chance of this depends on the percentage of users that are allocated to that experiment. If the user is enrolled in the experiment, the variation they are served depends on the allocation of the various feature variations used for the experiment.
If the user is not assigned to a launch or experiment, they are served the default variation.
",
- "CreateExperiment": "Creates an Evidently experiment . Before you create an experiment, you must create the feature to use for the experiment.
An experiment helps you make feature design decisions based on evidence and data. An experiment can test as many as five variations at once. Evidently collects experiment data and analyzes it by statistical methods, and provides clear recommendations about which variations perform better.
You can optionally specify a segment to have the experiment consider only certain audience types in the experiment, such as using only user sessions from a certain location or who use a certain internet browser.
Don't use this operation to update an existing experiment. Instead, use UpdateExperiment .
",
- "CreateFeature": "Creates an Evidently feature that you want to launch or test. You can define up to five variations of a feature, and use these variations in your launches and experiments. A feature must be created in a project. For information about creating a project, see CreateProject .
Don't use this operation to update an existing feature. Instead, use UpdateFeature .
",
- "CreateLaunch": "Creates a launch of a given feature. Before you create a launch, you must create the feature to use for the launch.
You can use a launch to safely validate new features by serving them to a specified percentage of your users while you roll out the feature. You can monitor the performance of the new feature to help you decide when to ramp up traffic to more users. This helps you reduce risk and identify unintended consequences before you fully launch the feature.
Don't use this operation to update an existing launch. Instead, use UpdateLaunch .
",
- "CreateProject": "Creates a project, which is the logical object in Evidently that can contain features, launches, and experiments. Use projects to group similar features together.
To update an existing project, use UpdateProject .
",
- "CreateSegment": "Use this operation to define a segment of your audience. A segment is a portion of your audience that share one or more characteristics. Examples could be Chrome browser users, users in Europe, or Firefox browser users in Europe who also fit other criteria that your application collects, such as age.
Using a segment in an experiment limits that experiment to evaluate only the users who match the segment criteria. Using one or more segments in a launch allows you to define different traffic splits for the different audience segments.
For more information about segment pattern syntax, see Segment rule pattern syntax .
The pattern that you define for a segment is matched against the value of evaluationContext, which is passed into Evidently in the EvaluateFeature operation, when Evidently assigns a feature variation to a user.
",
- "DeleteExperiment": "Deletes an Evidently experiment. The feature used for the experiment is not deleted.
To stop an experiment without deleting it, use StopExperiment .
",
- "DeleteFeature": "Deletes an Evidently feature.
",
- "DeleteLaunch": "Deletes an Evidently launch. The feature used for the launch is not deleted.
To stop a launch without deleting it, use StopLaunch .
",
- "DeleteProject": "Deletes an Evidently project. Before you can delete a project, you must delete all the features that the project contains. To delete a feature, use DeleteFeature .
",
- "DeleteSegment": "Deletes a segment. You can't delete a segment that is being used in a launch or experiment, even if that launch or experiment is not currently running.
",
- "EvaluateFeature": "This operation assigns a feature variation to one given user session. You pass in an entityID that represents the user. Evidently then checks the evaluation rules and assigns the variation.
The first rules that are evaluated are the override rules. If the user's entityID matches an override rule, the user is served the variation specified by that rule.
If there is a current launch with this feature that uses segment overrides, and if the user session's evaluationContext matches a segment rule defined in a segment override, the configuration in the segment overrides is used. For more information about segments, see CreateSegment and Use segments to focus your audience .
If there is a launch with no segment overrides, the user might be assigned to a variation in the launch. The chance of this depends on the percentage of users that are allocated to that launch. If the user is enrolled in the launch, the variation they are served depends on the allocation of the various feature variations used for the launch.
If the user is not assigned to a launch, and there is an ongoing experiment for this feature, the user might be assigned to a variation in the experiment. The chance of this depends on the percentage of users that are allocated to that experiment.
If the experiment uses a segment, then only user sessions with evaluationContext values that match the segment rule are used in the experiment.
If the user is enrolled in the experiment, the variation they are served depends on the allocation of the various feature variations used for the experiment.
If the user is not assigned to a launch or experiment, they are served the default variation.
",
- "GetExperiment": "Returns the details about one experiment. You must already know the experiment name. To retrieve a list of experiments in your account, use ListExperiments .
",
- "GetExperimentResults": "Retrieves the results of a running or completed experiment. No results are available until there have been 100 events for each variation and at least 10 minutes have passed since the start of the experiment. To increase the statistical power, Evidently performs an additional offline p-value analysis at the end of the experiment. Offline p-value analysis can detect statistical significance in some cases where the anytime p-values used during the experiment do not find statistical significance.
Experiment results are available up to 63 days after the start of the experiment. They are not available after that because of CloudWatch data retention policies.
",
- "GetFeature": "Returns the details about one feature. You must already know the feature name. To retrieve a list of features in your account, use ListFeatures .
",
- "GetLaunch": "Returns the details about one launch. You must already know the launch name. To retrieve a list of launches in your account, use ListLaunches .
",
- "GetProject": "Returns the details about one launch. You must already know the project name. To retrieve a list of projects in your account, use ListProjects .
",
- "GetSegment": "Returns information about the specified segment. Specify the segment you want to view by specifying its ARN.
",
- "ListExperiments": "Returns configuration details about all the experiments in the specified project.
",
- "ListFeatures": "Returns configuration details about all the features in the specified project.
",
- "ListLaunches": "Returns configuration details about all the launches in the specified project.
",
- "ListProjects": "Returns configuration details about all the projects in the current Region in your account.
",
- "ListSegmentReferences": "Use this operation to find which experiments or launches are using a specified segment.
",
- "ListSegments": "Returns a list of audience segments that you have created in your account in this Region.
",
- "ListTagsForResource": "Displays the tags associated with an Evidently resource.
",
- "PutProjectEvents": "Sends performance events to Evidently. These events can be used to evaluate a launch or an experiment.
",
- "StartExperiment": "Starts an existing experiment. To create an experiment, use CreateExperiment .
",
- "StartLaunch": "Starts an existing launch. To create a launch, use CreateLaunch .
",
- "StopExperiment": "Stops an experiment that is currently running. If you stop an experiment, you can't resume it or restart it.
",
- "StopLaunch": "Stops a launch that is currently running. After you stop a launch, you will not be able to resume it or restart it. Also, it will not be evaluated as a rule for traffic allocation, and the traffic that was allocated to the launch will instead be available to the feature's experiment, if there is one. Otherwise, all traffic will be served the default variation after the launch is stopped.
",
- "TagResource": "Assigns one or more tags (key-value pairs) to the specified CloudWatch Evidently resource. Projects, features, launches, and experiments can be tagged.
Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values.
Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of characters.
You can use the TagResource action with a resource that already has tags. If you specify a new tag key for the resource, this tag is appended to the list of tags associated with the alarm. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag.
You can associate as many as 50 tags with a resource.
For more information, see Tagging Amazon Web Services resources .
",
- "TestSegmentPattern": "Use this operation to test a rules pattern that you plan to use to create an audience segment. For more information about segments, see CreateSegment .
",
- "UntagResource": "Removes one or more tags from the specified resource.
",
- "UpdateExperiment": "Updates an Evidently experiment.
Don't use this operation to update an experiment's tag. Instead, use TagResource .
",
- "UpdateFeature": "Updates an existing feature.
You can't use this operation to update the tags of an existing feature. Instead, use TagResource .
",
- "UpdateLaunch": "Updates a launch of a given feature.
Don't use this operation to update the tags of an existing launch. Instead, use TagResource .
",
- "UpdateProject": "Updates the description of an existing project.
To create a new project, use CreateProject .
Don't use this operation to update the data storage options of a project. Instead, use UpdateProjectDataDelivery .
Don't use this operation to update the tags of a project. Instead, use TagResource .
",
- "UpdateProjectDataDelivery": "Updates the data storage options for this project. If you store evaluation events, you an keep them and analyze them on your own. If you choose not to store evaluation events, Evidently deletes them after using them to produce metrics and other experiment results that you can view.
You can't specify both cloudWatchLogs and s3Destination in the same operation.
"
- },
- "shapes": {
- "AccessDeniedException": {
- "base": "You do not have sufficient permissions to perform this action.
",
- "refs": {}
- },
- "AppConfigResourceId": {
- "base": null,
- "refs": {
- "ProjectAppConfigResource$applicationId": "The ID of the AppConfig application to use for client-side evaluation.
",
- "ProjectAppConfigResource$configurationProfileId": "The ID of the AppConfig profile to use for client-side evaluation.
",
- "ProjectAppConfigResource$environmentId": "The ID of the AppConfig environment to use for client-side evaluation. This must be an environment that is within the application that you specify for applicationId.
",
- "ProjectAppConfigResourceConfig$applicationId": "The ID of the AppConfig application to use for client-side evaluation.
",
- "ProjectAppConfigResourceConfig$environmentId": "The ID of the AppConfig environment to use for client-side evaluation. This must be an environment that is within the application that you specify for applicationId.
"
- }
- },
- "Arn": {
- "base": null,
- "refs": {
- "EvaluationResult$project": "The name or ARN of the project that contains the feature being evaluated.
",
- "FeatureSummary$arn": "The ARN of the feature.
",
- "ListTagsForResourceRequest$resourceArn": "The ARN of the resource that you want to see the tags of.
",
- "TagResourceRequest$resourceArn": "The ARN of the CloudWatch Evidently resource that you're adding tags to.
",
- "UntagResourceRequest$resourceArn": "The ARN of the CloudWatch Evidently resource that you're removing tags from.
"
- }
- },
- "BatchEvaluateFeatureRequest": {
- "base": null,
- "refs": {}
- },
- "BatchEvaluateFeatureResponse": {
- "base": null,
- "refs": {}
- },
- "Boolean": {
- "base": null,
- "refs": {
- "TestSegmentPatternResponse$match": "Returns true if the pattern matches the payload.
",
- "VariableValue$boolValue": "If this feature uses the Boolean variation type, this field contains the Boolean value of this variation.
"
- }
- },
- "ChangeDirectionEnum": {
- "base": null,
- "refs": {
- "MetricGoal$desiredChange": " INCREASE means that a variation with a higher number for this metric is performing better.
DECREASE means that a variation with a lower number for this metric is performing better.
",
- "MetricGoalConfig$desiredChange": " INCREASE means that a variation with a higher number for this metric is performing better.
DECREASE means that a variation with a lower number for this metric is performing better.
"
- }
- },
- "CloudWatchLogsDestination": {
- "base": "A structure containing the CloudWatch Logs log group where the project stores evaluation events.
",
- "refs": {
- "ProjectDataDelivery$cloudWatchLogs": "If the project stores evaluation events in CloudWatch Logs, this structure stores the log group name.
"
- }
- },
- "CloudWatchLogsDestinationConfig": {
- "base": "A structure containing the CloudWatch Logs log group where the project stores evaluation events.
",
- "refs": {
- "ProjectDataDeliveryConfig$cloudWatchLogs": "If the project stores evaluation events in CloudWatch Logs, this structure stores the log group name.
",
- "UpdateProjectDataDeliveryRequest$cloudWatchLogs": "A structure containing the CloudWatch Logs log group where you want to store evaluation events.
"
- }
- },
- "ConflictException": {
- "base": "A resource was in an inconsistent state during an update or a deletion.
",
- "refs": {}
- },
- "CreateExperimentRequest": {
- "base": null,
- "refs": {}
- },
- "CreateExperimentResponse": {
- "base": null,
- "refs": {}
- },
- "CreateFeatureRequest": {
- "base": null,
- "refs": {}
- },
- "CreateFeatureResponse": {
- "base": null,
- "refs": {}
- },
- "CreateLaunchRequest": {
- "base": null,
- "refs": {}
- },
- "CreateLaunchResponse": {
- "base": null,
- "refs": {}
- },
- "CreateProjectRequest": {
- "base": null,
- "refs": {}
- },
- "CreateProjectResponse": {
- "base": null,
- "refs": {}
- },
- "CreateSegmentRequest": {
- "base": null,
- "refs": {}
- },
- "CreateSegmentResponse": {
- "base": null,
- "refs": {}
- },
- "CwDimensionSafeName": {
- "base": null,
- "refs": {
- "ExperimentReport$metricName": "The name of the metric that is analyzed in this experiment report.
",
- "ExperimentResultsData$metricName": "The name of the metric.
",
- "MetricDefinition$name": "The name of the metric.
",
- "MetricDefinitionConfig$name": "A name for the metric.
",
- "MetricNameList$member": null
- }
- },
- "CwLogGroupSafeName": {
- "base": null,
- "refs": {
- "CloudWatchLogsDestination$logGroup": "The name of the log group where the project stores evaluation events.
",
- "CloudWatchLogsDestinationConfig$logGroup": "The name of the log group where the project stores evaluation events.
"
- }
- },
- "DeleteExperimentRequest": {
- "base": null,
- "refs": {}
- },
- "DeleteExperimentResponse": {
- "base": null,
- "refs": {}
- },
- "DeleteFeatureRequest": {
- "base": null,
- "refs": {}
- },
- "DeleteFeatureResponse": {
- "base": null,
- "refs": {}
- },
- "DeleteLaunchRequest": {
- "base": null,
- "refs": {}
- },
- "DeleteLaunchResponse": {
- "base": null,
- "refs": {}
- },
- "DeleteProjectRequest": {
- "base": null,
- "refs": {}
- },
- "DeleteProjectResponse": {
- "base": null,
- "refs": {}
- },
- "DeleteSegmentRequest": {
- "base": null,
- "refs": {}
- },
- "DeleteSegmentResponse": {
- "base": null,
- "refs": {}
- },
- "Description": {
- "base": null,
- "refs": {
- "CreateExperimentRequest$description": "An optional description of the experiment.
",
- "CreateFeatureRequest$description": "An optional description of the feature.
",
- "CreateLaunchRequest$description": "An optional description for the launch.
",
- "CreateProjectRequest$description": "An optional description of the project.
",
- "CreateSegmentRequest$description": "An optional description for this segment.
",
- "Experiment$description": "A description of the experiment.
",
- "Experiment$statusReason": "If the experiment was stopped, this is the string that was entered by the person who stopped the experiment, to explain why it was stopped.
",
- "Feature$description": "The description of the feature.
",
- "Launch$description": "The description of the launch.
",
- "Launch$statusReason": "If the launch was stopped, this is the string that was entered by the person who stopped the launch, to explain why it was stopped.
",
- "LaunchGroup$description": "A description of the launch group.
",
- "LaunchGroupConfig$description": "A description of the launch group.
",
- "Project$description": "The user-entered description of the project.
",
- "ProjectSummary$description": "The description of the project.
",
- "Segment$description": "The customer-created description for this segment.
",
- "StopExperimentRequest$reason": "A string that describes why you are stopping the experiment.
",
- "StopLaunchRequest$reason": "A string that describes why you are stopping the launch.
",
- "Treatment$description": "The description of the treatment.
",
- "TreatmentConfig$description": "A description for this treatment.
",
- "UpdateExperimentRequest$description": "An optional description of the experiment.
",
- "UpdateFeatureRequest$description": "An optional description of the feature.
",
- "UpdateLaunchRequest$description": "An optional description for the launch.
",
- "UpdateProjectRequest$description": "An optional description of the project.
"
- }
- },
- "Double": {
- "base": null,
- "refs": {
- "DoubleValueList$member": null,
- "VariableValue$doubleValue": "If this feature uses the double integer variation type, this field contains the double integer value of this variation.
"
- }
- },
- "DoubleValueList": {
- "base": null,
- "refs": {
- "ExperimentResultsData$values": "The values for the metricName that were recorded in the experiment.
"
- }
- },
- "EntityId": {
- "base": null,
- "refs": {
- "EntityOverrideMap$key": null,
- "EvaluateFeatureRequest$entityId": "An internal ID that represents a unique user of the application. This entityID is checked against any override rules assigned for this feature.
",
- "EvaluationRequest$entityId": "An internal ID that represents a unique user session of the application. This entityID is checked against any override rules assigned for this feature.
",
- "EvaluationResult$entityId": "An internal ID that represents a unique user session of the application.
"
- }
- },
- "EntityOverrideMap": {
- "base": null,
- "refs": {
- "CreateFeatureRequest$entityOverrides": "Specify users that should always be served a specific variation of a feature. Each user is specified by a key-value pair . For each key, specify a user by entering their user ID, account ID, or some other identifier. For the value, specify the name of the variation that they are to be served.
This parameter is limited to 2500 overrides or a total of 40KB. The 40KB limit includes an overhead of 6 bytes per override.
",
- "Feature$entityOverrides": "A set of key-value pairs that specify users who should always be served a specific variation of a feature. Each key specifies a user using their user ID, account ID, or some other identifier. The value specifies the name of the variation that the user is to be served.
For the override to be successful, the value of the key must match the entityId used in the EvaluateFeature operation.
",
- "UpdateFeatureRequest$entityOverrides": "Specified users that should always be served a specific variation of a feature. Each user is specified by a key-value pair . For each key, specify a user by entering their user ID, account ID, or some other identifier. For the value, specify the name of the variation that they are to be served.
This parameter is limited to 2500 overrides or a total of 40KB. The 40KB limit includes an overhead of 6 bytes per override.
"
- }
- },
- "ErrorCodeEnum": {
- "base": "Recommended errors from AWS API standards: https://w.amazon.com/bin/view/AWS/API_Standards/Exceptions ErrorCode = "ValidationException"|"ServiceQuotaExceededException"|"AccessDeniedException"|"ResourceNotFoundException"|"ConflictException"|"ThrottlingException"|"InternalServerException"|string;
",
- "refs": {
- "PutProjectEventsResultEntry$errorCode": "If the PutProjectEvents operation has an error, the error code is returned here.
"
- }
- },
- "ErrorMessage": {
- "base": null,
- "refs": {
- "PutProjectEventsResultEntry$errorMessage": "If the PutProjectEvents operation has an error, the error message is returned here.
"
- }
- },
- "EvaluateFeatureRequest": {
- "base": null,
- "refs": {}
- },
- "EvaluateFeatureResponse": {
- "base": null,
- "refs": {}
- },
- "EvaluationRequest": {
- "base": "This structure assigns a feature variation to one user session.
",
- "refs": {
- "EvaluationRequestsList$member": null
- }
- },
- "EvaluationRequestsList": {
- "base": null,
- "refs": {
- "BatchEvaluateFeatureRequest$requests": "An array of structures, where each structure assigns a feature variation to one user session.
"
- }
- },
- "EvaluationResult": {
- "base": "This structure displays the results of one feature evaluation assignment to one user session.
",
- "refs": {
- "EvaluationResultsList$member": null
- }
- },
- "EvaluationResultsList": {
- "base": null,
- "refs": {
- "BatchEvaluateFeatureResponse$results": "An array of structures, where each structure displays the results of one feature evaluation assignment to one user session.
"
- }
- },
- "EvaluationRule": {
- "base": "A structure that contains the information about an evaluation rule for this feature, if it is used in a launch or experiment.
",
- "refs": {
- "EvaluationRulesList$member": null
- }
- },
- "EvaluationRulesList": {
- "base": null,
- "refs": {
- "Feature$evaluationRules": "An array of structures that define the evaluation rules for the feature.
",
- "FeatureSummary$evaluationRules": "An array of structures that define
"
- }
- },
- "Event": {
- "base": "A structure that contains the information about one evaluation event or custom event sent to Evidently. This is a JSON payload. If this event specifies a pre-defined event type, the payload must follow the defined event schema.
",
- "refs": {
- "EventList$member": null
- }
- },
- "EventList": {
- "base": null,
- "refs": {
- "PutProjectEventsRequest$events": "An array of event structures that contain the performance data that is being sent to Evidently.
"
- }
- },
- "EventType": {
- "base": null,
- "refs": {
- "Event$type": " aws.evidently.evaluation specifies an evaluation event, which determines which feature variation that a user sees. aws.evidently.custom specifies a custom event, which generates metrics from user actions such as clicks and checkouts.
"
- }
- },
- "Experiment": {
- "base": "A structure containing the configuration details of an experiment.
",
- "refs": {
- "CreateExperimentResponse$experiment": "A structure containing the configuration details of the experiment that you created.
",
- "ExperimentList$member": null,
- "GetExperimentResponse$experiment": "A structure containing the configuration details of the experiment.
",
- "UpdateExperimentResponse$experiment": "A structure containing the configuration details of the experiment that was updated.
"
- }
- },
- "ExperimentArn": {
- "base": null,
- "refs": {
- "Experiment$arn": "The ARN of the experiment.
"
- }
- },
- "ExperimentBaseStat": {
- "base": null,
- "refs": {
- "GetExperimentResultsRequest$baseStat": "The statistic used to calculate experiment results. Currently the only valid value is mean, which uses the mean of the collected values as the statistic.
"
- }
- },
- "ExperimentExecution": {
- "base": "This structure contains the date and time that the experiment started and ended.
",
- "refs": {
- "Experiment$execution": "A structure that contains the date and time that the experiment started and ended.
"
- }
- },
- "ExperimentList": {
- "base": null,
- "refs": {
- "ListExperimentsResponse$experiments": "An array of structures that contain the configuration details of the experiments in the specified project.
"
- }
- },
- "ExperimentName": {
- "base": null,
- "refs": {
- "CreateExperimentRequest$name": "A name for the new experiment.
",
- "DeleteExperimentRequest$experiment": "The name of the experiment to delete.
",
- "Experiment$name": "The name of the experiment.
",
- "GetExperimentRequest$experiment": "The name of the experiment that you want to see the details of.
",
- "GetExperimentResultsRequest$experiment": "The name of the experiment to retrieve the results of.
",
- "StartExperimentRequest$experiment": "The name of the experiment to start.
",
- "StopExperimentRequest$experiment": "The name of the experiment to stop.
",
- "UpdateExperimentRequest$experiment": "The name of the experiment to update.
"
- }
- },
- "ExperimentReport": {
- "base": "A structure that contains results of an experiment.
",
- "refs": {
- "ExperimentReportList$member": null
- }
- },
- "ExperimentReportList": {
- "base": null,
- "refs": {
- "GetExperimentResultsResponse$reports": "An array of structures that include the reports that you requested.
"
- }
- },
- "ExperimentReportName": {
- "base": null,
- "refs": {
- "ExperimentReport$reportName": "The type of analysis used for this report.
",
- "ExperimentReportNameList$member": null
- }
- },
- "ExperimentReportNameList": {
- "base": null,
- "refs": {
- "GetExperimentResultsRequest$reportNames": "The names of the report types that you want to see. Currently, BayesianInference is the only valid value.
"
- }
- },
- "ExperimentResultRequestType": {
- "base": null,
- "refs": {
- "ExperimentResultRequestTypeList$member": null
- }
- },
- "ExperimentResultRequestTypeList": {
- "base": null,
- "refs": {
- "GetExperimentResultsRequest$resultStats": "The statistics that you want to see in the returned results.
PValue specifies to use p-values for the results. A p-value is used in hypothesis testing to measure how often you are willing to make a mistake in rejecting the null hypothesis. A general practice is to reject the null hypothesis and declare that the results are statistically significant when the p-value is less than 0.05.
ConfidenceInterval specifies a confidence interval for the results. The confidence interval represents the range of values for the chosen metric that is likely to contain the true difference between the baseStat of a variation and the baseline. Evidently returns the 95% confidence interval.
TreatmentEffect is the difference in the statistic specified by the baseStat parameter between each variation and the default variation.
BaseStat returns the statistical values collected for the metric for each variation. The statistic uses the same statistic specified in the baseStat parameter. Therefore, if baseStat is mean, this returns the mean of the values collected for each variation.
"
- }
- },
- "ExperimentResultResponseType": {
- "base": null,
- "refs": {
- "ExperimentResultsData$resultStat": "The experiment statistic that these results pertain to.
"
- }
- },
- "ExperimentResultsData": {
- "base": "A structure that contains experiment results for one metric that is monitored in the experiment.
",
- "refs": {
- "ExperimentResultsDataList$member": null
- }
- },
- "ExperimentResultsDataList": {
- "base": null,
- "refs": {
- "GetExperimentResultsResponse$resultsData": "An array of structures that include experiment results including metric names and values.
"
- }
- },
- "ExperimentSchedule": {
- "base": "This structure contains the time and date that Evidently completed the analysis of the experiment.
",
- "refs": {
- "Experiment$schedule": "A structure that contains the time and date that Evidently completed the analysis of the experiment.
"
- }
- },
- "ExperimentStatus": {
- "base": null,
- "refs": {
- "Experiment$status": "The current state of the experiment.
",
- "ListExperimentsRequest$status": "Use this optional parameter to limit the returned results to only the experiments with the status that you specify here.
"
- }
- },
- "ExperimentStopDesiredState": {
- "base": null,
- "refs": {
- "StopExperimentRequest$desiredState": "Specify whether the experiment is to be considered COMPLETED or CANCELLED after it stops.
"
- }
- },
- "ExperimentType": {
- "base": null,
- "refs": {
- "Experiment$type": "The type of this experiment. Currently, this value must be aws.experiment.onlineab.
"
- }
- },
- "Feature": {
- "base": "This structure contains information about one Evidently feature in your account.
",
- "refs": {
- "CreateFeatureResponse$feature": "A structure that contains information about the new feature.
",
- "GetFeatureResponse$feature": "A structure containing the configuration details of the feature.
",
- "UpdateFeatureResponse$feature": "A structure that contains information about the updated feature.
"
- }
- },
- "FeatureArn": {
- "base": null,
- "refs": {
- "Feature$arn": "The ARN of the feature.
"
- }
- },
- "FeatureEvaluationStrategy": {
- "base": null,
- "refs": {
- "CreateFeatureRequest$evaluationStrategy": "Specify ALL_RULES to activate the traffic allocation specified by any ongoing launches or experiments. Specify DEFAULT_VARIATION to serve the default variation to all users instead.
",
- "Feature$evaluationStrategy": "If this value is ALL_RULES, the traffic allocation specified by any ongoing launches or experiments is being used. If this is DEFAULT_VARIATION, the default variation is being served to all users.
",
- "FeatureSummary$evaluationStrategy": "If this value is ALL_RULES, the traffic allocation specified by any ongoing launches or experiments is being used. If this is DEFAULT_VARIATION, the default variation is being served to all users.
",
- "UpdateFeatureRequest$evaluationStrategy": "Specify ALL_RULES to activate the traffic allocation specified by any ongoing launches or experiments. Specify DEFAULT_VARIATION to serve the default variation to all users instead.
"
- }
- },
- "FeatureName": {
- "base": null,
- "refs": {
- "CreateFeatureRequest$name": "The name for the new feature.
",
- "DeleteFeatureRequest$feature": "The name of the feature to delete.
",
- "EvaluateFeatureRequest$feature": "The name of the feature being evaluated.
",
- "EvaluationRequest$feature": "The name of the feature being evaluated.
",
- "EvaluationResult$feature": "The name of the feature being evaluated.
",
- "Feature$name": "The name of the feature.
",
- "FeatureSummary$name": "The name of the feature.
",
- "FeatureToVariationMap$key": null,
- "GetFeatureRequest$feature": "The name of the feature that you want to retrieve information for.
",
- "LaunchGroupConfig$feature": "The feature that this launch is using.
",
- "TreatmentConfig$feature": "The feature that this experiment is testing.
",
- "UpdateFeatureRequest$feature": "The name of the feature to be updated.
"
- }
- },
- "FeatureStatus": {
- "base": null,
- "refs": {
- "Feature$status": "The current state of the feature.
",
- "FeatureSummary$status": "The current state of the feature.
"
- }
- },
- "FeatureSummariesList": {
- "base": null,
- "refs": {
- "ListFeaturesResponse$features": "An array of structures that contain the configuration details of the features in the specified project.
"
- }
- },
- "FeatureSummary": {
- "base": "This structure contains information about one Evidently feature in your account.
",
- "refs": {
- "FeatureSummariesList$member": null
- }
- },
- "FeatureToVariationMap": {
- "base": null,
- "refs": {
- "LaunchGroup$featureVariations": "The feature variation for this launch group. This is a key-value pair.
",
- "Treatment$featureVariations": "The feature variation used for this treatment. This is a key-value pair. The key is the feature name, and the value is the variation name.
"
- }
- },
- "GetExperimentRequest": {
- "base": null,
- "refs": {}
- },
- "GetExperimentResponse": {
- "base": null,
- "refs": {}
- },
- "GetExperimentResultsRequest": {
- "base": null,
- "refs": {}
- },
- "GetExperimentResultsResponse": {
- "base": null,
- "refs": {}
- },
- "GetFeatureRequest": {
- "base": null,
- "refs": {}
- },
- "GetFeatureResponse": {
- "base": null,
- "refs": {}
- },
- "GetLaunchRequest": {
- "base": null,
- "refs": {}
- },
- "GetLaunchResponse": {
- "base": null,
- "refs": {}
- },
- "GetProjectRequest": {
- "base": null,
- "refs": {}
- },
- "GetProjectResponse": {
- "base": null,
- "refs": {}
- },
- "GetSegmentRequest": {
- "base": null,
- "refs": {}
- },
- "GetSegmentResponse": {
- "base": null,
- "refs": {}
- },
- "GroupName": {
- "base": null,
- "refs": {
- "GroupToWeightMap$key": null,
- "LaunchGroup$name": "The name of the launch group.
",
- "LaunchGroupConfig$name": "A name for this launch group.
"
- }
- },
- "GroupToWeightMap": {
- "base": null,
- "refs": {
- "ScheduledSplit$groupWeights": "The traffic allocation percentages among the feature variations during one step of a launch. This is a set of key-value pairs. The keys are variation names. The values represent the percentage of traffic to allocate to that variation during this step.
The value is expressed in thousandths of a percent, so assigning a weight of 50000 assigns 50% of traffic to that variation.
If the sum of the weights for all the variations in a segment override does not add up to 100,000, then the remaining traffic that matches this segment is not assigned by this segment override, and instead moves on to the next segment override or the default traffic split.
",
- "ScheduledSplitConfig$groupWeights": "The traffic allocation percentages among the feature variations during one step of a launch. This is a set of key-value pairs. The keys are variation names. The values represent the percentage of traffic to allocate to that variation during this step.
The value is expressed in thousandths of a percent, so assigning a weight of 50000 assigns 50% of traffic to that variation.
If the sum of the weights for all the variations in a segment override does not add up to 100,000, then the remaining traffic that matches this segment is not assigned by this segment override, and instead moves on to the next segment override or the default traffic split.
",
- "SegmentOverride$weights": "The traffic allocation percentages among the feature variations to assign to this segment. This is a set of key-value pairs. The keys are variation names. The values represent the amount of traffic to allocate to that variation for this segment. This is expressed in thousandths of a percent, so a weight of 50000 represents 50% of traffic.
"
- }
- },
- "Integer": {
- "base": null,
- "refs": {
- "PutProjectEventsResponse$failedEventCount": "The number of events in the operation that could not be used by Evidently.
"
- }
- },
- "InternalServerException": {
- "base": "Unexpected error while processing the request. Retry the request.
",
- "refs": {}
- },
- "JsonPath": {
- "base": null,
- "refs": {
- "MetricDefinition$entityIdKey": "The entity, such as a user or session, that does an action that causes a metric value to be recorded.
",
- "MetricDefinition$valueKey": "The value that is tracked to produce the metric.
",
- "MetricDefinitionConfig$entityIdKey": "The entity, such as a user or session, that does an action that causes a metric value to be recorded. An example is userDetails.userID.
",
- "MetricDefinitionConfig$valueKey": "The value that is tracked to produce the metric.
"
- }
- },
- "JsonValue": {
- "base": null,
- "refs": {
- "EvaluateFeatureRequest$evaluationContext": "A JSON object of attributes that you can optionally pass in as part of the evaluation event sent to Evidently from the user session. Evidently can use this value to match user sessions with defined audience segments. For more information, see Use segments to focus your audience .
If you include this parameter, the value must be a JSON object. A JSON array is not supported.
",
- "EvaluateFeatureResponse$details": "If this user was assigned to a launch or experiment, this field lists the launch or experiment name.
",
- "EvaluationRequest$evaluationContext": "A JSON block of attributes that you can optionally pass in. This JSON block is included in the evaluation events sent to Evidently from the user session.
",
- "EvaluationResult$details": "If this user was assigned to a launch or experiment, this field lists the launch or experiment name.
",
- "Event$data": "The event data.
",
- "ExperimentReport$content": "The content of the report.
",
- "MetricDefinition$eventPattern": "The EventBridge event pattern that defines how the metric is recorded.
For more information about EventBridge event patterns, see Amazon EventBridge event patterns .
",
- "TestSegmentPatternRequest$payload": "A sample evaluationContext JSON block to test against the specified pattern.
"
- }
- },
- "Launch": {
- "base": "This structure contains the configuration details of one Evidently launch.
",
- "refs": {
- "CreateLaunchResponse$launch": "A structure that contains the configuration of the launch that was created.
",
- "GetLaunchResponse$launch": "A structure containing the configuration details of the launch.
",
- "LaunchesList$member": null,
- "StartLaunchResponse$launch": "A structure that contains information about the launch that was started.
",
- "UpdateLaunchResponse$launch": "A structure that contains the new configuration of the launch that was updated.
"
- }
- },
- "LaunchArn": {
- "base": null,
- "refs": {
- "Launch$arn": "The ARN of the launch.
"
- }
- },
- "LaunchExecution": {
- "base": "This structure contains information about the start and end times of the launch.
",
- "refs": {
- "Launch$execution": "A structure that contains information about the start and end times of the launch.
"
- }
- },
- "LaunchGroup": {
- "base": "A structure that defines one launch group in a launch. A launch group is a variation of the feature that you are including in the launch.
",
- "refs": {
- "LaunchGroupList$member": null
- }
- },
- "LaunchGroupConfig": {
- "base": "A structure that defines one launch group in a launch. A launch group is a variation of the feature that you are including in the launch.
",
- "refs": {
- "LaunchGroupConfigList$member": null
- }
- },
- "LaunchGroupConfigList": {
- "base": null,
- "refs": {
- "CreateLaunchRequest$groups": "An array of structures that contains the feature and variations that are to be used for the launch.
",
- "UpdateLaunchRequest$groups": "An array of structures that contains the feature and variations that are to be used for the launch.
"
- }
- },
- "LaunchGroupList": {
- "base": null,
- "refs": {
- "Launch$groups": "An array of structures that define the feature variations that are being used in the launch.
"
- }
- },
- "LaunchName": {
- "base": null,
- "refs": {
- "CreateLaunchRequest$name": "The name for the new launch.
",
- "DeleteLaunchRequest$launch": "The name of the launch to delete.
",
- "GetLaunchRequest$launch": "The name of the launch that you want to see the details of.
",
- "Launch$name": "The name of the launch.
",
- "StartLaunchRequest$launch": "The name of the launch to start.
",
- "StopLaunchRequest$launch": "The name of the launch to stop.
",
- "UpdateLaunchRequest$launch": "The name of the launch that is to be updated.
"
- }
- },
- "LaunchStatus": {
- "base": null,
- "refs": {
- "Launch$status": "The current state of the launch.
",
- "ListLaunchesRequest$status": "Use this optional parameter to limit the returned results to only the launches with the status that you specify here.
"
- }
- },
- "LaunchStopDesiredState": {
- "base": null,
- "refs": {
- "StopLaunchRequest$desiredState": "Specify whether to consider the launch as COMPLETED or CANCELLED after it stops.
"
- }
- },
- "LaunchType": {
- "base": null,
- "refs": {
- "Launch$type": "The type of launch.
"
- }
- },
- "LaunchesList": {
- "base": null,
- "refs": {
- "ListLaunchesResponse$launches": "An array of structures that contain the configuration details of the launches in the specified project.
"
- }
- },
- "ListExperimentsRequest": {
- "base": null,
- "refs": {}
- },
- "ListExperimentsResponse": {
- "base": null,
- "refs": {}
- },
- "ListFeaturesRequest": {
- "base": null,
- "refs": {}
- },
- "ListFeaturesResponse": {
- "base": null,
- "refs": {}
- },
- "ListLaunchesRequest": {
- "base": null,
- "refs": {}
- },
- "ListLaunchesResponse": {
- "base": null,
- "refs": {}
- },
- "ListProjectsRequest": {
- "base": null,
- "refs": {}
- },
- "ListProjectsResponse": {
- "base": null,
- "refs": {}
- },
- "ListSegmentReferencesRequest": {
- "base": null,
- "refs": {}
- },
- "ListSegmentReferencesResponse": {
- "base": null,
- "refs": {}
- },
- "ListSegmentsRequest": {
- "base": null,
- "refs": {}
- },
- "ListSegmentsResponse": {
- "base": null,
- "refs": {}
- },
- "ListTagsForResourceRequest": {
- "base": null,
- "refs": {}
- },
- "ListTagsForResourceResponse": {
- "base": null,
- "refs": {}
- },
- "Long": {
- "base": null,
- "refs": {
- "Project$activeExperimentCount": "The number of ongoing experiments currently in the project.
",
- "Project$activeLaunchCount": "The number of ongoing launches currently in the project.
",
- "Project$experimentCount": "The number of experiments currently in the project. This includes all experiments that have been created and not deleted, whether they are ongoing or not.
",
- "Project$featureCount": "The number of features currently in the project.
",
- "Project$launchCount": "The number of launches currently in the project. This includes all launches that have been created and not deleted, whether they are ongoing or not.
",
-          "ProjectSummary$activeExperimentCount": "The number of ongoing experiments currently in the project.
",
- "ProjectSummary$activeLaunchCount": "The number of ongoing launches currently in the project.
",
- "ProjectSummary$experimentCount": "The number of experiments currently in the project.
",
- "ProjectSummary$featureCount": "The number of features currently in the project.
",
- "ProjectSummary$launchCount": "The number of launches currently in the project, including launches that are ongoing, completed, and not started yet.
",
- "Segment$experimentCount": "The number of experiments that this segment is used in. This count includes all current experiments, not just those that are currently running.
",
- "Segment$launchCount": "The number of launches that this segment is used in. This count includes all current launches, not just those that are currently running.
",
- "SegmentOverride$evaluationOrder": "A number indicating the order to use to evaluate segment overrides, if there are more than one. Segment overrides with lower numbers are evaluated first.
"
- }
- },
- "MaxExperiments": {
- "base": null,
- "refs": {
- "ListExperimentsRequest$maxResults": "The maximum number of results to include in the response.
"
- }
- },
- "MaxFeatures": {
- "base": null,
- "refs": {
- "ListFeaturesRequest$maxResults": "The maximum number of results to include in the response.
"
- }
- },
- "MaxLaunches": {
- "base": null,
- "refs": {
- "ListLaunchesRequest$maxResults": "The maximum number of results to include in the response.
"
- }
- },
- "MaxProjects": {
- "base": null,
- "refs": {
- "ListProjectsRequest$maxResults": "The maximum number of results to include in the response.
"
- }
- },
- "MaxReferences": {
- "base": null,
- "refs": {
- "ListSegmentReferencesRequest$maxResults": "The maximum number of results to include in the response. If you omit this, the default of 50 is used.
"
- }
- },
- "MaxSegments": {
- "base": null,
- "refs": {
- "ListSegmentsRequest$maxResults": "The maximum number of results to include in the response. If you omit this, the default of 50 is used.
"
- }
- },
- "MetricDefinition": {
- "base": "This structure defines a metric that is being used to evaluate the variations during a launch or experiment.
",
- "refs": {
- "MetricGoal$metricDefinition": "A structure that contains details about the metric.
",
- "MetricMonitor$metricDefinition": "A structure that defines the metric.
"
- }
- },
- "MetricDefinitionConfig": {
- "base": "This structure defines a metric that you want to use to evaluate the variations during a launch or experiment.
",
- "refs": {
- "MetricGoalConfig$metricDefinition": "A structure that contains details about the metric.
",
- "MetricMonitorConfig$metricDefinition": "A structure that defines the metric.
"
- }
- },
- "MetricDefinitionConfigEventPatternString": {
- "base": null,
- "refs": {
- "MetricDefinitionConfig$eventPattern": "The EventBridge event pattern that defines how the metric is recorded.
For more information about EventBridge event patterns, see Amazon EventBridge event patterns .
"
- }
- },
- "MetricGoal": {
- "base": "A structure that tells Evidently whether higher or lower values are desired for a metric that is used in an experiment.
",
- "refs": {
- "MetricGoalsList$member": null
- }
- },
- "MetricGoalConfig": {
- "base": "Use this structure to tell Evidently whether higher or lower values are desired for a metric that is used in an experiment.
",
- "refs": {
- "MetricGoalConfigList$member": null
- }
- },
- "MetricGoalConfigList": {
- "base": null,
- "refs": {
- "CreateExperimentRequest$metricGoals": "An array of structures that defines the metrics used for the experiment, and whether a higher or lower value for each metric is the goal.
",
- "UpdateExperimentRequest$metricGoals": "An array of structures that defines the metrics used for the experiment, and whether a higher or lower value for each metric is the goal.
"
- }
- },
- "MetricGoalsList": {
- "base": null,
- "refs": {
- "Experiment$metricGoals": "An array of structures that defines the metrics used for the experiment, and whether a higher or lower value for each metric is the goal.
"
- }
- },
- "MetricMonitor": {
- "base": "A structure that defines a metric to be used to monitor performance of the variations during a launch.
",
- "refs": {
- "MetricMonitorList$member": null
- }
- },
- "MetricMonitorConfig": {
- "base": "A structure that defines a metric to be used to monitor performance of the variations during a launch.
",
- "refs": {
- "MetricMonitorConfigList$member": null
- }
- },
- "MetricMonitorConfigList": {
- "base": null,
- "refs": {
- "CreateLaunchRequest$metricMonitors": "An array of structures that define the metrics that will be used to monitor the launch performance.
",
- "UpdateLaunchRequest$metricMonitors": "An array of structures that define the metrics that will be used to monitor the launch performance.
"
- }
- },
- "MetricMonitorList": {
- "base": null,
- "refs": {
- "Launch$metricMonitors": "An array of structures that define the metrics that are being used to monitor the launch performance.
"
- }
- },
- "MetricNameList": {
- "base": null,
- "refs": {
- "GetExperimentResultsRequest$metricNames": "The names of the experiment metrics that you want to see the results of.
"
- }
- },
- "MetricUnitLabel": {
- "base": null,
- "refs": {
- "MetricDefinition$unitLabel": "The label for the units that the metric is measuring.
",
- "MetricDefinitionConfig$unitLabel": "A label for the units that the metric is measuring.
"
- }
- },
- "NextToken": {
- "base": null,
- "refs": {
- "ListExperimentsRequest$nextToken": "The token to use when requesting the next set of results. You received this token from a previous ListExperiments operation.
",
- "ListExperimentsResponse$nextToken": "The token to use in a subsequent ListExperiments operation to return the next set of results.
",
- "ListFeaturesRequest$nextToken": "The token to use when requesting the next set of results. You received this token from a previous ListFeatures operation.
",
- "ListFeaturesResponse$nextToken": "The token to use in a subsequent ListFeatures operation to return the next set of results.
",
- "ListLaunchesRequest$nextToken": "The token to use when requesting the next set of results. You received this token from a previous ListLaunches operation.
",
- "ListLaunchesResponse$nextToken": "The token to use in a subsequent ListLaunches operation to return the next set of results.
",
- "ListProjectsRequest$nextToken": "The token to use when requesting the next set of results. You received this token from a previous ListProjects operation.
",
- "ListProjectsResponse$nextToken": "The token to use in a subsequent ListProjects operation to return the next set of results.
",
- "ListSegmentReferencesRequest$nextToken": "The token to use when requesting the next set of results. You received this token from a previous ListSegmentReferences operation.
",
- "ListSegmentReferencesResponse$nextToken": "The token to use in a subsequent ListSegmentReferences operation to return the next set of results.
",
- "ListSegmentsRequest$nextToken": "The token to use when requesting the next set of results. You received this token from a previous ListSegments operation.
",
- "ListSegmentsResponse$nextToken": "The token to use in a subsequent ListSegments operation to return the next set of results.
"
- }
- },
- "OnlineAbConfig": {
- "base": "A structure that contains the configuration of which variation to use as the \"control\" version. The \"control\" version is used for comparison with other variations. This structure also specifies how much experiment traffic is allocated to each variation.
",
- "refs": {
-        "CreateExperimentRequest$onlineAbConfig": "A structure that contains the configuration of which variation to use as the \"control\" version. The \"control\" version is used for comparison with other variations. This structure also specifies how much experiment traffic is allocated to each variation.
",
-        "UpdateExperimentRequest$onlineAbConfig": "A structure that contains the configuration of which variation to use as the \"control\" version. The \"control\" version is used for comparison with other variations. This structure also specifies how much experiment traffic is allocated to each variation.
"
- }
- },
- "OnlineAbDefinition": {
- "base": "A structure that contains the configuration of which variation to use as the \"control\" version. The \"control\" version is used for comparison with other variations. This structure also specifies how much experiment traffic is allocated to each variation.
",
- "refs": {
- "Experiment$onlineAbDefinition": "A structure that contains the configuration of which variation to use as the \"control\" version. The \"control\" version is used for comparison with other variations. This structure also specifies how much experiment traffic is allocated to each variation.
"
- }
- },
- "PrimitiveBoolean": {
- "base": null,
- "refs": {
- "UpdateExperimentRequest$removeSegment": "Removes a segment from being used in an experiment. You can't use this parameter if the experiment is currently running.
"
- }
- },
- "Project": {
- "base": "This structure defines a project, which is the logical object in Evidently that can contain features, launches, and experiments. Use projects to group similar features together.
",
- "refs": {
- "CreateProjectResponse$project": "A structure that contains information about the created project.
",
- "GetProjectResponse$project": "A structure containing the configuration details of the project.
",
- "UpdateProjectDataDeliveryResponse$project": "A structure containing details about the project that you updated.
",
- "UpdateProjectResponse$project": "A structure containing information about the updated project.
"
- }
- },
- "ProjectAppConfigResource": {
- "base": "This is a structure that defines the configuration of how your application integrates with AppConfig to run client-side evaluation.
",
- "refs": {
- "Project$appConfigResource": "This structure defines the configuration of how your application integrates with AppConfig to run client-side evaluation.
"
- }
- },
- "ProjectAppConfigResourceConfig": {
- "base": "Use this parameter to configure client-side evaluation for your project. Client-side evaluation allows your application to assign variations to user sessions locally instead of by calling the EvaluateFeature operation to assign the variations. This mitigates the latency and availability risks that come with an API call.
ProjectAppConfigResource is a structure that defines the configuration of how your application integrates with AppConfig to run client-side evaluation.
",
- "refs": {
- "CreateProjectRequest$appConfigResource": "Use this parameter if the project will use client-side evaluation powered by AppConfig . Client-side evaluation allows your application to assign variations to user sessions locally instead of by calling the EvaluateFeature operation. This mitigates the latency and availability risks that come with an API call. For more information, see Client-side evaluation - powered by AppConfig.
This parameter is a structure that contains information about the AppConfig application and environment that will be used as for client-side evaluation.
To create a project that uses client-side evaluation, you must have the evidently:ExportProjectAsConfiguration permission.
",
-        "UpdateProjectRequest$appConfigResource": "Use this parameter if the project will use client-side evaluation powered by AppConfig. Client-side evaluation allows your application to assign variations to user sessions locally instead of by calling the EvaluateFeature operation. This mitigates the latency and availability risks that come with an API call.
This parameter is a structure that contains information about the AppConfig application that will be used for client-side evaluation.
"
- }
- },
- "ProjectArn": {
- "base": null,
- "refs": {
- "Experiment$project": "The name or ARN of the project that contains this experiment.
",
- "Feature$project": "The name or ARN of the project that contains the feature.
",
- "Project$arn": "The name or ARN of the project.
",
- "ProjectSummary$arn": "The name or ARN of the project.
"
- }
- },
- "ProjectDataDelivery": {
- "base": "A structure that contains information about where Evidently is to store evaluation events for longer term storage.
",
- "refs": {
- "Project$dataDelivery": "A structure that contains information about where Evidently is to store evaluation events for longer term storage.
"
- }
- },
- "ProjectDataDeliveryConfig": {
- "base": "A structure that contains information about where Evidently is to store evaluation events for longer term storage.
",
- "refs": {
- "CreateProjectRequest$dataDelivery": "A structure that contains information about where Evidently is to store evaluation events for longer term storage, if you choose to do so. If you choose not to store these events, Evidently deletes them after using them to produce metrics and other experiment results that you can view.
"
- }
- },
- "ProjectName": {
- "base": null,
- "refs": {
- "CreateProjectRequest$name": "The name for the project.
",
- "Project$name": "The name of the project.
",
- "ProjectSummary$name": "The name of the project.
"
- }
- },
- "ProjectRef": {
- "base": null,
- "refs": {
- "BatchEvaluateFeatureRequest$project": "The name or ARN of the project that contains the feature being evaluated.
",
- "CreateExperimentRequest$project": "The name or ARN of the project that you want to create the new experiment in.
",
- "CreateFeatureRequest$project": "The name or ARN of the project that is to contain the new feature.
",
- "CreateLaunchRequest$project": "The name or ARN of the project that you want to create the launch in.
",
- "DeleteExperimentRequest$project": "The name or ARN of the project that contains the experiment to delete.
",
- "DeleteFeatureRequest$project": "The name or ARN of the project that contains the feature to delete.
",
- "DeleteLaunchRequest$project": "The name or ARN of the project that contains the launch to delete.
",
- "DeleteProjectRequest$project": "The name or ARN of the project to delete.
",
- "EvaluateFeatureRequest$project": "The name or ARN of the project that contains this feature.
",
- "FeatureSummary$project": "The name or ARN of the project that contains the feature.
",
- "GetExperimentRequest$project": "The name or ARN of the project that contains the experiment.
",
- "GetExperimentResultsRequest$project": "The name or ARN of the project that contains the experiment that you want to see the results of.
",
- "GetFeatureRequest$project": "The name or ARN of the project that contains the feature.
",
- "GetLaunchRequest$project": "The name or ARN of the project that contains the launch.
",
- "GetProjectRequest$project": "The name or ARN of the project that you want to see the details of.
",
- "Launch$project": "The name or ARN of the project that contains the launch.
",
- "ListExperimentsRequest$project": "The name or ARN of the project to return the experiment list from.
",
- "ListFeaturesRequest$project": "The name or ARN of the project to return the feature list from.
",
- "ListLaunchesRequest$project": "The name or ARN of the project to return the launch list from.
",
- "PutProjectEventsRequest$project": "The name or ARN of the project to write the events to.
",
- "StartExperimentRequest$project": "The name or ARN of the project that contains the experiment to start.
",
- "StartLaunchRequest$project": "The name or ARN of the project that contains the launch to start.
",
- "StopExperimentRequest$project": "The name or ARN of the project that contains the experiment to stop.
",
- "StopLaunchRequest$project": "The name or ARN of the project that contains the launch that you want to stop.
",
- "UpdateExperimentRequest$project": "The name or ARN of the project that contains the experiment that you want to update.
",
- "UpdateFeatureRequest$project": "The name or ARN of the project that contains the feature to be updated.
",
- "UpdateLaunchRequest$project": "The name or ARN of the project that contains the launch that you want to update.
",
- "UpdateProjectDataDeliveryRequest$project": "The name or ARN of the project that you want to modify the data storage options for.
",
- "UpdateProjectRequest$project": "The name or ARN of the project to update.
"
- }
- },
- "ProjectStatus": {
- "base": null,
- "refs": {
- "Project$status": "The current state of the project.
",
- "ProjectSummary$status": "The current state of the project.
"
- }
- },
- "ProjectSummariesList": {
- "base": null,
- "refs": {
- "ListProjectsResponse$projects": "An array of structures that contain the configuration details of the projects in the Region.
"
- }
- },
- "ProjectSummary": {
- "base": "A structure that contains configuration information about an Evidently project.
",
- "refs": {
- "ProjectSummariesList$member": null
- }
- },
- "PutProjectEventsRequest": {
- "base": null,
- "refs": {}
- },
- "PutProjectEventsResponse": {
- "base": null,
- "refs": {}
- },
- "PutProjectEventsResultEntry": {
- "base": "A structure that contains Evidently's response to the sent events, including an event ID and error codes, if any.
",
- "refs": {
- "PutProjectEventsResultEntryList$member": null
- }
- },
- "PutProjectEventsResultEntryList": {
- "base": null,
- "refs": {
- "PutProjectEventsResponse$eventResults": "A structure that contains Evidently's response to the sent events, including an event ID and error codes, if any.
"
- }
- },
- "RandomizationSalt": {
- "base": null,
- "refs": {
- "CreateExperimentRequest$randomizationSalt": "When Evidently assigns a particular user session to an experiment, it must use a randomization ID to determine which variation the user session is served. This randomization ID is a combination of the entity ID and randomizationSalt. If you omit randomizationSalt, Evidently uses the experiment name as the randomizationSalt.
",
- "CreateLaunchRequest$randomizationSalt": "When Evidently assigns a particular user session to a launch, it must use a randomization ID to determine which variation the user session is served. This randomization ID is a combination of the entity ID and randomizationSalt. If you omit randomizationSalt, Evidently uses the launch name as the randomizationSalt.
",
- "Experiment$randomizationSalt": "This value is used when Evidently assigns a particular user session to the experiment. It helps create a randomization ID to determine which variation the user session is served. This randomization ID is a combination of the entity ID and randomizationSalt.
",
- "Launch$randomizationSalt": "This value is used when Evidently assigns a particular user session to the launch, to help create a randomization ID to determine which variation the user session is served. This randomization ID is a combination of the entity ID and randomizationSalt.
",
- "UpdateExperimentRequest$randomizationSalt": "When Evidently assigns a particular user session to an experiment, it must use a randomization ID to determine which variation the user session is served. This randomization ID is a combination of the entity ID and randomizationSalt. If you omit randomizationSalt, Evidently uses the experiment name as the randomizationSalt.
",
- "UpdateLaunchRequest$randomizationSalt": "When Evidently assigns a particular user session to a launch, it must use a randomization ID to determine which variation the user session is served. This randomization ID is a combination of the entity ID and randomizationSalt. If you omit randomizationSalt, Evidently uses the launch name as the randomizationSalt.
"
- }
- },
- "RefResource": {
- "base": "A structure that contains information about one experiment or launch that uses the specified segment.
",
- "refs": {
- "RefResourceList$member": null
- }
- },
- "RefResourceList": {
- "base": null,
- "refs": {
- "ListSegmentReferencesResponse$referencedBy": "An array of structures, where each structure contains information about one experiment or launch that uses this segment.
"
- }
- },
- "ResourceNotFoundException": {
- "base": "The request references a resource that does not exist.
",
- "refs": {}
- },
- "ResultsPeriod": {
- "base": null,
- "refs": {
- "GetExperimentResultsRequest$period": "In seconds, the amount of time to aggregate results together.
"
- }
- },
- "RuleName": {
- "base": null,
- "refs": {
- "EvaluationRule$name": "The name of the experiment or launch.
"
- }
- },
- "RuleType": {
- "base": null,
- "refs": {
- "EvaluationRule$type": "This value is aws.evidently.splits if this is an evaluation rule for a launch, and it is aws.evidently.onlineab if this is an evaluation rule for an experiment.
"
- }
- },
- "S3BucketSafeName": {
- "base": null,
- "refs": {
- "S3Destination$bucket": "The name of the bucket in which Evidently stores evaluation events.
",
- "S3DestinationConfig$bucket": "The name of the bucket in which Evidently stores evaluation events.
"
- }
- },
- "S3Destination": {
- "base": "If the project stores evaluation events in an Amazon S3 bucket, this structure stores the bucket name and bucket prefix.
",
- "refs": {
- "ProjectDataDelivery$s3Destination": "If the project stores evaluation events in an Amazon S3 bucket, this structure stores the bucket name and bucket prefix.
"
- }
- },
- "S3DestinationConfig": {
- "base": "If the project stores evaluation events in an Amazon S3 bucket, this structure stores the bucket name and bucket prefix.
",
- "refs": {
- "ProjectDataDeliveryConfig$s3Destination": "If the project stores evaluation events in an Amazon S3 bucket, this structure stores the bucket name and bucket prefix.
",
- "UpdateProjectDataDeliveryRequest$s3Destination": "A structure containing the S3 bucket name and bucket prefix where you want to store evaluation events.
"
- }
- },
- "S3PrefixSafeName": {
- "base": null,
- "refs": {
- "S3Destination$prefix": "The bucket prefix in which Evidently stores evaluation events.
",
- "S3DestinationConfig$prefix": "The bucket prefix in which Evidently stores evaluation events.
"
- }
- },
- "ScheduledSplit": {
- "base": "This structure defines the traffic allocation percentages among the feature variations during one step of a launch, and the start time of that step.
",
- "refs": {
- "ScheduledStepList$member": null
- }
- },
- "ScheduledSplitConfig": {
- "base": "This structure defines the traffic allocation percentages among the feature variations during one step of a launch, and the start time of that step.
",
- "refs": {
- "ScheduledSplitConfigList$member": null
- }
- },
- "ScheduledSplitConfigList": {
- "base": null,
- "refs": {
- "ScheduledSplitsLaunchConfig$steps": "An array of structures that define the traffic allocation percentages among the feature variations during each step of the launch. This also defines the start time of each step.
"
- }
- },
- "ScheduledSplitsLaunchConfig": {
- "base": "An array of structures that define the traffic allocation percentages among the feature variations during each step of a launch. This also defines the start time of each step.
",
- "refs": {
- "CreateLaunchRequest$scheduledSplitsConfig": "An array of structures that define the traffic allocation percentages among the feature variations during each step of the launch.
",
- "UpdateLaunchRequest$scheduledSplitsConfig": "An array of structures that define the traffic allocation percentages among the feature variations during each step of the launch.
"
- }
- },
- "ScheduledSplitsLaunchDefinition": {
- "base": "An array of structures that define the traffic allocation percentages among the feature variations during each step of a launch. This also defines the start time of each step.
",
- "refs": {
- "Launch$scheduledSplitsDefinition": "An array of structures that define the traffic allocation percentages among the feature variations during each step of the launch.
"
- }
- },
- "ScheduledStepList": {
- "base": null,
- "refs": {
- "ScheduledSplitsLaunchDefinition$steps": "An array of structures that define the traffic allocation percentages among the feature variations during each step of the launch. This also defines the start time of each step.
"
- }
- },
- "Segment": {
- "base": "This structure contains information about one audience segment . You can use segments in your experiments and launches to narrow the user sessions used for experiment or launch to only the user sessions that match one or more criteria.
",
- "refs": {
- "CreateSegmentResponse$segment": "A structure that contains the complete information about the segment that was just created.
",
- "GetSegmentResponse$segment": "A structure that contains the complete information about the segment.
",
- "SegmentList$member": null
- }
- },
- "SegmentArn": {
- "base": null,
- "refs": {
- "Experiment$segment": "The audience segment being used for the experiment, if a segment is being used.
",
- "Segment$arn": "The ARN of the segment.
"
- }
- },
- "SegmentList": {
- "base": null,
- "refs": {
- "ListSegmentsResponse$segments": "An array of structures that contain information about the segments in this Region.
"
- }
- },
- "SegmentName": {
- "base": null,
- "refs": {
- "CreateSegmentRequest$name": "A name for the segment.
",
- "Segment$name": "The name of the segment.
"
- }
- },
- "SegmentOverride": {
- "base": "This structure specifies a segment that you have already created, and defines the traffic split for that segment to be used in a launch.
",
- "refs": {
- "SegmentOverridesList$member": null
- }
- },
- "SegmentOverridesList": {
- "base": null,
- "refs": {
- "ScheduledSplit$segmentOverrides": "Use this parameter to specify different traffic splits for one or more audience segments . A segment is a portion of your audience that share one or more characteristics. Examples could be Chrome browser users, users in Europe, or Firefox browser users in Europe who also fit other criteria that your application collects, such as age.
This parameter is an array of up to six segment override objects. Each of these objects specifies a segment that you have already created, and defines the traffic split for that segment.
",
- "ScheduledSplitConfig$segmentOverrides": "Use this parameter to specify different traffic splits for one or more audience segments . A segment is a portion of your audience that share one or more characteristics. Examples could be Chrome browser users, users in Europe, or Firefox browser users in Europe who also fit other criteria that your application collects, such as age.
This parameter is an array of up to six segment override objects. Each of these objects specifies a segment that you have already created, and defines the traffic split for that segment.
"
- }
- },
- "SegmentPattern": {
- "base": null,
- "refs": {
- "CreateSegmentRequest$pattern": "The pattern to use for the segment. For more information about pattern syntax, see Segment rule pattern syntax .
",
- "Segment$pattern": "The pattern that defines the attributes to use to evalute whether a user session will be in the segment. For more information about the pattern syntax, see Segment rule pattern syntax .
",
- "TestSegmentPatternRequest$pattern": "The pattern to test.
"
- }
- },
- "SegmentRef": {
- "base": null,
- "refs": {
- "CreateExperimentRequest$segment": "Specifies an audience segment to use in the experiment. When a segment is used in an experiment, only user sessions that match the segment pattern are used in the experiment.
",
- "DeleteSegmentRequest$segment": "Specifies the segment to delete.
",
- "GetSegmentRequest$segment": "The ARN of the segment to return information for.
",
- "ListSegmentReferencesRequest$segment": "The ARN of the segment that you want to view information for.
",
- "SegmentOverride$segment": "The ARN of the segment to use.
",
- "UpdateExperimentRequest$segment": "Adds an audience segment to an experiment. When a segment is used in an experiment, only user sessions that match the segment pattern are used in the experiment. You can't use this parameter if the experiment is currently running.
"
- }
- },
- "SegmentReferenceResourceType": {
- "base": null,
- "refs": {
- "ListSegmentReferencesRequest$type": "Specifies whether to return information about launches or experiments that use this segment.
"
- }
- },
- "ServiceQuotaExceededException": {
- "base": "The request would cause a service quota to be exceeded.
",
- "refs": {}
- },
- "ServiceUnavailableException": {
- "base": "The service was unavailable. Retry the request.
",
- "refs": {}
- },
- "SplitWeight": {
- "base": null,
- "refs": {
- "CreateExperimentRequest$samplingRate": "The portion of the available audience that you want to allocate to this experiment, in thousandths of a percent. The available audience is the total audience minus the audience that you have allocated to overrides or current launches of this feature.
This is represented in thousandths of a percent. For example, specify 10,000 to allocate 10% of the available audience.
",
- "Experiment$samplingRate": "In thousandths of a percent, the amount of the available audience that is allocated to this experiment. The available audience is the total audience minus the audience that you have allocated to overrides or current launches of this feature.
This is represented in thousandths of a percent, so a value of 10,000 is 10% of the available audience.
",
- "GroupToWeightMap$value": null,
- "TreatmentToWeightMap$value": null,
- "UpdateExperimentRequest$samplingRate": "The portion of the available audience that you want to allocate to this experiment, in thousandths of a percent. The available audience is the total audience minus the audience that you have allocated to overrides or current launches of this feature.
This is represented in thousandths of a percent. For example, specify 20,000 to allocate 20% of the available audience.
"
- }
- },
- "StartExperimentRequest": {
- "base": null,
- "refs": {}
- },
- "StartExperimentResponse": {
- "base": null,
- "refs": {}
- },
- "StartLaunchRequest": {
- "base": null,
- "refs": {}
- },
- "StartLaunchResponse": {
- "base": null,
- "refs": {}
- },
- "StopExperimentRequest": {
- "base": null,
- "refs": {}
- },
- "StopExperimentResponse": {
- "base": null,
- "refs": {}
- },
- "StopLaunchRequest": {
- "base": null,
- "refs": {}
- },
- "StopLaunchResponse": {
- "base": null,
- "refs": {}
- },
- "String": {
- "base": null,
- "refs": {
- "AccessDeniedException$message": null,
- "ConflictException$message": null,
- "ConflictException$resourceId": "The ID of the resource that caused the exception.
",
- "ConflictException$resourceType": "The type of the resource that is associated with the error.
",
- "EvaluateFeatureResponse$reason": "Specifies the reason that the user session was assigned this variation. Possible values include DEFAULT, meaning the user was served the default variation; LAUNCH_RULE_MATCH, if the user session was enrolled in a launch; EXPERIMENT_RULE_MATCH, if the user session was enrolled in an experiment; or ENTITY_OVERRIDES_MATCH, if the user's entityId matches an override rule.
",
- "EvaluateFeatureResponse$variation": "The name of the variation that was served to the user session.
",
- "EvaluationResult$reason": "Specifies the reason that the user session was assigned this variation. Possible values include DEFAULT, meaning the user was served the default variation; LAUNCH_RULE_MATCH, if the user session was enrolled in a launch; or EXPERIMENT_RULE_MATCH, if the user session was enrolled in an experiment.
",
- "EvaluationResult$variation": "The name of the variation that was served to the user session.
",
- "GetExperimentResultsResponse$details": "If the experiment doesn't yet have enough events to provide valid results, this field is returned with the message Not enough events to generate results. If there are enough events to provide valid results, this field is not returned.
",
- "InternalServerException$message": null,
- "RefResource$arn": "The ARN of the experiment or launch.
",
- "RefResource$endTime": "The day and time that this experiment or launch ended.
",
- "RefResource$lastUpdatedOn": "The day and time that this experiment or launch was most recently updated.
",
- "RefResource$name": "The name of the experiment or launch.
",
- "RefResource$startTime": "The day and time that this experiment or launch started.
",
- "RefResource$status": "The status of the experiment or launch.
",
- "RefResource$type": "Specifies whether the resource that this structure contains information about is an experiment or a launch.
",
- "ResourceNotFoundException$message": null,
- "ResourceNotFoundException$resourceId": "The ID of the resource that caused the exception.
",
- "ResourceNotFoundException$resourceType": "The type of the resource that is associated with the error.
",
- "ServiceQuotaExceededException$message": null,
- "ServiceQuotaExceededException$quotaCode": "The ID of the service quota that was exceeded.
",
- "ServiceQuotaExceededException$resourceId": "The ID of the resource that caused the exception.
",
- "ServiceQuotaExceededException$resourceType": "The type of the resource that is associated with the error.
",
- "ServiceQuotaExceededException$serviceCode": "The ID of the service that is associated with the error.
",
- "ServiceUnavailableException$message": null,
- "ThrottlingException$message": null,
- "ThrottlingException$quotaCode": "The ID of the service quota that was exceeded.
",
- "ThrottlingException$serviceCode": "The ID of the service that is associated with the error.
",
- "ValidationException$message": null,
- "ValidationExceptionField$message": "The error message.
",
- "ValidationExceptionField$name": "The error name.
"
- }
- },
- "TagKey": {
- "base": null,
- "refs": {
- "TagKeyList$member": null,
- "TagMap$key": null
- }
- },
- "TagKeyList": {
- "base": null,
- "refs": {
- "UntagResourceRequest$tagKeys": "The list of tag keys to remove from the resource.
"
- }
- },
- "TagMap": {
- "base": null,
- "refs": {
- "CreateExperimentRequest$tags": "Assigns one or more tags (key-value pairs) to the experiment.
Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values.
Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of characters.
You can associate as many as 50 tags with an experiment.
For more information, see Tagging Amazon Web Services resources .
",
- "CreateFeatureRequest$tags": "Assigns one or more tags (key-value pairs) to the feature.
Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values.
Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of characters.
You can associate as many as 50 tags with a feature.
For more information, see Tagging Amazon Web Services resources .
",
- "CreateLaunchRequest$tags": "Assigns one or more tags (key-value pairs) to the launch.
Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values.
Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of characters.
You can associate as many as 50 tags with a launch.
For more information, see Tagging Amazon Web Services resources .
",
- "CreateProjectRequest$tags": "Assigns one or more tags (key-value pairs) to the project.
Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values.
Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of characters.
You can associate as many as 50 tags with a project.
For more information, see Tagging Amazon Web Services resources .
",
- "CreateSegmentRequest$tags": "Assigns one or more tags (key-value pairs) to the segment.
Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values.
Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of characters.
You can associate as many as 50 tags with a segment.
For more information, see Tagging Amazon Web Services resources .
",
- "Experiment$tags": "The list of tag keys and values associated with this experiment.
",
- "Feature$tags": "The list of tag keys and values associated with this feature.
",
- "FeatureSummary$tags": "The list of tag keys and values associated with this feature.
",
- "Launch$tags": "The list of tag keys and values associated with this launch.
",
- "ListTagsForResourceResponse$tags": "The list of tag keys and values associated with the resource you specified.
",
- "Project$tags": "The list of tag keys and values associated with this project.
",
- "ProjectSummary$tags": "The list of tag keys and values associated with this project.
",
- "Segment$tags": "The list of tag keys and values associated with this launch.
",
- "TagResourceRequest$tags": "The list of key-value pairs to associate with the resource.
"
- }
- },
- "TagResourceRequest": {
- "base": null,
- "refs": {}
- },
- "TagResourceResponse": {
- "base": null,
- "refs": {}
- },
- "TagValue": {
- "base": null,
- "refs": {
- "TagMap$value": null
- }
- },
- "TestSegmentPatternRequest": {
- "base": null,
- "refs": {}
- },
- "TestSegmentPatternResponse": {
- "base": null,
- "refs": {}
- },
- "ThrottlingException": {
- "base": "The request was denied because of request throttling. Retry the request.
",
- "refs": {}
- },
- "Timestamp": {
- "base": null,
- "refs": {
- "Event$timestamp": "The timestamp of the event.
",
- "Experiment$createdTime": "The date and time that the experiment is first created.
",
- "Experiment$lastUpdatedTime": "The date and time that the experiment was most recently updated.
",
- "ExperimentExecution$endedTime": "The date and time that the experiment ended.
",
- "ExperimentExecution$startedTime": "The date and time that the experiment started.
",
- "ExperimentSchedule$analysisCompleteTime": "The time and date that Evidently completed the analysis of the experiment.
",
- "Feature$createdTime": "The date and time that the feature is created.
",
- "Feature$lastUpdatedTime": "The date and time that the feature was most recently updated.
",
- "FeatureSummary$createdTime": "The date and time that the feature is created.
",
- "FeatureSummary$lastUpdatedTime": "The date and time that the feature was most recently updated.
",
- "GetExperimentResultsRequest$endTime": "The date and time that the experiment ended, if it is completed. This must be no longer than 30 days after the experiment start time.
",
- "GetExperimentResultsRequest$startTime": "The date and time that the experiment started.
",
- "Launch$createdTime": "The date and time that the launch is created.
",
- "Launch$lastUpdatedTime": "The date and time that the launch was most recently updated.
",
- "LaunchExecution$endedTime": "The date and time that the launch ended.
",
- "LaunchExecution$startedTime": "The date and time that the launch started.
",
- "Project$createdTime": "The date and time that the project is created.
",
- "Project$lastUpdatedTime": "The date and time that the project was most recently updated.
",
- "ProjectSummary$createdTime": "The date and time that the project is created.
",
- "ProjectSummary$lastUpdatedTime": "The date and time that the project was most recently updated.
",
- "ScheduledSplit$startTime": "The date and time that this step of the launch starts.
",
- "ScheduledSplitConfig$startTime": "The date and time that this step of the launch starts.
",
- "Segment$createdTime": "The date and time that this segment was created.
",
- "Segment$lastUpdatedTime": "The date and time that this segment was most recently updated.
",
- "StartExperimentRequest$analysisCompleteTime": "The date and time to end the experiment. This must be no more than 30 days after the experiment starts.
",
- "StartExperimentResponse$startedTime": "A timestamp that indicates when the experiment started.
",
- "StopExperimentResponse$endedTime": "The date and time that the experiment stopped.
",
- "StopLaunchResponse$endedTime": "The date and time that the launch stopped.
",
- "TimestampList$member": null
- }
- },
- "TimestampList": {
- "base": null,
- "refs": {
- "GetExperimentResultsResponse$timestamps": "The timestamps of each result returned.
"
- }
- },
- "Treatment": {
- "base": "A structure that defines one treatment in an experiment. A treatment is a variation of the feature that you are including in the experiment.
",
- "refs": {
- "TreatmentList$member": null
- }
- },
- "TreatmentConfig": {
- "base": "A structure that defines one treatment in an experiment. A treatment is a variation of the feature that you are including in the experiment.
",
- "refs": {
- "TreatmentConfigList$member": null
- }
- },
- "TreatmentConfigList": {
- "base": null,
- "refs": {
- "CreateExperimentRequest$treatments": "An array of structures that describe the configuration of each feature variation used in the experiment.
",
- "UpdateExperimentRequest$treatments": "An array of structures that define the variations being tested in the experiment.
"
- }
- },
- "TreatmentList": {
- "base": null,
- "refs": {
- "Experiment$treatments": "An array of structures that describe the configuration of each feature variation used in the experiment.
"
- }
- },
- "TreatmentName": {
- "base": null,
- "refs": {
- "ExperimentReport$treatmentName": "The name of the variation that this report pertains to.
",
- "ExperimentResultsData$treatmentName": "The treatment, or variation, that returned the values in this structure.
",
- "OnlineAbConfig$controlTreatmentName": "The name of the variation that is to be the default variation that the other variations are compared to.
",
- "OnlineAbDefinition$controlTreatmentName": "The name of the variation that is the default variation that the other variations are compared to.
",
- "Treatment$name": "The name of this treatment.
",
- "TreatmentConfig$name": "A name for this treatment.
",
- "TreatmentNameList$member": null,
- "TreatmentToWeightMap$key": null
- }
- },
- "TreatmentNameList": {
- "base": null,
- "refs": {
- "GetExperimentResultsRequest$treatmentNames": "The names of the experiment treatments that you want to see the results for.
"
- }
- },
- "TreatmentToWeightMap": {
- "base": null,
- "refs": {
- "OnlineAbConfig$treatmentWeights": "A set of key-value pairs. The keys are variation names, and the values are the portion of experiment traffic to be assigned to that variation. Specify the traffic portion in thousandths of a percent, so 20,000 for a variation would allocate 20% of the experiment traffic to that variation.
",
- "OnlineAbDefinition$treatmentWeights": "A set of key-value pairs. The keys are variation names, and the values are the portion of experiment traffic to be assigned to that variation. The traffic portion is specified in thousandths of a percent, so 20,000 for a variation would allocate 20% of the experiment traffic to that variation.
"
- }
- },
- "UntagResourceRequest": {
- "base": null,
- "refs": {}
- },
- "UntagResourceResponse": {
- "base": null,
- "refs": {}
- },
- "UpdateExperimentRequest": {
- "base": null,
- "refs": {}
- },
- "UpdateExperimentResponse": {
- "base": null,
- "refs": {}
- },
- "UpdateFeatureRequest": {
- "base": null,
- "refs": {}
- },
- "UpdateFeatureResponse": {
- "base": null,
- "refs": {}
- },
- "UpdateLaunchRequest": {
- "base": null,
- "refs": {}
- },
- "UpdateLaunchResponse": {
- "base": null,
- "refs": {}
- },
- "UpdateProjectDataDeliveryRequest": {
- "base": null,
- "refs": {}
- },
- "UpdateProjectDataDeliveryResponse": {
- "base": null,
- "refs": {}
- },
- "UpdateProjectRequest": {
- "base": null,
- "refs": {}
- },
- "UpdateProjectResponse": {
- "base": null,
- "refs": {}
- },
- "Uuid": {
- "base": null,
- "refs": {
- "PutProjectEventsResultEntry$eventId": "A unique ID assigned to this PutProjectEvents operation.
"
- }
- },
- "ValidationException": {
- "base": "The value of a parameter in the request caused an error.
",
- "refs": {}
- },
- "ValidationExceptionField": {
- "base": "A structure containing an error name and message.
",
- "refs": {
- "ValidationExceptionFieldList$member": null
- }
- },
- "ValidationExceptionFieldList": {
- "base": null,
- "refs": {
- "ValidationException$fieldList": "The parameter that caused the exception.
"
- }
- },
- "ValidationExceptionReason": {
- "base": null,
- "refs": {
- "ValidationException$reason": "A reason for the error.
"
- }
- },
- "VariableValue": {
- "base": "The value assigned to a feature variation. This structure must contain exactly one field. It can be boolValue, doubleValue, longValue, or stringValue.
",
- "refs": {
- "EvaluateFeatureResponse$value": "The value assigned to this variation to differentiate it from the other variations of this feature.
",
- "EvaluationResult$value": "The value assigned to this variation to differentiate it from the other variations of this feature.
",
- "Variation$value": "The value assigned to this variation.
",
- "VariationConfig$value": "The value assigned to this variation.
"
- }
- },
- "VariableValueLongValueLong": {
- "base": null,
- "refs": {
- "VariableValue$longValue": "If this feature uses the long variation type, this field contains the long value of this variation.
"
- }
- },
- "VariableValueStringValueString": {
- "base": null,
- "refs": {
- "VariableValue$stringValue": "If this feature uses the string variation type, this field contains the string value of this variation.
"
- }
- },
- "Variation": {
- "base": "This structure contains the name and variation value of one variation of a feature.
",
- "refs": {
- "VariationsList$member": null
- }
- },
- "VariationConfig": {
- "base": "This structure contains the name and variation value of one variation of a feature.
",
- "refs": {
- "VariationConfigsList$member": null
- }
- },
- "VariationConfigsList": {
- "base": null,
- "refs": {
- "CreateFeatureRequest$variations": "An array of structures that contain the configuration of the feature's different variations.
",
- "UpdateFeatureRequest$addOrUpdateVariations": "To update variation configurations for this feature, or add new ones, specify this structure. In this array, include any variations that you want to add or update. If the array includes a variation name that already exists for this feature, it is updated. If it includes a new variation name, it is added as a new variation.
"
- }
- },
- "VariationName": {
- "base": null,
- "refs": {
- "CreateFeatureRequest$defaultVariation": "The name of the variation to use as the default variation. The default variation is served to users who are not allocated to any ongoing launches or experiments of this feature.
This variation must also be listed in the variations structure.
If you omit defaultVariation, the first variation listed in the variations structure is used as the default variation.
",
- "EntityOverrideMap$value": null,
- "Feature$defaultVariation": "The name of the variation that is used as the default variation. The default variation is served to users who are not allocated to any ongoing launches or experiments of this feature.
This variation must also be listed in the variations structure.
If you omit defaultVariation, the first variation listed in the variations structure is used as the default variation.
",
- "FeatureSummary$defaultVariation": "The name of the variation that is used as the default variation. The default variation is served to users who are not allocated to any ongoing launches or experiments of this feature.
",
- "FeatureToVariationMap$value": null,
- "LaunchGroupConfig$variation": "The feature variation to use for this launch group.
",
- "TreatmentConfig$variation": "The name of the variation to use as this treatment in the experiment.
",
- "UpdateFeatureRequest$defaultVariation": "The name of the variation to use as the default variation. The default variation is served to users who are not allocated to any ongoing launches or experiments of this feature.
",
- "Variation$name": "The name of the variation.
",
- "VariationConfig$name": "The name of the variation.
",
- "VariationNameList$member": null
- }
- },
- "VariationNameList": {
- "base": null,
- "refs": {
- "UpdateFeatureRequest$removeVariations": "Removes a variation from the feature. If the variation you specify doesn't exist, then this makes no change and does not report an error.
This operation fails if you try to remove a variation that is part of an ongoing launch or experiment.
"
- }
- },
- "VariationValueType": {
- "base": null,
- "refs": {
- "Feature$valueType": "Defines the type of value used to define the different feature variations. For more information, see Variation types
"
- }
- },
- "VariationsList": {
- "base": null,
- "refs": {
- "Feature$variations": "An array of structures that contain the configuration of the feature's different variations.
"
- }
- }
- }
-}
diff --git a/apis/evidently/2021-02-01/endpoint-rule-set-1.json b/apis/evidently/2021-02-01/endpoint-rule-set-1.json
deleted file mode 100644
index f1ebd0c91d1..00000000000
--- a/apis/evidently/2021-02-01/endpoint-rule-set-1.json
+++ /dev/null
@@ -1,314 +0,0 @@
-{
- "version": "1.0",
- "parameters": {
- "Region": {
- "builtIn": "AWS::Region",
- "required": false,
- "documentation": "The AWS region used to dispatch the request.",
- "type": "string"
- },
- "UseDualStack": {
- "builtIn": "AWS::UseDualStack",
- "required": true,
- "default": false,
- "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.",
- "type": "boolean"
- },
- "UseFIPS": {
- "builtIn": "AWS::UseFIPS",
- "required": true,
- "default": false,
- "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.",
- "type": "boolean"
- },
- "Endpoint": {
- "builtIn": "SDK::Endpoint",
- "required": false,
- "documentation": "Override the endpoint used to send this request",
- "type": "string"
- }
- },
- "rules": [
- {
- "conditions": [
- {
- "fn": "isSet",
- "argv": [
- {
- "ref": "Endpoint"
- }
- ]
- }
- ],
- "rules": [
- {
- "conditions": [
- {
- "fn": "booleanEquals",
- "argv": [
- {
- "ref": "UseFIPS"
- },
- true
- ]
- }
- ],
- "error": "Invalid Configuration: FIPS and custom endpoint are not supported",
- "type": "error"
- },
- {
- "conditions": [
- {
- "fn": "booleanEquals",
- "argv": [
- {
- "ref": "UseDualStack"
- },
- true
- ]
- }
- ],
- "error": "Invalid Configuration: Dualstack and custom endpoint are not supported",
- "type": "error"
- },
- {
- "conditions": [],
- "endpoint": {
- "url": {
- "ref": "Endpoint"
- },
- "properties": {},
- "headers": {}
- },
- "type": "endpoint"
- }
- ],
- "type": "tree"
- },
- {
- "conditions": [
- {
- "fn": "isSet",
- "argv": [
- {
- "ref": "Region"
- }
- ]
- }
- ],
- "rules": [
- {
- "conditions": [
- {
- "fn": "aws.partition",
- "argv": [
- {
- "ref": "Region"
- }
- ],
- "assign": "PartitionResult"
- }
- ],
- "rules": [
- {
- "conditions": [
- {
- "fn": "booleanEquals",
- "argv": [
- {
- "ref": "UseFIPS"
- },
- true
- ]
- },
- {
- "fn": "booleanEquals",
- "argv": [
- {
- "ref": "UseDualStack"
- },
- true
- ]
- }
- ],
- "rules": [
- {
- "conditions": [
- {
- "fn": "booleanEquals",
- "argv": [
- true,
- {
- "fn": "getAttr",
- "argv": [
- {
- "ref": "PartitionResult"
- },
- "supportsFIPS"
- ]
- }
- ]
- },
- {
- "fn": "booleanEquals",
- "argv": [
- true,
- {
- "fn": "getAttr",
- "argv": [
- {
- "ref": "PartitionResult"
- },
- "supportsDualStack"
- ]
- }
- ]
- }
- ],
- "rules": [
- {
- "conditions": [],
- "endpoint": {
- "url": "https://evidently-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",
- "properties": {},
- "headers": {}
- },
- "type": "endpoint"
- }
- ],
- "type": "tree"
- },
- {
- "conditions": [],
- "error": "FIPS and DualStack are enabled, but this partition does not support one or both",
- "type": "error"
- }
- ],
- "type": "tree"
- },
- {
- "conditions": [
- {
- "fn": "booleanEquals",
- "argv": [
- {
- "ref": "UseFIPS"
- },
- true
- ]
- }
- ],
- "rules": [
- {
- "conditions": [
- {
- "fn": "booleanEquals",
- "argv": [
- {
- "fn": "getAttr",
- "argv": [
- {
- "ref": "PartitionResult"
- },
- "supportsFIPS"
- ]
- },
- true
- ]
- }
- ],
- "rules": [
- {
- "conditions": [],
- "endpoint": {
- "url": "https://evidently-fips.{Region}.{PartitionResult#dnsSuffix}",
- "properties": {},
- "headers": {}
- },
- "type": "endpoint"
- }
- ],
- "type": "tree"
- },
- {
- "conditions": [],
- "error": "FIPS is enabled but this partition does not support FIPS",
- "type": "error"
- }
- ],
- "type": "tree"
- },
- {
- "conditions": [
- {
- "fn": "booleanEquals",
- "argv": [
- {
- "ref": "UseDualStack"
- },
- true
- ]
- }
- ],
- "rules": [
- {
- "conditions": [
- {
- "fn": "booleanEquals",
- "argv": [
- true,
- {
- "fn": "getAttr",
- "argv": [
- {
- "ref": "PartitionResult"
- },
- "supportsDualStack"
- ]
- }
- ]
- }
- ],
- "rules": [
- {
- "conditions": [],
- "endpoint": {
- "url": "https://evidently.{Region}.{PartitionResult#dualStackDnsSuffix}",
- "properties": {},
- "headers": {}
- },
- "type": "endpoint"
- }
- ],
- "type": "tree"
- },
- {
- "conditions": [],
- "error": "DualStack is enabled but this partition does not support DualStack",
- "type": "error"
- }
- ],
- "type": "tree"
- },
- {
- "conditions": [],
- "endpoint": {
- "url": "https://evidently.{Region}.{PartitionResult#dnsSuffix}",
- "properties": {},
- "headers": {}
- },
- "type": "endpoint"
- }
- ],
- "type": "tree"
- }
- ],
- "type": "tree"
- },
- {
- "conditions": [],
- "error": "Invalid Configuration: Missing Region",
- "type": "error"
- }
- ]
-}
\ No newline at end of file
diff --git a/apis/evidently/2021-02-01/endpoint-tests-1.json b/apis/evidently/2021-02-01/endpoint-tests-1.json
deleted file mode 100644
index f6df30ee85f..00000000000
--- a/apis/evidently/2021-02-01/endpoint-tests-1.json
+++ /dev/null
@@ -1,374 +0,0 @@
-{
- "testCases": [
- {
- "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled",
- "expect": {
- "endpoint": {
- "url": "https://evidently.ap-northeast-1.amazonaws.com"
- }
- },
- "params": {
- "Region": "ap-northeast-1",
- "UseFIPS": false,
- "UseDualStack": false
- }
- },
- {
- "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled",
- "expect": {
- "endpoint": {
- "url": "https://evidently.ap-southeast-1.amazonaws.com"
- }
- },
- "params": {
- "Region": "ap-southeast-1",
- "UseFIPS": false,
- "UseDualStack": false
- }
- },
- {
- "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled",
- "expect": {
- "endpoint": {
- "url": "https://evidently.ap-southeast-2.amazonaws.com"
- }
- },
- "params": {
- "Region": "ap-southeast-2",
- "UseFIPS": false,
- "UseDualStack": false
- }
- },
- {
- "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled",
- "expect": {
- "endpoint": {
- "url": "https://evidently.eu-central-1.amazonaws.com"
- }
- },
- "params": {
- "Region": "eu-central-1",
- "UseFIPS": false,
- "UseDualStack": false
- }
- },
- {
- "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled",
- "expect": {
- "endpoint": {
- "url": "https://evidently.eu-north-1.amazonaws.com"
- }
- },
- "params": {
- "Region": "eu-north-1",
- "UseFIPS": false,
- "UseDualStack": false
- }
- },
- {
- "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled",
- "expect": {
- "endpoint": {
- "url": "https://evidently.eu-west-1.amazonaws.com"
- }
- },
- "params": {
- "Region": "eu-west-1",
- "UseFIPS": false,
- "UseDualStack": false
- }
- },
- {
- "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled",
- "expect": {
- "endpoint": {
- "url": "https://evidently.us-east-1.amazonaws.com"
- }
- },
- "params": {
- "Region": "us-east-1",
- "UseFIPS": false,
- "UseDualStack": false
- }
- },
- {
- "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled",
- "expect": {
- "endpoint": {
- "url": "https://evidently.us-east-2.amazonaws.com"
- }
- },
- "params": {
- "Region": "us-east-2",
- "UseFIPS": false,
- "UseDualStack": false
- }
- },
- {
- "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled",
- "expect": {
- "endpoint": {
- "url": "https://evidently.us-west-2.amazonaws.com"
- }
- },
- "params": {
- "Region": "us-west-2",
- "UseFIPS": false,
- "UseDualStack": false
- }
- },
- {
- "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled",
- "expect": {
- "endpoint": {
- "url": "https://evidently-fips.us-east-1.api.aws"
- }
- },
- "params": {
- "Region": "us-east-1",
- "UseFIPS": true,
- "UseDualStack": true
- }
- },
- {
- "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled",
- "expect": {
- "endpoint": {
- "url": "https://evidently-fips.us-east-1.amazonaws.com"
- }
- },
- "params": {
- "Region": "us-east-1",
- "UseFIPS": true,
- "UseDualStack": false
- }
- },
- {
- "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled",
- "expect": {
- "endpoint": {
- "url": "https://evidently.us-east-1.api.aws"
- }
- },
- "params": {
- "Region": "us-east-1",
- "UseFIPS": false,
- "UseDualStack": true
- }
- },
- {
- "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled",
- "expect": {
- "endpoint": {
- "url": "https://evidently-fips.cn-north-1.api.amazonwebservices.com.cn"
- }
- },
- "params": {
- "Region": "cn-north-1",
- "UseFIPS": true,
- "UseDualStack": true
- }
- },
- {
- "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled",
- "expect": {
- "endpoint": {
- "url": "https://evidently-fips.cn-north-1.amazonaws.com.cn"
- }
- },
- "params": {
- "Region": "cn-north-1",
- "UseFIPS": true,
- "UseDualStack": false
- }
- },
- {
- "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled",
- "expect": {
- "endpoint": {
- "url": "https://evidently.cn-north-1.api.amazonwebservices.com.cn"
- }
- },
- "params": {
- "Region": "cn-north-1",
- "UseFIPS": false,
- "UseDualStack": true
- }
- },
- {
- "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled",
- "expect": {
- "endpoint": {
- "url": "https://evidently.cn-north-1.amazonaws.com.cn"
- }
- },
- "params": {
- "Region": "cn-north-1",
- "UseFIPS": false,
- "UseDualStack": false
- }
- },
- {
- "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled",
- "expect": {
- "endpoint": {
- "url": "https://evidently-fips.us-gov-east-1.api.aws"
- }
- },
- "params": {
- "Region": "us-gov-east-1",
- "UseFIPS": true,
- "UseDualStack": true
- }
- },
- {
- "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled",
- "expect": {
- "endpoint": {
- "url": "https://evidently-fips.us-gov-east-1.amazonaws.com"
- }
- },
- "params": {
- "Region": "us-gov-east-1",
- "UseFIPS": true,
- "UseDualStack": false
- }
- },
- {
- "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled",
- "expect": {
- "endpoint": {
- "url": "https://evidently.us-gov-east-1.api.aws"
- }
- },
- "params": {
- "Region": "us-gov-east-1",
- "UseFIPS": false,
- "UseDualStack": true
- }
- },
- {
- "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled",
- "expect": {
- "endpoint": {
- "url": "https://evidently.us-gov-east-1.amazonaws.com"
- }
- },
- "params": {
- "Region": "us-gov-east-1",
- "UseFIPS": false,
- "UseDualStack": false
- }
- },
- {
- "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled",
- "expect": {
- "endpoint": {
- "url": "https://evidently-fips.us-iso-east-1.c2s.ic.gov"
- }
- },
- "params": {
- "Region": "us-iso-east-1",
- "UseFIPS": true,
- "UseDualStack": false
- }
- },
- {
- "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled",
- "expect": {
- "endpoint": {
- "url": "https://evidently.us-iso-east-1.c2s.ic.gov"
- }
- },
- "params": {
- "Region": "us-iso-east-1",
- "UseFIPS": false,
- "UseDualStack": false
- }
- },
- {
- "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled",
- "expect": {
- "endpoint": {
- "url": "https://evidently-fips.us-isob-east-1.sc2s.sgov.gov"
- }
- },
- "params": {
- "Region": "us-isob-east-1",
- "UseFIPS": true,
- "UseDualStack": false
- }
- },
- {
- "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled",
- "expect": {
- "endpoint": {
- "url": "https://evidently.us-isob-east-1.sc2s.sgov.gov"
- }
- },
- "params": {
- "Region": "us-isob-east-1",
- "UseFIPS": false,
- "UseDualStack": false
- }
- },
- {
- "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled",
- "expect": {
- "endpoint": {
- "url": "https://example.com"
- }
- },
- "params": {
- "Region": "us-east-1",
- "UseFIPS": false,
- "UseDualStack": false,
- "Endpoint": "https://example.com"
- }
- },
- {
- "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled",
- "expect": {
- "endpoint": {
- "url": "https://example.com"
- }
- },
- "params": {
- "UseFIPS": false,
- "UseDualStack": false,
- "Endpoint": "https://example.com"
- }
- },
- {
- "documentation": "For custom endpoint with fips enabled and dualstack disabled",
- "expect": {
- "error": "Invalid Configuration: FIPS and custom endpoint are not supported"
- },
- "params": {
- "Region": "us-east-1",
- "UseFIPS": true,
- "UseDualStack": false,
- "Endpoint": "https://example.com"
- }
- },
- {
- "documentation": "For custom endpoint with fips disabled and dualstack enabled",
- "expect": {
- "error": "Invalid Configuration: Dualstack and custom endpoint are not supported"
- },
- "params": {
- "Region": "us-east-1",
- "UseFIPS": false,
- "UseDualStack": true,
- "Endpoint": "https://example.com"
- }
- },
- {
- "documentation": "Missing region",
- "expect": {
- "error": "Invalid Configuration: Missing Region"
- }
- }
- ],
- "version": "1.0"
-}
\ No newline at end of file
diff --git a/apis/evidently/2021-02-01/examples-1.json b/apis/evidently/2021-02-01/examples-1.json
deleted file mode 100644
index 2fb77604d1b..00000000000
--- a/apis/evidently/2021-02-01/examples-1.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "version": "1.0",
- "examples": {}
-}
diff --git a/apis/evidently/2021-02-01/paginators-1.json b/apis/evidently/2021-02-01/paginators-1.json
deleted file mode 100644
index c72d3cbae36..00000000000
--- a/apis/evidently/2021-02-01/paginators-1.json
+++ /dev/null
@@ -1,40 +0,0 @@
-{
- "pagination": {
- "ListExperiments": {
- "input_token": "nextToken",
- "output_token": "nextToken",
- "limit_key": "maxResults",
- "result_key": "experiments"
- },
- "ListFeatures": {
- "input_token": "nextToken",
- "output_token": "nextToken",
- "limit_key": "maxResults",
- "result_key": "features"
- },
- "ListLaunches": {
- "input_token": "nextToken",
- "output_token": "nextToken",
- "limit_key": "maxResults",
- "result_key": "launches"
- },
- "ListProjects": {
- "input_token": "nextToken",
- "output_token": "nextToken",
- "limit_key": "maxResults",
- "result_key": "projects"
- },
- "ListSegmentReferences": {
- "input_token": "nextToken",
- "output_token": "nextToken",
- "limit_key": "maxResults",
- "result_key": "referencedBy"
- },
- "ListSegments": {
- "input_token": "nextToken",
- "output_token": "nextToken",
- "limit_key": "maxResults",
- "result_key": "segments"
- }
- }
-}
diff --git a/apis/iotanalytics/2017-11-27/api-2.json b/apis/iotanalytics/2017-11-27/api-2.json
deleted file mode 100644
index a08a5eba19d..00000000000
--- a/apis/iotanalytics/2017-11-27/api-2.json
+++ /dev/null
@@ -1,2423 +0,0 @@
-{
- "version":"2.0",
- "metadata":{
- "apiVersion":"2017-11-27",
- "endpointPrefix":"iotanalytics",
- "protocol":"rest-json",
- "protocols":["rest-json"],
- "serviceFullName":"AWS IoT Analytics",
- "serviceId":"IoTAnalytics",
- "signatureVersion":"v4",
- "signingName":"iotanalytics",
- "uid":"iotanalytics-2017-11-27",
- "auth":["aws.auth#sigv4"]
- },
- "operations":{
- "BatchPutMessage":{
- "name":"BatchPutMessage",
- "http":{
- "method":"POST",
- "requestUri":"/messages/batch",
- "responseCode":200
- },
- "input":{"shape":"BatchPutMessageRequest"},
- "output":{"shape":"BatchPutMessageResponse"},
- "errors":[
- {"shape":"ResourceNotFoundException"},
- {"shape":"InvalidRequestException"},
- {"shape":"InternalFailureException"},
- {"shape":"ServiceUnavailableException"},
- {"shape":"ThrottlingException"}
- ]
- },
- "CancelPipelineReprocessing":{
- "name":"CancelPipelineReprocessing",
- "http":{
- "method":"DELETE",
- "requestUri":"/pipelines/{pipelineName}/reprocessing/{reprocessingId}"
- },
- "input":{"shape":"CancelPipelineReprocessingRequest"},
- "output":{"shape":"CancelPipelineReprocessingResponse"},
- "errors":[
- {"shape":"ResourceNotFoundException"},
- {"shape":"InvalidRequestException"},
- {"shape":"InternalFailureException"},
- {"shape":"ServiceUnavailableException"},
- {"shape":"ThrottlingException"}
- ]
- },
- "CreateChannel":{
- "name":"CreateChannel",
- "http":{
- "method":"POST",
- "requestUri":"/channels",
- "responseCode":201
- },
- "input":{"shape":"CreateChannelRequest"},
- "output":{"shape":"CreateChannelResponse"},
- "errors":[
- {"shape":"InvalidRequestException"},
- {"shape":"ResourceAlreadyExistsException"},
- {"shape":"InternalFailureException"},
- {"shape":"ServiceUnavailableException"},
- {"shape":"ThrottlingException"},
- {"shape":"LimitExceededException"}
- ]
- },
- "CreateDataset":{
- "name":"CreateDataset",
- "http":{
- "method":"POST",
- "requestUri":"/datasets",
- "responseCode":201
- },
- "input":{"shape":"CreateDatasetRequest"},
- "output":{"shape":"CreateDatasetResponse"},
- "errors":[
- {"shape":"InvalidRequestException"},
- {"shape":"ResourceAlreadyExistsException"},
- {"shape":"InternalFailureException"},
- {"shape":"ServiceUnavailableException"},
- {"shape":"ThrottlingException"},
- {"shape":"LimitExceededException"}
- ]
- },
- "CreateDatasetContent":{
- "name":"CreateDatasetContent",
- "http":{
- "method":"POST",
- "requestUri":"/datasets/{datasetName}/content"
- },
- "input":{"shape":"CreateDatasetContentRequest"},
- "output":{"shape":"CreateDatasetContentResponse"},
- "errors":[
- {"shape":"InvalidRequestException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"InternalFailureException"},
- {"shape":"ServiceUnavailableException"},
- {"shape":"ThrottlingException"}
- ]
- },
- "CreateDatastore":{
- "name":"CreateDatastore",
- "http":{
- "method":"POST",
- "requestUri":"/datastores",
- "responseCode":201
- },
- "input":{"shape":"CreateDatastoreRequest"},
- "output":{"shape":"CreateDatastoreResponse"},
- "errors":[
- {"shape":"InvalidRequestException"},
- {"shape":"ResourceAlreadyExistsException"},
- {"shape":"InternalFailureException"},
- {"shape":"ServiceUnavailableException"},
- {"shape":"ThrottlingException"},
- {"shape":"LimitExceededException"}
- ]
- },
- "CreatePipeline":{
- "name":"CreatePipeline",
- "http":{
- "method":"POST",
- "requestUri":"/pipelines",
- "responseCode":201
- },
- "input":{"shape":"CreatePipelineRequest"},
- "output":{"shape":"CreatePipelineResponse"},
- "errors":[
- {"shape":"InvalidRequestException"},
- {"shape":"ResourceAlreadyExistsException"},
- {"shape":"InternalFailureException"},
- {"shape":"ServiceUnavailableException"},
- {"shape":"ThrottlingException"},
- {"shape":"LimitExceededException"}
- ]
- },
- "DeleteChannel":{
- "name":"DeleteChannel",
- "http":{
- "method":"DELETE",
- "requestUri":"/channels/{channelName}",
- "responseCode":204
- },
- "input":{"shape":"DeleteChannelRequest"},
- "errors":[
- {"shape":"InvalidRequestException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"InternalFailureException"},
- {"shape":"ServiceUnavailableException"},
- {"shape":"ThrottlingException"}
- ]
- },
- "DeleteDataset":{
- "name":"DeleteDataset",
- "http":{
- "method":"DELETE",
- "requestUri":"/datasets/{datasetName}",
- "responseCode":204
- },
- "input":{"shape":"DeleteDatasetRequest"},
- "errors":[
- {"shape":"InvalidRequestException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"InternalFailureException"},
- {"shape":"ServiceUnavailableException"},
- {"shape":"ThrottlingException"}
- ]
- },
- "DeleteDatasetContent":{
- "name":"DeleteDatasetContent",
- "http":{
- "method":"DELETE",
- "requestUri":"/datasets/{datasetName}/content",
- "responseCode":204
- },
- "input":{"shape":"DeleteDatasetContentRequest"},
- "errors":[
- {"shape":"InvalidRequestException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"InternalFailureException"},
- {"shape":"ServiceUnavailableException"},
- {"shape":"ThrottlingException"}
- ]
- },
- "DeleteDatastore":{
- "name":"DeleteDatastore",
- "http":{
- "method":"DELETE",
- "requestUri":"/datastores/{datastoreName}",
- "responseCode":204
- },
- "input":{"shape":"DeleteDatastoreRequest"},
- "errors":[
- {"shape":"InvalidRequestException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"InternalFailureException"},
- {"shape":"ServiceUnavailableException"},
- {"shape":"ThrottlingException"}
- ]
- },
- "DeletePipeline":{
- "name":"DeletePipeline",
- "http":{
- "method":"DELETE",
- "requestUri":"/pipelines/{pipelineName}",
- "responseCode":204
- },
- "input":{"shape":"DeletePipelineRequest"},
- "errors":[
- {"shape":"InvalidRequestException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"InternalFailureException"},
- {"shape":"ServiceUnavailableException"},
- {"shape":"ThrottlingException"}
- ]
- },
- "DescribeChannel":{
- "name":"DescribeChannel",
- "http":{
- "method":"GET",
- "requestUri":"/channels/{channelName}"
- },
- "input":{"shape":"DescribeChannelRequest"},
- "output":{"shape":"DescribeChannelResponse"},
- "errors":[
- {"shape":"InvalidRequestException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"InternalFailureException"},
- {"shape":"ServiceUnavailableException"},
- {"shape":"ThrottlingException"}
- ]
- },
- "DescribeDataset":{
- "name":"DescribeDataset",
- "http":{
- "method":"GET",
- "requestUri":"/datasets/{datasetName}"
- },
- "input":{"shape":"DescribeDatasetRequest"},
- "output":{"shape":"DescribeDatasetResponse"},
- "errors":[
- {"shape":"InvalidRequestException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"InternalFailureException"},
- {"shape":"ServiceUnavailableException"},
- {"shape":"ThrottlingException"}
- ]
- },
- "DescribeDatastore":{
- "name":"DescribeDatastore",
- "http":{
- "method":"GET",
- "requestUri":"/datastores/{datastoreName}"
- },
- "input":{"shape":"DescribeDatastoreRequest"},
- "output":{"shape":"DescribeDatastoreResponse"},
- "errors":[
- {"shape":"InvalidRequestException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"InternalFailureException"},
- {"shape":"ServiceUnavailableException"},
- {"shape":"ThrottlingException"}
- ]
- },
- "DescribeLoggingOptions":{
- "name":"DescribeLoggingOptions",
- "http":{
- "method":"GET",
- "requestUri":"/logging"
- },
- "input":{"shape":"DescribeLoggingOptionsRequest"},
- "output":{"shape":"DescribeLoggingOptionsResponse"},
- "errors":[
- {"shape":"InvalidRequestException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"InternalFailureException"},
- {"shape":"ServiceUnavailableException"},
- {"shape":"ThrottlingException"}
- ]
- },
- "DescribePipeline":{
- "name":"DescribePipeline",
- "http":{
- "method":"GET",
- "requestUri":"/pipelines/{pipelineName}"
- },
- "input":{"shape":"DescribePipelineRequest"},
- "output":{"shape":"DescribePipelineResponse"},
- "errors":[
- {"shape":"InvalidRequestException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"InternalFailureException"},
- {"shape":"ServiceUnavailableException"},
- {"shape":"ThrottlingException"}
- ]
- },
- "GetDatasetContent":{
- "name":"GetDatasetContent",
- "http":{
- "method":"GET",
- "requestUri":"/datasets/{datasetName}/content"
- },
- "input":{"shape":"GetDatasetContentRequest"},
- "output":{"shape":"GetDatasetContentResponse"},
- "errors":[
- {"shape":"InvalidRequestException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"InternalFailureException"},
- {"shape":"ServiceUnavailableException"},
- {"shape":"ThrottlingException"}
- ]
- },
- "ListChannels":{
- "name":"ListChannels",
- "http":{
- "method":"GET",
- "requestUri":"/channels"
- },
- "input":{"shape":"ListChannelsRequest"},
- "output":{"shape":"ListChannelsResponse"},
- "errors":[
- {"shape":"InvalidRequestException"},
- {"shape":"InternalFailureException"},
- {"shape":"ServiceUnavailableException"},
- {"shape":"ThrottlingException"}
- ]
- },
- "ListDatasetContents":{
- "name":"ListDatasetContents",
- "http":{
- "method":"GET",
- "requestUri":"/datasets/{datasetName}/contents"
- },
- "input":{"shape":"ListDatasetContentsRequest"},
- "output":{"shape":"ListDatasetContentsResponse"},
- "errors":[
- {"shape":"InvalidRequestException"},
- {"shape":"InternalFailureException"},
- {"shape":"ServiceUnavailableException"},
- {"shape":"ThrottlingException"},
- {"shape":"ResourceNotFoundException"}
- ]
- },
- "ListDatasets":{
- "name":"ListDatasets",
- "http":{
- "method":"GET",
- "requestUri":"/datasets"
- },
- "input":{"shape":"ListDatasetsRequest"},
- "output":{"shape":"ListDatasetsResponse"},
- "errors":[
- {"shape":"InvalidRequestException"},
- {"shape":"InternalFailureException"},
- {"shape":"ServiceUnavailableException"},
- {"shape":"ThrottlingException"}
- ]
- },
- "ListDatastores":{
- "name":"ListDatastores",
- "http":{
- "method":"GET",
- "requestUri":"/datastores"
- },
- "input":{"shape":"ListDatastoresRequest"},
- "output":{"shape":"ListDatastoresResponse"},
- "errors":[
- {"shape":"InvalidRequestException"},
- {"shape":"InternalFailureException"},
- {"shape":"ServiceUnavailableException"},
- {"shape":"ThrottlingException"}
- ]
- },
- "ListPipelines":{
- "name":"ListPipelines",
- "http":{
- "method":"GET",
- "requestUri":"/pipelines"
- },
- "input":{"shape":"ListPipelinesRequest"},
- "output":{"shape":"ListPipelinesResponse"},
- "errors":[
- {"shape":"InvalidRequestException"},
- {"shape":"InternalFailureException"},
- {"shape":"ServiceUnavailableException"},
- {"shape":"ThrottlingException"}
- ]
- },
- "ListTagsForResource":{
- "name":"ListTagsForResource",
- "http":{
- "method":"GET",
- "requestUri":"/tags"
- },
- "input":{"shape":"ListTagsForResourceRequest"},
- "output":{"shape":"ListTagsForResourceResponse"},
- "errors":[
- {"shape":"InvalidRequestException"},
- {"shape":"InternalFailureException"},
- {"shape":"ServiceUnavailableException"},
- {"shape":"ThrottlingException"},
- {"shape":"LimitExceededException"},
- {"shape":"ResourceNotFoundException"}
- ]
- },
- "PutLoggingOptions":{
- "name":"PutLoggingOptions",
- "http":{
- "method":"PUT",
- "requestUri":"/logging"
- },
- "input":{"shape":"PutLoggingOptionsRequest"},
- "errors":[
- {"shape":"InvalidRequestException"},
- {"shape":"InternalFailureException"},
- {"shape":"ServiceUnavailableException"},
- {"shape":"ThrottlingException"}
- ]
- },
- "RunPipelineActivity":{
- "name":"RunPipelineActivity",
- "http":{
- "method":"POST",
- "requestUri":"/pipelineactivities/run"
- },
- "input":{"shape":"RunPipelineActivityRequest"},
- "output":{"shape":"RunPipelineActivityResponse"},
- "errors":[
- {"shape":"InvalidRequestException"},
- {"shape":"InternalFailureException"},
- {"shape":"ServiceUnavailableException"},
- {"shape":"ThrottlingException"}
- ]
- },
- "SampleChannelData":{
- "name":"SampleChannelData",
- "http":{
- "method":"GET",
- "requestUri":"/channels/{channelName}/sample"
- },
- "input":{"shape":"SampleChannelDataRequest"},
- "output":{"shape":"SampleChannelDataResponse"},
- "errors":[
- {"shape":"InvalidRequestException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"InternalFailureException"},
- {"shape":"ServiceUnavailableException"},
- {"shape":"ThrottlingException"}
- ]
- },
- "StartPipelineReprocessing":{
- "name":"StartPipelineReprocessing",
- "http":{
- "method":"POST",
- "requestUri":"/pipelines/{pipelineName}/reprocessing"
- },
- "input":{"shape":"StartPipelineReprocessingRequest"},
- "output":{"shape":"StartPipelineReprocessingResponse"},
- "errors":[
- {"shape":"ResourceNotFoundException"},
- {"shape":"ResourceAlreadyExistsException"},
- {"shape":"InvalidRequestException"},
- {"shape":"InternalFailureException"},
- {"shape":"ServiceUnavailableException"},
- {"shape":"ThrottlingException"}
- ]
- },
- "TagResource":{
- "name":"TagResource",
- "http":{
- "method":"POST",
- "requestUri":"/tags",
- "responseCode":204
- },
- "input":{"shape":"TagResourceRequest"},
- "output":{"shape":"TagResourceResponse"},
- "errors":[
- {"shape":"InvalidRequestException"},
- {"shape":"InternalFailureException"},
- {"shape":"ServiceUnavailableException"},
- {"shape":"ThrottlingException"},
- {"shape":"LimitExceededException"},
- {"shape":"ResourceNotFoundException"}
- ]
- },
- "UntagResource":{
- "name":"UntagResource",
- "http":{
- "method":"DELETE",
- "requestUri":"/tags",
- "responseCode":204
- },
- "input":{"shape":"UntagResourceRequest"},
- "output":{"shape":"UntagResourceResponse"},
- "errors":[
- {"shape":"InvalidRequestException"},
- {"shape":"InternalFailureException"},
- {"shape":"ServiceUnavailableException"},
- {"shape":"ThrottlingException"},
- {"shape":"LimitExceededException"},
- {"shape":"ResourceNotFoundException"}
- ]
- },
- "UpdateChannel":{
- "name":"UpdateChannel",
- "http":{
- "method":"PUT",
- "requestUri":"/channels/{channelName}"
- },
- "input":{"shape":"UpdateChannelRequest"},
- "errors":[
- {"shape":"InvalidRequestException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"InternalFailureException"},
- {"shape":"ServiceUnavailableException"},
- {"shape":"ThrottlingException"}
- ]
- },
- "UpdateDataset":{
- "name":"UpdateDataset",
- "http":{
- "method":"PUT",
- "requestUri":"/datasets/{datasetName}"
- },
- "input":{"shape":"UpdateDatasetRequest"},
- "errors":[
- {"shape":"InvalidRequestException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"InternalFailureException"},
- {"shape":"ServiceUnavailableException"},
- {"shape":"ThrottlingException"}
- ]
- },
- "UpdateDatastore":{
- "name":"UpdateDatastore",
- "http":{
- "method":"PUT",
- "requestUri":"/datastores/{datastoreName}"
- },
- "input":{"shape":"UpdateDatastoreRequest"},
- "errors":[
- {"shape":"InvalidRequestException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"InternalFailureException"},
- {"shape":"ServiceUnavailableException"},
- {"shape":"ThrottlingException"}
- ]
- },
- "UpdatePipeline":{
- "name":"UpdatePipeline",
- "http":{
- "method":"PUT",
- "requestUri":"/pipelines/{pipelineName}"
- },
- "input":{"shape":"UpdatePipelineRequest"},
- "errors":[
- {"shape":"InvalidRequestException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"InternalFailureException"},
- {"shape":"ServiceUnavailableException"},
- {"shape":"ThrottlingException"},
- {"shape":"LimitExceededException"}
- ]
- }
- },
- "shapes":{
- "ActivityBatchSize":{
- "type":"integer",
- "max":1000,
- "min":1
- },
- "ActivityName":{
- "type":"string",
- "max":128,
- "min":1
- },
- "AddAttributesActivity":{
- "type":"structure",
- "required":[
- "name",
- "attributes"
- ],
- "members":{
- "name":{"shape":"ActivityName"},
- "attributes":{"shape":"AttributeNameMapping"},
- "next":{"shape":"ActivityName"}
- }
- },
- "AttributeName":{
- "type":"string",
- "max":256,
- "min":1
- },
- "AttributeNameMapping":{
- "type":"map",
- "key":{"shape":"AttributeName"},
- "value":{"shape":"AttributeName"},
- "max":50,
- "min":1
- },
- "AttributeNames":{
- "type":"list",
- "member":{"shape":"AttributeName"},
- "max":50,
- "min":1
- },
- "BatchPutMessageErrorEntries":{
- "type":"list",
- "member":{"shape":"BatchPutMessageErrorEntry"}
- },
- "BatchPutMessageErrorEntry":{
- "type":"structure",
- "members":{
- "messageId":{"shape":"MessageId"},
- "errorCode":{"shape":"ErrorCode"},
- "errorMessage":{"shape":"ErrorMessage"}
- }
- },
- "BatchPutMessageRequest":{
- "type":"structure",
- "required":[
- "channelName",
- "messages"
- ],
- "members":{
- "channelName":{"shape":"ChannelName"},
- "messages":{"shape":"Messages"}
- }
- },
- "BatchPutMessageResponse":{
- "type":"structure",
- "members":{
- "batchPutMessageErrorEntries":{"shape":"BatchPutMessageErrorEntries"}
- }
- },
- "BucketKeyExpression":{
- "type":"string",
- "max":255,
- "min":1,
- "pattern":"^[a-zA-Z0-9!_.*'()/{}:-]*$"
- },
- "BucketName":{
- "type":"string",
- "max":255,
- "min":3,
- "pattern":"^[a-zA-Z0-9.\\-_]*$"
- },
- "CancelPipelineReprocessingRequest":{
- "type":"structure",
- "required":[
- "pipelineName",
- "reprocessingId"
- ],
- "members":{
- "pipelineName":{
- "shape":"PipelineName",
- "location":"uri",
- "locationName":"pipelineName"
- },
- "reprocessingId":{
- "shape":"ReprocessingId",
- "location":"uri",
- "locationName":"reprocessingId"
- }
- }
- },
- "CancelPipelineReprocessingResponse":{
- "type":"structure",
- "members":{}
- },
- "Channel":{
- "type":"structure",
- "members":{
- "name":{"shape":"ChannelName"},
- "storage":{"shape":"ChannelStorage"},
- "arn":{"shape":"ChannelArn"},
- "status":{"shape":"ChannelStatus"},
- "retentionPeriod":{"shape":"RetentionPeriod"},
- "creationTime":{"shape":"Timestamp"},
- "lastUpdateTime":{"shape":"Timestamp"},
- "lastMessageArrivalTime":{"shape":"Timestamp"}
- }
- },
- "ChannelActivity":{
- "type":"structure",
- "required":[
- "name",
- "channelName"
- ],
- "members":{
- "name":{"shape":"ActivityName"},
- "channelName":{"shape":"ChannelName"},
- "next":{"shape":"ActivityName"}
- }
- },
- "ChannelArn":{"type":"string"},
- "ChannelMessages":{
- "type":"structure",
- "members":{
- "s3Paths":{"shape":"S3PathChannelMessages"}
- }
- },
- "ChannelName":{
- "type":"string",
- "max":128,
- "min":1,
- "pattern":"(^(?!_{2}))(^[a-zA-Z0-9_]+$)"
- },
- "ChannelStatistics":{
- "type":"structure",
- "members":{
- "size":{"shape":"EstimatedResourceSize"}
- }
- },
- "ChannelStatus":{
- "type":"string",
- "enum":[
- "CREATING",
- "ACTIVE",
- "DELETING"
- ]
- },
- "ChannelStorage":{
- "type":"structure",
- "members":{
- "serviceManagedS3":{"shape":"ServiceManagedChannelS3Storage"},
- "customerManagedS3":{"shape":"CustomerManagedChannelS3Storage"}
- }
- },
- "ChannelStorageSummary":{
- "type":"structure",
- "members":{
- "serviceManagedS3":{"shape":"ServiceManagedChannelS3StorageSummary"},
- "customerManagedS3":{"shape":"CustomerManagedChannelS3StorageSummary"}
- }
- },
- "ChannelSummaries":{
- "type":"list",
- "member":{"shape":"ChannelSummary"}
- },
- "ChannelSummary":{
- "type":"structure",
- "members":{
- "channelName":{"shape":"ChannelName"},
- "channelStorage":{"shape":"ChannelStorageSummary"},
- "status":{"shape":"ChannelStatus"},
- "creationTime":{"shape":"Timestamp"},
- "lastUpdateTime":{"shape":"Timestamp"},
- "lastMessageArrivalTime":{"shape":"Timestamp"}
- }
- },
- "Column":{
- "type":"structure",
- "required":[
- "name",
- "type"
- ],
- "members":{
- "name":{"shape":"ColumnName"},
- "type":{"shape":"ColumnDataType"}
- }
- },
- "ColumnDataType":{
- "type":"string",
- "max":131072,
- "min":1,
- "pattern":"^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*$"
- },
- "ColumnName":{
- "type":"string",
- "max":255,
- "min":1,
- "pattern":"^[A-Za-z_]([A-Za-z0-9]*|[A-Za-z0-9][A-Za-z0-9_]*)$"
- },
- "Columns":{
- "type":"list",
- "member":{"shape":"Column"}
- },
- "ComputeType":{
- "type":"string",
- "enum":[
- "ACU_1",
- "ACU_2"
- ]
- },
- "ContainerDatasetAction":{
- "type":"structure",
- "required":[
- "image",
- "executionRoleArn",
- "resourceConfiguration"
- ],
- "members":{
- "image":{"shape":"Image"},
- "executionRoleArn":{"shape":"RoleArn"},
- "resourceConfiguration":{"shape":"ResourceConfiguration"},
- "variables":{"shape":"Variables"}
- }
- },
- "CreateChannelRequest":{
- "type":"structure",
- "required":["channelName"],
- "members":{
- "channelName":{"shape":"ChannelName"},
- "channelStorage":{"shape":"ChannelStorage"},
- "retentionPeriod":{"shape":"RetentionPeriod"},
- "tags":{"shape":"TagList"}
- }
- },
- "CreateChannelResponse":{
- "type":"structure",
- "members":{
- "channelName":{"shape":"ChannelName"},
- "channelArn":{"shape":"ChannelArn"},
- "retentionPeriod":{"shape":"RetentionPeriod"}
- }
- },
- "CreateDatasetContentRequest":{
- "type":"structure",
- "required":["datasetName"],
- "members":{
- "datasetName":{
- "shape":"DatasetName",
- "location":"uri",
- "locationName":"datasetName"
- },
- "versionId":{"shape":"DatasetContentVersion"}
- }
- },
- "CreateDatasetContentResponse":{
- "type":"structure",
- "members":{
- "versionId":{"shape":"DatasetContentVersion"}
- }
- },
- "CreateDatasetRequest":{
- "type":"structure",
- "required":[
- "datasetName",
- "actions"
- ],
- "members":{
- "datasetName":{"shape":"DatasetName"},
- "actions":{"shape":"DatasetActions"},
- "triggers":{"shape":"DatasetTriggers"},
- "contentDeliveryRules":{"shape":"DatasetContentDeliveryRules"},
- "retentionPeriod":{"shape":"RetentionPeriod"},
- "versioningConfiguration":{"shape":"VersioningConfiguration"},
- "tags":{"shape":"TagList"},
- "lateDataRules":{"shape":"LateDataRules"}
- }
- },
- "CreateDatasetResponse":{
- "type":"structure",
- "members":{
- "datasetName":{"shape":"DatasetName"},
- "datasetArn":{"shape":"DatasetArn"},
- "retentionPeriod":{"shape":"RetentionPeriod"}
- }
- },
- "CreateDatastoreRequest":{
- "type":"structure",
- "required":["datastoreName"],
- "members":{
- "datastoreName":{"shape":"DatastoreName"},
- "datastoreStorage":{"shape":"DatastoreStorage"},
- "retentionPeriod":{"shape":"RetentionPeriod"},
- "tags":{"shape":"TagList"},
- "fileFormatConfiguration":{"shape":"FileFormatConfiguration"},
- "datastorePartitions":{"shape":"DatastorePartitions"}
- }
- },
- "CreateDatastoreResponse":{
- "type":"structure",
- "members":{
- "datastoreName":{"shape":"DatastoreName"},
- "datastoreArn":{"shape":"DatastoreArn"},
- "retentionPeriod":{"shape":"RetentionPeriod"}
- }
- },
- "CreatePipelineRequest":{
- "type":"structure",
- "required":[
- "pipelineName",
- "pipelineActivities"
- ],
- "members":{
- "pipelineName":{"shape":"PipelineName"},
- "pipelineActivities":{"shape":"PipelineActivities"},
- "tags":{"shape":"TagList"}
- }
- },
- "CreatePipelineResponse":{
- "type":"structure",
- "members":{
- "pipelineName":{"shape":"PipelineName"},
- "pipelineArn":{"shape":"PipelineArn"}
- }
- },
- "CustomerManagedChannelS3Storage":{
- "type":"structure",
- "required":[
- "bucket",
- "roleArn"
- ],
- "members":{
- "bucket":{"shape":"BucketName"},
- "keyPrefix":{"shape":"S3KeyPrefix"},
- "roleArn":{"shape":"RoleArn"}
- }
- },
- "CustomerManagedChannelS3StorageSummary":{
- "type":"structure",
- "members":{
- "bucket":{"shape":"BucketName"},
- "keyPrefix":{"shape":"S3KeyPrefix"},
- "roleArn":{"shape":"RoleArn"}
- }
- },
- "CustomerManagedDatastoreS3Storage":{
- "type":"structure",
- "required":[
- "bucket",
- "roleArn"
- ],
- "members":{
- "bucket":{"shape":"BucketName"},
- "keyPrefix":{"shape":"S3KeyPrefix"},
- "roleArn":{"shape":"RoleArn"}
- }
- },
- "CustomerManagedDatastoreS3StorageSummary":{
- "type":"structure",
- "members":{
- "bucket":{"shape":"BucketName"},
- "keyPrefix":{"shape":"S3KeyPrefix"},
- "roleArn":{"shape":"RoleArn"}
- }
- },
- "Dataset":{
- "type":"structure",
- "members":{
- "name":{"shape":"DatasetName"},
- "arn":{"shape":"DatasetArn"},
- "actions":{"shape":"DatasetActions"},
- "triggers":{"shape":"DatasetTriggers"},
- "contentDeliveryRules":{"shape":"DatasetContentDeliveryRules"},
- "status":{"shape":"DatasetStatus"},
- "creationTime":{"shape":"Timestamp"},
- "lastUpdateTime":{"shape":"Timestamp"},
- "retentionPeriod":{"shape":"RetentionPeriod"},
- "versioningConfiguration":{"shape":"VersioningConfiguration"},
- "lateDataRules":{"shape":"LateDataRules"}
- }
- },
- "DatasetAction":{
- "type":"structure",
- "members":{
- "actionName":{"shape":"DatasetActionName"},
- "queryAction":{"shape":"SqlQueryDatasetAction"},
- "containerAction":{"shape":"ContainerDatasetAction"}
- }
- },
- "DatasetActionName":{
- "type":"string",
- "max":128,
- "min":1,
- "pattern":"^[a-zA-Z0-9_]+$"
- },
- "DatasetActionSummaries":{
- "type":"list",
- "member":{"shape":"DatasetActionSummary"},
- "max":1,
- "min":1
- },
- "DatasetActionSummary":{
- "type":"structure",
- "members":{
- "actionName":{"shape":"DatasetActionName"},
- "actionType":{"shape":"DatasetActionType"}
- }
- },
- "DatasetActionType":{
- "type":"string",
- "enum":[
- "QUERY",
- "CONTAINER"
- ]
- },
- "DatasetActions":{
- "type":"list",
- "member":{"shape":"DatasetAction"},
- "max":1,
- "min":1
- },
- "DatasetArn":{"type":"string"},
- "DatasetContentDeliveryDestination":{
- "type":"structure",
- "members":{
- "iotEventsDestinationConfiguration":{"shape":"IotEventsDestinationConfiguration"},
- "s3DestinationConfiguration":{"shape":"S3DestinationConfiguration"}
- }
- },
- "DatasetContentDeliveryRule":{
- "type":"structure",
- "required":["destination"],
- "members":{
- "entryName":{"shape":"EntryName"},
- "destination":{"shape":"DatasetContentDeliveryDestination"}
- }
- },
- "DatasetContentDeliveryRules":{
- "type":"list",
- "member":{"shape":"DatasetContentDeliveryRule"},
- "max":20,
- "min":0
- },
- "DatasetContentState":{
- "type":"string",
- "enum":[
- "CREATING",
- "SUCCEEDED",
- "FAILED"
- ]
- },
- "DatasetContentStatus":{
- "type":"structure",
- "members":{
- "state":{"shape":"DatasetContentState"},
- "reason":{"shape":"Reason"}
- }
- },
- "DatasetContentSummaries":{
- "type":"list",
- "member":{"shape":"DatasetContentSummary"}
- },
- "DatasetContentSummary":{
- "type":"structure",
- "members":{
- "version":{"shape":"DatasetContentVersion"},
- "status":{"shape":"DatasetContentStatus"},
- "creationTime":{"shape":"Timestamp"},
- "scheduleTime":{"shape":"Timestamp"},
- "completionTime":{"shape":"Timestamp"}
- }
- },
- "DatasetContentVersion":{
- "type":"string",
- "max":36,
- "min":7
- },
- "DatasetContentVersionValue":{
- "type":"structure",
- "required":["datasetName"],
- "members":{
- "datasetName":{"shape":"DatasetName"}
- }
- },
- "DatasetEntries":{
- "type":"list",
- "member":{"shape":"DatasetEntry"}
- },
- "DatasetEntry":{
- "type":"structure",
- "members":{
- "entryName":{"shape":"EntryName"},
- "dataURI":{"shape":"PresignedURI"}
- }
- },
- "DatasetName":{
- "type":"string",
- "max":128,
- "min":1,
- "pattern":"(^(?!_{2}))(^[a-zA-Z0-9_]+$)"
- },
- "DatasetStatus":{
- "type":"string",
- "enum":[
- "CREATING",
- "ACTIVE",
- "DELETING"
- ]
- },
- "DatasetSummaries":{
- "type":"list",
- "member":{"shape":"DatasetSummary"}
- },
- "DatasetSummary":{
- "type":"structure",
- "members":{
- "datasetName":{"shape":"DatasetName"},
- "status":{"shape":"DatasetStatus"},
- "creationTime":{"shape":"Timestamp"},
- "lastUpdateTime":{"shape":"Timestamp"},
- "triggers":{"shape":"DatasetTriggers"},
- "actions":{"shape":"DatasetActionSummaries"}
- }
- },
- "DatasetTrigger":{
- "type":"structure",
- "members":{
- "schedule":{"shape":"Schedule"},
- "dataset":{"shape":"TriggeringDataset"}
- }
- },
- "DatasetTriggers":{
- "type":"list",
- "member":{"shape":"DatasetTrigger"},
- "max":5,
- "min":0
- },
- "Datastore":{
- "type":"structure",
- "members":{
- "name":{"shape":"DatastoreName"},
- "storage":{"shape":"DatastoreStorage"},
- "arn":{"shape":"DatastoreArn"},
- "status":{"shape":"DatastoreStatus"},
- "retentionPeriod":{"shape":"RetentionPeriod"},
- "creationTime":{"shape":"Timestamp"},
- "lastUpdateTime":{"shape":"Timestamp"},
- "lastMessageArrivalTime":{"shape":"Timestamp"},
- "fileFormatConfiguration":{"shape":"FileFormatConfiguration"},
- "datastorePartitions":{"shape":"DatastorePartitions"}
- }
- },
- "DatastoreActivity":{
- "type":"structure",
- "required":[
- "name",
- "datastoreName"
- ],
- "members":{
- "name":{"shape":"ActivityName"},
- "datastoreName":{"shape":"DatastoreName"}
- }
- },
- "DatastoreArn":{"type":"string"},
- "DatastoreIotSiteWiseMultiLayerStorage":{
- "type":"structure",
- "required":["customerManagedS3Storage"],
- "members":{
- "customerManagedS3Storage":{"shape":"IotSiteWiseCustomerManagedDatastoreS3Storage"}
- }
- },
- "DatastoreIotSiteWiseMultiLayerStorageSummary":{
- "type":"structure",
- "members":{
- "customerManagedS3Storage":{"shape":"IotSiteWiseCustomerManagedDatastoreS3StorageSummary"}
- }
- },
- "DatastoreName":{
- "type":"string",
- "max":128,
- "min":1,
- "pattern":"(^(?!_{2}))(^[a-zA-Z0-9_]+$)"
- },
- "DatastorePartition":{
- "type":"structure",
- "members":{
- "attributePartition":{"shape":"Partition"},
- "timestampPartition":{"shape":"TimestampPartition"}
- }
- },
- "DatastorePartitions":{
- "type":"structure",
- "members":{
- "partitions":{"shape":"Partitions"}
- }
- },
- "DatastoreStatistics":{
- "type":"structure",
- "members":{
- "size":{"shape":"EstimatedResourceSize"}
- }
- },
- "DatastoreStatus":{
- "type":"string",
- "enum":[
- "CREATING",
- "ACTIVE",
- "DELETING"
- ]
- },
- "DatastoreStorage":{
- "type":"structure",
- "members":{
- "serviceManagedS3":{"shape":"ServiceManagedDatastoreS3Storage"},
- "customerManagedS3":{"shape":"CustomerManagedDatastoreS3Storage"},
- "iotSiteWiseMultiLayerStorage":{"shape":"DatastoreIotSiteWiseMultiLayerStorage"}
- }
- },
- "DatastoreStorageSummary":{
- "type":"structure",
- "members":{
- "serviceManagedS3":{"shape":"ServiceManagedDatastoreS3StorageSummary"},
- "customerManagedS3":{"shape":"CustomerManagedDatastoreS3StorageSummary"},
- "iotSiteWiseMultiLayerStorage":{"shape":"DatastoreIotSiteWiseMultiLayerStorageSummary"}
- }
- },
- "DatastoreSummaries":{
- "type":"list",
- "member":{"shape":"DatastoreSummary"}
- },
- "DatastoreSummary":{
- "type":"structure",
- "members":{
- "datastoreName":{"shape":"DatastoreName"},
- "datastoreStorage":{"shape":"DatastoreStorageSummary"},
- "status":{"shape":"DatastoreStatus"},
- "creationTime":{"shape":"Timestamp"},
- "lastUpdateTime":{"shape":"Timestamp"},
- "lastMessageArrivalTime":{"shape":"Timestamp"},
- "fileFormatType":{"shape":"FileFormatType"},
- "datastorePartitions":{"shape":"DatastorePartitions"}
- }
- },
- "DeleteChannelRequest":{
- "type":"structure",
- "required":["channelName"],
- "members":{
- "channelName":{
- "shape":"ChannelName",
- "location":"uri",
- "locationName":"channelName"
- }
- }
- },
- "DeleteDatasetContentRequest":{
- "type":"structure",
- "required":["datasetName"],
- "members":{
- "datasetName":{
- "shape":"DatasetName",
- "location":"uri",
- "locationName":"datasetName"
- },
- "versionId":{
- "shape":"DatasetContentVersion",
- "location":"querystring",
- "locationName":"versionId"
- }
- }
- },
- "DeleteDatasetRequest":{
- "type":"structure",
- "required":["datasetName"],
- "members":{
- "datasetName":{
- "shape":"DatasetName",
- "location":"uri",
- "locationName":"datasetName"
- }
- }
- },
- "DeleteDatastoreRequest":{
- "type":"structure",
- "required":["datastoreName"],
- "members":{
- "datastoreName":{
- "shape":"DatastoreName",
- "location":"uri",
- "locationName":"datastoreName"
- }
- }
- },
- "DeletePipelineRequest":{
- "type":"structure",
- "required":["pipelineName"],
- "members":{
- "pipelineName":{
- "shape":"PipelineName",
- "location":"uri",
- "locationName":"pipelineName"
- }
- }
- },
- "DeltaTime":{
- "type":"structure",
- "required":[
- "offsetSeconds",
- "timeExpression"
- ],
- "members":{
- "offsetSeconds":{"shape":"OffsetSeconds"},
- "timeExpression":{"shape":"TimeExpression"}
- }
- },
- "DeltaTimeSessionWindowConfiguration":{
- "type":"structure",
- "required":["timeoutInMinutes"],
- "members":{
- "timeoutInMinutes":{"shape":"SessionTimeoutInMinutes"}
- }
- },
- "DescribeChannelRequest":{
- "type":"structure",
- "required":["channelName"],
- "members":{
- "channelName":{
- "shape":"ChannelName",
- "location":"uri",
- "locationName":"channelName"
- },
- "includeStatistics":{
- "shape":"IncludeStatisticsFlag",
- "location":"querystring",
- "locationName":"includeStatistics"
- }
- }
- },
- "DescribeChannelResponse":{
- "type":"structure",
- "members":{
- "channel":{"shape":"Channel"},
- "statistics":{"shape":"ChannelStatistics"}
- }
- },
- "DescribeDatasetRequest":{
- "type":"structure",
- "required":["datasetName"],
- "members":{
- "datasetName":{
- "shape":"DatasetName",
- "location":"uri",
- "locationName":"datasetName"
- }
- }
- },
- "DescribeDatasetResponse":{
- "type":"structure",
- "members":{
- "dataset":{"shape":"Dataset"}
- }
- },
- "DescribeDatastoreRequest":{
- "type":"structure",
- "required":["datastoreName"],
- "members":{
- "datastoreName":{
- "shape":"DatastoreName",
- "location":"uri",
- "locationName":"datastoreName"
- },
- "includeStatistics":{
- "shape":"IncludeStatisticsFlag",
- "location":"querystring",
- "locationName":"includeStatistics"
- }
- }
- },
- "DescribeDatastoreResponse":{
- "type":"structure",
- "members":{
- "datastore":{"shape":"Datastore"},
- "statistics":{"shape":"DatastoreStatistics"}
- }
- },
- "DescribeLoggingOptionsRequest":{
- "type":"structure",
- "members":{}
- },
- "DescribeLoggingOptionsResponse":{
- "type":"structure",
- "members":{
- "loggingOptions":{"shape":"LoggingOptions"}
- }
- },
- "DescribePipelineRequest":{
- "type":"structure",
- "required":["pipelineName"],
- "members":{
- "pipelineName":{
- "shape":"PipelineName",
- "location":"uri",
- "locationName":"pipelineName"
- }
- }
- },
- "DescribePipelineResponse":{
- "type":"structure",
- "members":{
- "pipeline":{"shape":"Pipeline"}
- }
- },
- "DeviceRegistryEnrichActivity":{
- "type":"structure",
- "required":[
- "name",
- "attribute",
- "thingName",
- "roleArn"
- ],
- "members":{
- "name":{"shape":"ActivityName"},
- "attribute":{"shape":"AttributeName"},
- "thingName":{"shape":"AttributeName"},
- "roleArn":{"shape":"RoleArn"},
- "next":{"shape":"ActivityName"}
- }
- },
- "DeviceShadowEnrichActivity":{
- "type":"structure",
- "required":[
- "name",
- "attribute",
- "thingName",
- "roleArn"
- ],
- "members":{
- "name":{"shape":"ActivityName"},
- "attribute":{"shape":"AttributeName"},
- "thingName":{"shape":"AttributeName"},
- "roleArn":{"shape":"RoleArn"},
- "next":{"shape":"ActivityName"}
- }
- },
- "DoubleValue":{"type":"double"},
- "EndTime":{"type":"timestamp"},
- "EntryName":{"type":"string"},
- "ErrorCode":{"type":"string"},
- "ErrorMessage":{"type":"string"},
- "EstimatedResourceSize":{
- "type":"structure",
- "members":{
- "estimatedSizeInBytes":{"shape":"SizeInBytes"},
- "estimatedOn":{"shape":"Timestamp"}
- }
- },
- "FileFormatConfiguration":{
- "type":"structure",
- "members":{
- "jsonConfiguration":{"shape":"JsonConfiguration"},
- "parquetConfiguration":{"shape":"ParquetConfiguration"}
- }
- },
- "FileFormatType":{
- "type":"string",
- "enum":[
- "JSON",
- "PARQUET"
- ]
- },
- "FilterActivity":{
- "type":"structure",
- "required":[
- "name",
- "filter"
- ],
- "members":{
- "name":{"shape":"ActivityName"},
- "filter":{"shape":"FilterExpression"},
- "next":{"shape":"ActivityName"}
- }
- },
- "FilterExpression":{
- "type":"string",
- "max":256,
- "min":1
- },
- "GetDatasetContentRequest":{
- "type":"structure",
- "required":["datasetName"],
- "members":{
- "datasetName":{
- "shape":"DatasetName",
- "location":"uri",
- "locationName":"datasetName"
- },
- "versionId":{
- "shape":"DatasetContentVersion",
- "location":"querystring",
- "locationName":"versionId"
- }
- }
- },
- "GetDatasetContentResponse":{
- "type":"structure",
- "members":{
- "entries":{"shape":"DatasetEntries"},
- "timestamp":{"shape":"Timestamp"},
- "status":{"shape":"DatasetContentStatus"}
- }
- },
- "GlueConfiguration":{
- "type":"structure",
- "required":[
- "tableName",
- "databaseName"
- ],
- "members":{
- "tableName":{"shape":"GlueTableName"},
- "databaseName":{"shape":"GlueDatabaseName"}
- }
- },
- "GlueDatabaseName":{
- "type":"string",
- "max":150,
- "min":1,
- "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*"
- },
- "GlueTableName":{
- "type":"string",
- "max":150,
- "min":1,
- "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*"
- },
- "Image":{
- "type":"string",
- "max":255
- },
- "IncludeStatisticsFlag":{"type":"boolean"},
- "InternalFailureException":{
- "type":"structure",
- "members":{
- "message":{"shape":"errorMessage"}
- },
- "error":{"httpStatusCode":500},
- "exception":true,
- "fault":true
- },
- "InvalidRequestException":{
- "type":"structure",
- "members":{
- "message":{"shape":"errorMessage"}
- },
- "error":{"httpStatusCode":400},
- "exception":true
- },
- "IotEventsDestinationConfiguration":{
- "type":"structure",
- "required":[
- "inputName",
- "roleArn"
- ],
- "members":{
- "inputName":{"shape":"IotEventsInputName"},
- "roleArn":{"shape":"RoleArn"}
- }
- },
- "IotEventsInputName":{
- "type":"string",
- "max":128,
- "min":1,
- "pattern":"^[a-zA-Z][a-zA-Z0-9_]*$"
- },
- "IotSiteWiseCustomerManagedDatastoreS3Storage":{
- "type":"structure",
- "required":["bucket"],
- "members":{
- "bucket":{"shape":"BucketName"},
- "keyPrefix":{"shape":"S3KeyPrefix"}
- }
- },
- "IotSiteWiseCustomerManagedDatastoreS3StorageSummary":{
- "type":"structure",
- "members":{
- "bucket":{"shape":"BucketName"},
- "keyPrefix":{"shape":"S3KeyPrefix"}
- }
- },
- "JsonConfiguration":{
- "type":"structure",
- "members":{}
- },
- "LambdaActivity":{
- "type":"structure",
- "required":[
- "name",
- "lambdaName",
- "batchSize"
- ],
- "members":{
- "name":{"shape":"ActivityName"},
- "lambdaName":{"shape":"LambdaName"},
- "batchSize":{"shape":"ActivityBatchSize"},
- "next":{"shape":"ActivityName"}
- }
- },
- "LambdaName":{
- "type":"string",
- "max":64,
- "min":1,
- "pattern":"^[a-zA-Z0-9_-]+$"
- },
- "LateDataRule":{
- "type":"structure",
- "required":["ruleConfiguration"],
- "members":{
- "ruleName":{"shape":"LateDataRuleName"},
- "ruleConfiguration":{"shape":"LateDataRuleConfiguration"}
- }
- },
- "LateDataRuleConfiguration":{
- "type":"structure",
- "members":{
- "deltaTimeSessionWindowConfiguration":{"shape":"DeltaTimeSessionWindowConfiguration"}
- }
- },
- "LateDataRuleName":{
- "type":"string",
- "max":128,
- "min":1,
- "pattern":"^[a-zA-Z0-9_]+$"
- },
- "LateDataRules":{
- "type":"list",
- "member":{"shape":"LateDataRule"},
- "max":1,
- "min":1
- },
- "LimitExceededException":{
- "type":"structure",
- "members":{
- "message":{"shape":"errorMessage"}
- },
- "error":{"httpStatusCode":410},
- "exception":true
- },
- "ListChannelsRequest":{
- "type":"structure",
- "members":{
- "nextToken":{
- "shape":"NextToken",
- "location":"querystring",
- "locationName":"nextToken"
- },
- "maxResults":{
- "shape":"MaxResults",
- "location":"querystring",
- "locationName":"maxResults"
- }
- }
- },
- "ListChannelsResponse":{
- "type":"structure",
- "members":{
- "channelSummaries":{"shape":"ChannelSummaries"},
- "nextToken":{"shape":"NextToken"}
- }
- },
- "ListDatasetContentsRequest":{
- "type":"structure",
- "required":["datasetName"],
- "members":{
- "datasetName":{
- "shape":"DatasetName",
- "location":"uri",
- "locationName":"datasetName"
- },
- "nextToken":{
- "shape":"NextToken",
- "location":"querystring",
- "locationName":"nextToken"
- },
- "maxResults":{
- "shape":"MaxResults",
- "location":"querystring",
- "locationName":"maxResults"
- },
- "scheduledOnOrAfter":{
- "shape":"Timestamp",
- "location":"querystring",
- "locationName":"scheduledOnOrAfter"
- },
- "scheduledBefore":{
- "shape":"Timestamp",
- "location":"querystring",
- "locationName":"scheduledBefore"
- }
- }
- },
- "ListDatasetContentsResponse":{
- "type":"structure",
- "members":{
- "datasetContentSummaries":{"shape":"DatasetContentSummaries"},
- "nextToken":{"shape":"NextToken"}
- }
- },
- "ListDatasetsRequest":{
- "type":"structure",
- "members":{
- "nextToken":{
- "shape":"NextToken",
- "location":"querystring",
- "locationName":"nextToken"
- },
- "maxResults":{
- "shape":"MaxResults",
- "location":"querystring",
- "locationName":"maxResults"
- }
- }
- },
- "ListDatasetsResponse":{
- "type":"structure",
- "members":{
- "datasetSummaries":{"shape":"DatasetSummaries"},
- "nextToken":{"shape":"NextToken"}
- }
- },
- "ListDatastoresRequest":{
- "type":"structure",
- "members":{
- "nextToken":{
- "shape":"NextToken",
- "location":"querystring",
- "locationName":"nextToken"
- },
- "maxResults":{
- "shape":"MaxResults",
- "location":"querystring",
- "locationName":"maxResults"
- }
- }
- },
- "ListDatastoresResponse":{
- "type":"structure",
- "members":{
- "datastoreSummaries":{"shape":"DatastoreSummaries"},
- "nextToken":{"shape":"NextToken"}
- }
- },
- "ListPipelinesRequest":{
- "type":"structure",
- "members":{
- "nextToken":{
- "shape":"NextToken",
- "location":"querystring",
- "locationName":"nextToken"
- },
- "maxResults":{
- "shape":"MaxResults",
- "location":"querystring",
- "locationName":"maxResults"
- }
- }
- },
- "ListPipelinesResponse":{
- "type":"structure",
- "members":{
- "pipelineSummaries":{"shape":"PipelineSummaries"},
- "nextToken":{"shape":"NextToken"}
- }
- },
- "ListTagsForResourceRequest":{
- "type":"structure",
- "required":["resourceArn"],
- "members":{
- "resourceArn":{
- "shape":"ResourceArn",
- "location":"querystring",
- "locationName":"resourceArn"
- }
- }
- },
- "ListTagsForResourceResponse":{
- "type":"structure",
- "members":{
- "tags":{"shape":"TagList"}
- }
- },
- "LogResult":{"type":"string"},
- "LoggingEnabled":{"type":"boolean"},
- "LoggingLevel":{
- "type":"string",
- "enum":["ERROR"]
- },
- "LoggingOptions":{
- "type":"structure",
- "required":[
- "roleArn",
- "level",
- "enabled"
- ],
- "members":{
- "roleArn":{"shape":"RoleArn"},
- "level":{"shape":"LoggingLevel"},
- "enabled":{"shape":"LoggingEnabled"}
- }
- },
- "MathActivity":{
- "type":"structure",
- "required":[
- "name",
- "attribute",
- "math"
- ],
- "members":{
- "name":{"shape":"ActivityName"},
- "attribute":{"shape":"AttributeName"},
- "math":{"shape":"MathExpression"},
- "next":{"shape":"ActivityName"}
- }
- },
- "MathExpression":{
- "type":"string",
- "max":256,
- "min":1
- },
- "MaxMessages":{
- "type":"integer",
- "max":10,
- "min":1
- },
- "MaxResults":{
- "type":"integer",
- "max":250,
- "min":1
- },
- "MaxVersions":{
- "type":"integer",
- "max":1000,
- "min":1
- },
- "Message":{
- "type":"structure",
- "required":[
- "messageId",
- "payload"
- ],
- "members":{
- "messageId":{"shape":"MessageId"},
- "payload":{"shape":"MessagePayload"}
- }
- },
- "MessageId":{
- "type":"string",
- "max":128,
- "min":1,
- "pattern":"\\p{ASCII}*"
- },
- "MessagePayload":{"type":"blob"},
- "MessagePayloads":{
- "type":"list",
- "member":{"shape":"MessagePayload"},
- "max":10,
- "min":1
- },
- "Messages":{
- "type":"list",
- "member":{"shape":"Message"}
- },
- "NextToken":{"type":"string"},
- "OffsetSeconds":{"type":"integer"},
- "OutputFileName":{
- "type":"string",
- "pattern":"[\\w\\.-]{1,255}"
- },
- "OutputFileUriValue":{
- "type":"structure",
- "required":["fileName"],
- "members":{
- "fileName":{"shape":"OutputFileName"}
- }
- },
- "ParquetConfiguration":{
- "type":"structure",
- "members":{
- "schemaDefinition":{"shape":"SchemaDefinition"}
- }
- },
- "Partition":{
- "type":"structure",
- "required":["attributeName"],
- "members":{
- "attributeName":{"shape":"PartitionAttributeName"}
- }
- },
- "PartitionAttributeName":{
- "type":"string",
- "max":128,
- "min":1,
- "pattern":"^[a-zA-Z0-9_]+$"
- },
- "Partitions":{
- "type":"list",
- "member":{"shape":"DatastorePartition"},
- "max":25,
- "min":0
- },
- "Pipeline":{
- "type":"structure",
- "members":{
- "name":{"shape":"PipelineName"},
- "arn":{"shape":"PipelineArn"},
- "activities":{"shape":"PipelineActivities"},
- "reprocessingSummaries":{"shape":"ReprocessingSummaries"},
- "creationTime":{"shape":"Timestamp"},
- "lastUpdateTime":{"shape":"Timestamp"}
- }
- },
- "PipelineActivities":{
- "type":"list",
- "member":{"shape":"PipelineActivity"},
- "max":25,
- "min":1
- },
- "PipelineActivity":{
- "type":"structure",
- "members":{
- "channel":{"shape":"ChannelActivity"},
- "lambda":{"shape":"LambdaActivity"},
- "datastore":{"shape":"DatastoreActivity"},
- "addAttributes":{"shape":"AddAttributesActivity"},
- "removeAttributes":{"shape":"RemoveAttributesActivity"},
- "selectAttributes":{"shape":"SelectAttributesActivity"},
- "filter":{"shape":"FilterActivity"},
- "math":{"shape":"MathActivity"},
- "deviceRegistryEnrich":{"shape":"DeviceRegistryEnrichActivity"},
- "deviceShadowEnrich":{"shape":"DeviceShadowEnrichActivity"}
- }
- },
- "PipelineArn":{"type":"string"},
- "PipelineName":{
- "type":"string",
- "max":128,
- "min":1,
- "pattern":"(^(?!_{2}))(^[a-zA-Z0-9_]+$)"
- },
- "PipelineSummaries":{
- "type":"list",
- "member":{"shape":"PipelineSummary"}
- },
- "PipelineSummary":{
- "type":"structure",
- "members":{
- "pipelineName":{"shape":"PipelineName"},
- "reprocessingSummaries":{"shape":"ReprocessingSummaries"},
- "creationTime":{"shape":"Timestamp"},
- "lastUpdateTime":{"shape":"Timestamp"}
- }
- },
- "PresignedURI":{"type":"string"},
- "PutLoggingOptionsRequest":{
- "type":"structure",
- "required":["loggingOptions"],
- "members":{
- "loggingOptions":{"shape":"LoggingOptions"}
- }
- },
- "QueryFilter":{
- "type":"structure",
- "members":{
- "deltaTime":{"shape":"DeltaTime"}
- }
- },
- "QueryFilters":{
- "type":"list",
- "member":{"shape":"QueryFilter"},
- "max":1,
- "min":0
- },
- "Reason":{"type":"string"},
- "RemoveAttributesActivity":{
- "type":"structure",
- "required":[
- "name",
- "attributes"
- ],
- "members":{
- "name":{"shape":"ActivityName"},
- "attributes":{"shape":"AttributeNames"},
- "next":{"shape":"ActivityName"}
- }
- },
- "ReprocessingId":{"type":"string"},
- "ReprocessingStatus":{
- "type":"string",
- "enum":[
- "RUNNING",
- "SUCCEEDED",
- "CANCELLED",
- "FAILED"
- ]
- },
- "ReprocessingSummaries":{
- "type":"list",
- "member":{"shape":"ReprocessingSummary"}
- },
- "ReprocessingSummary":{
- "type":"structure",
- "members":{
- "id":{"shape":"ReprocessingId"},
- "status":{"shape":"ReprocessingStatus"},
- "creationTime":{"shape":"Timestamp"}
- }
- },
- "ResourceAlreadyExistsException":{
- "type":"structure",
- "members":{
- "message":{"shape":"errorMessage"},
- "resourceId":{"shape":"resourceId"},
- "resourceArn":{"shape":"resourceArn"}
- },
- "error":{"httpStatusCode":409},
- "exception":true
- },
- "ResourceArn":{
- "type":"string",
- "max":2048,
- "min":20
- },
- "ResourceConfiguration":{
- "type":"structure",
- "required":[
- "computeType",
- "volumeSizeInGB"
- ],
- "members":{
- "computeType":{"shape":"ComputeType"},
- "volumeSizeInGB":{"shape":"VolumeSizeInGB"}
- }
- },
- "ResourceNotFoundException":{
- "type":"structure",
- "members":{
- "message":{"shape":"errorMessage"}
- },
- "error":{"httpStatusCode":404},
- "exception":true
- },
- "RetentionPeriod":{
- "type":"structure",
- "members":{
- "unlimited":{"shape":"UnlimitedRetentionPeriod"},
- "numberOfDays":{"shape":"RetentionPeriodInDays"}
- }
- },
- "RetentionPeriodInDays":{
- "type":"integer",
- "min":1
- },
- "RoleArn":{
- "type":"string",
- "max":2048,
- "min":20
- },
- "RunPipelineActivityRequest":{
- "type":"structure",
- "required":[
- "pipelineActivity",
- "payloads"
- ],
- "members":{
- "pipelineActivity":{"shape":"PipelineActivity"},
- "payloads":{"shape":"MessagePayloads"}
- }
- },
- "RunPipelineActivityResponse":{
- "type":"structure",
- "members":{
- "payloads":{"shape":"MessagePayloads"},
- "logResult":{"shape":"LogResult"}
- }
- },
- "S3DestinationConfiguration":{
- "type":"structure",
- "required":[
- "bucket",
- "key",
- "roleArn"
- ],
- "members":{
- "bucket":{"shape":"BucketName"},
- "key":{"shape":"BucketKeyExpression"},
- "glueConfiguration":{"shape":"GlueConfiguration"},
- "roleArn":{"shape":"RoleArn"}
- }
- },
- "S3KeyPrefix":{
- "type":"string",
- "max":255,
- "min":1,
- "pattern":"^[a-zA-Z0-9!_.*'()/{}:-]*/$"
- },
- "S3PathChannelMessage":{
- "type":"string",
- "max":1024,
- "min":1,
- "pattern":"^[a-zA-Z0-9/_!'(){}\\*\\s\\.\\-\\=\\:]+$"
- },
- "S3PathChannelMessages":{
- "type":"list",
- "member":{"shape":"S3PathChannelMessage"},
- "max":100,
- "min":1
- },
- "SampleChannelDataRequest":{
- "type":"structure",
- "required":["channelName"],
- "members":{
- "channelName":{
- "shape":"ChannelName",
- "location":"uri",
- "locationName":"channelName"
- },
- "maxMessages":{
- "shape":"MaxMessages",
- "location":"querystring",
- "locationName":"maxMessages"
- },
- "startTime":{
- "shape":"StartTime",
- "location":"querystring",
- "locationName":"startTime"
- },
- "endTime":{
- "shape":"EndTime",
- "location":"querystring",
- "locationName":"endTime"
- }
- }
- },
- "SampleChannelDataResponse":{
- "type":"structure",
- "members":{
- "payloads":{"shape":"MessagePayloads"}
- }
- },
- "Schedule":{
- "type":"structure",
- "members":{
- "expression":{"shape":"ScheduleExpression"}
- }
- },
- "ScheduleExpression":{"type":"string"},
- "SchemaDefinition":{
- "type":"structure",
- "members":{
- "columns":{"shape":"Columns"}
- }
- },
- "SelectAttributesActivity":{
- "type":"structure",
- "required":[
- "name",
- "attributes"
- ],
- "members":{
- "name":{"shape":"ActivityName"},
- "attributes":{"shape":"AttributeNames"},
- "next":{"shape":"ActivityName"}
- }
- },
- "ServiceManagedChannelS3Storage":{
- "type":"structure",
- "members":{}
- },
- "ServiceManagedChannelS3StorageSummary":{
- "type":"structure",
- "members":{}
- },
- "ServiceManagedDatastoreS3Storage":{
- "type":"structure",
- "members":{}
- },
- "ServiceManagedDatastoreS3StorageSummary":{
- "type":"structure",
- "members":{}
- },
- "ServiceUnavailableException":{
- "type":"structure",
- "members":{
- "message":{"shape":"errorMessage"}
- },
- "error":{"httpStatusCode":503},
- "exception":true,
- "fault":true
- },
- "SessionTimeoutInMinutes":{
- "type":"integer",
- "max":60,
- "min":1
- },
- "SizeInBytes":{"type":"double"},
- "SqlQuery":{"type":"string"},
- "SqlQueryDatasetAction":{
- "type":"structure",
- "required":["sqlQuery"],
- "members":{
- "sqlQuery":{"shape":"SqlQuery"},
- "filters":{"shape":"QueryFilters"}
- }
- },
- "StartPipelineReprocessingRequest":{
- "type":"structure",
- "required":["pipelineName"],
- "members":{
- "pipelineName":{
- "shape":"PipelineName",
- "location":"uri",
- "locationName":"pipelineName"
- },
- "startTime":{"shape":"StartTime"},
- "endTime":{"shape":"EndTime"},
- "channelMessages":{"shape":"ChannelMessages"}
- }
- },
- "StartPipelineReprocessingResponse":{
- "type":"structure",
- "members":{
- "reprocessingId":{"shape":"ReprocessingId"}
- }
- },
- "StartTime":{"type":"timestamp"},
- "StringValue":{
- "type":"string",
- "max":1024,
- "min":0
- },
- "Tag":{
- "type":"structure",
- "required":[
- "key",
- "value"
- ],
- "members":{
- "key":{"shape":"TagKey"},
- "value":{"shape":"TagValue"}
- }
- },
- "TagKey":{
- "type":"string",
- "max":256,
- "min":1
- },
- "TagKeyList":{
- "type":"list",
- "member":{"shape":"TagKey"},
- "max":50,
- "min":1
- },
- "TagList":{
- "type":"list",
- "member":{"shape":"Tag"},
- "max":50,
- "min":1
- },
- "TagResourceRequest":{
- "type":"structure",
- "required":[
- "resourceArn",
- "tags"
- ],
- "members":{
- "resourceArn":{
- "shape":"ResourceArn",
- "location":"querystring",
- "locationName":"resourceArn"
- },
- "tags":{"shape":"TagList"}
- }
- },
- "TagResourceResponse":{
- "type":"structure",
- "members":{}
- },
- "TagValue":{
- "type":"string",
- "max":256,
- "min":1
- },
- "ThrottlingException":{
- "type":"structure",
- "members":{
- "message":{"shape":"errorMessage"}
- },
- "error":{"httpStatusCode":429},
- "exception":true
- },
- "TimeExpression":{"type":"string"},
- "Timestamp":{"type":"timestamp"},
- "TimestampFormat":{
- "type":"string",
- "max":50,
- "min":1,
- "pattern":"^[a-zA-Z0-9\\s\\[\\]_,.'/:-]*$"
- },
- "TimestampPartition":{
- "type":"structure",
- "required":["attributeName"],
- "members":{
- "attributeName":{"shape":"PartitionAttributeName"},
- "timestampFormat":{"shape":"TimestampFormat"}
- }
- },
- "TriggeringDataset":{
- "type":"structure",
- "required":["name"],
- "members":{
- "name":{"shape":"DatasetName"}
- }
- },
- "UnlimitedRetentionPeriod":{"type":"boolean"},
- "UnlimitedVersioning":{"type":"boolean"},
- "UntagResourceRequest":{
- "type":"structure",
- "required":[
- "resourceArn",
- "tagKeys"
- ],
- "members":{
- "resourceArn":{
- "shape":"ResourceArn",
- "location":"querystring",
- "locationName":"resourceArn"
- },
- "tagKeys":{
- "shape":"TagKeyList",
- "location":"querystring",
- "locationName":"tagKeys"
- }
- }
- },
- "UntagResourceResponse":{
- "type":"structure",
- "members":{}
- },
- "UpdateChannelRequest":{
- "type":"structure",
- "required":["channelName"],
- "members":{
- "channelName":{
- "shape":"ChannelName",
- "location":"uri",
- "locationName":"channelName"
- },
- "channelStorage":{"shape":"ChannelStorage"},
- "retentionPeriod":{"shape":"RetentionPeriod"}
- }
- },
- "UpdateDatasetRequest":{
- "type":"structure",
- "required":[
- "datasetName",
- "actions"
- ],
- "members":{
- "datasetName":{
- "shape":"DatasetName",
- "location":"uri",
- "locationName":"datasetName"
- },
- "actions":{"shape":"DatasetActions"},
- "triggers":{"shape":"DatasetTriggers"},
- "contentDeliveryRules":{"shape":"DatasetContentDeliveryRules"},
- "retentionPeriod":{"shape":"RetentionPeriod"},
- "versioningConfiguration":{"shape":"VersioningConfiguration"},
- "lateDataRules":{"shape":"LateDataRules"}
- }
- },
- "UpdateDatastoreRequest":{
- "type":"structure",
- "required":["datastoreName"],
- "members":{
- "datastoreName":{
- "shape":"DatastoreName",
- "location":"uri",
- "locationName":"datastoreName"
- },
- "retentionPeriod":{"shape":"RetentionPeriod"},
- "datastoreStorage":{"shape":"DatastoreStorage"},
- "fileFormatConfiguration":{"shape":"FileFormatConfiguration"}
- }
- },
- "UpdatePipelineRequest":{
- "type":"structure",
- "required":[
- "pipelineName",
- "pipelineActivities"
- ],
- "members":{
- "pipelineName":{
- "shape":"PipelineName",
- "location":"uri",
- "locationName":"pipelineName"
- },
- "pipelineActivities":{"shape":"PipelineActivities"}
- }
- },
- "Variable":{
- "type":"structure",
- "required":["name"],
- "members":{
- "name":{"shape":"VariableName"},
- "stringValue":{"shape":"StringValue"},
- "doubleValue":{
- "shape":"DoubleValue",
- "box":true
- },
- "datasetContentVersionValue":{"shape":"DatasetContentVersionValue"},
- "outputFileUriValue":{"shape":"OutputFileUriValue"}
- }
- },
- "VariableName":{
- "type":"string",
- "max":256,
- "min":1
- },
- "Variables":{
- "type":"list",
- "member":{"shape":"Variable"},
- "max":50,
- "min":0
- },
- "VersioningConfiguration":{
- "type":"structure",
- "members":{
- "unlimited":{"shape":"UnlimitedVersioning"},
- "maxVersions":{"shape":"MaxVersions"}
- }
- },
- "VolumeSizeInGB":{
- "type":"integer",
- "max":50,
- "min":1
- },
- "errorMessage":{"type":"string"},
- "resourceArn":{"type":"string"},
- "resourceId":{"type":"string"}
- }
-}
diff --git a/apis/iotanalytics/2017-11-27/docs-2.json b/apis/iotanalytics/2017-11-27/docs-2.json
deleted file mode 100644
index 1780abf08ba..00000000000
--- a/apis/iotanalytics/2017-11-27/docs-2.json
+++ /dev/null
@@ -1,1584 +0,0 @@
-{
- "version": "2.0",
- "service": "IoT Analytics allows you to collect large amounts of device data, process messages, and store them. You can then query the data and run sophisticated analytics on it. IoT Analytics enables advanced data exploration through integration with Jupyter Notebooks and data visualization through integration with Amazon QuickSight.
Traditional analytics and business intelligence tools are designed to process structured data. IoT data often comes from devices that record noisy processes (such as temperature, motion, or sound). As a result the data from these devices can have significant gaps, corrupted messages, and false readings that must be cleaned up before analysis can occur. Also, IoT data is often only meaningful in the context of other data from external sources.
IoT Analytics automates the steps required to analyze data from IoT devices. IoT Analytics filters, transforms, and enriches IoT data before storing it in a time-series data store for analysis. You can set up the service to collect only the data you need from your devices, apply mathematical transforms to process the data, and enrich the data with device-specific metadata such as device type and location before storing it. Then, you can analyze your data by running queries using the built-in SQL query engine, or perform more complex analytics and machine learning inference. IoT Analytics includes pre-built models for common IoT use cases so you can answer questions like which devices are about to fail or which customers are at risk of abandoning their wearable devices.
",
- "operations": {
- "BatchPutMessage": "Sends messages to a channel.
",
- "CancelPipelineReprocessing": "Cancels the reprocessing of data through the pipeline.
",
- "CreateChannel": "Used to create a channel. A channel collects data from an MQTT topic and archives the raw, unprocessed messages before publishing the data to a pipeline.
",
- "CreateDataset": "Used to create a dataset. A dataset stores data retrieved from a data store by applying a queryAction (a SQL query) or a containerAction (executing a containerized application). This operation creates the skeleton of a dataset. The dataset can be populated manually by calling CreateDatasetContent or automatically according to a trigger you specify.
",
- "CreateDatasetContent": "Creates the content of a dataset by applying a queryAction (a SQL query) or a containerAction (executing a containerized application).
",
- "CreateDatastore": "Creates a data store, which is a repository for messages.
",
- "CreatePipeline": "Creates a pipeline. A pipeline consumes messages from a channel and allows you to process the messages before storing them in a data store. You must specify both a channel and a datastore activity and, optionally, as many as 23 additional activities in the pipelineActivities array.
",
- "DeleteChannel": "Deletes the specified channel.
",
- "DeleteDataset": "Deletes the specified dataset.
You do not have to delete the content of the dataset before you perform this operation.
",
- "DeleteDatasetContent": "Deletes the content of the specified dataset.
",
- "DeleteDatastore": "Deletes the specified data store.
",
- "DeletePipeline": "Deletes the specified pipeline.
",
- "DescribeChannel": "Retrieves information about a channel.
",
- "DescribeDataset": "Retrieves information about a dataset.
",
- "DescribeDatastore": "Retrieves information about a data store.
",
- "DescribeLoggingOptions": "Retrieves the current settings of the IoT Analytics logging options.
",
- "DescribePipeline": "Retrieves information about a pipeline.
",
- "GetDatasetContent": "Retrieves the contents of a dataset as presigned URIs.
",
- "ListChannels": "Retrieves a list of channels.
",
- "ListDatasetContents": "Lists information about dataset contents that have been created.
",
- "ListDatasets": "Retrieves information about datasets.
",
- "ListDatastores": "Retrieves a list of data stores.
",
- "ListPipelines": "Retrieves a list of pipelines.
",
- "ListTagsForResource": "Lists the tags (metadata) that you have assigned to the resource.
",
- "PutLoggingOptions": "Sets or updates the IoT Analytics logging options.
If you update the value of any loggingOptions field, it takes up to one minute for the change to take effect. Also, if you change the policy attached to the role you specified in the roleArn field (for example, to correct an invalid policy), it takes up to five minutes for that change to take effect.
",
- "RunPipelineActivity": "Simulates the results of running a pipeline activity on a message payload.
",
- "SampleChannelData": "Retrieves a sample of messages from the specified channel ingested during the specified timeframe. Up to 10 messages can be retrieved.
",
- "StartPipelineReprocessing": "Starts the reprocessing of raw message data through the pipeline.
",
- "TagResource": "Adds to or modifies the tags of the given resource. Tags are metadata that can be used to manage a resource.
",
- "UntagResource": "Removes the given tags (metadata) from the resource.
",
- "UpdateChannel": "Used to update the settings of a channel.
",
- "UpdateDataset": "Updates the settings of a dataset.
",
- "UpdateDatastore": "Used to update the settings of a data store.
",
- "UpdatePipeline": "Updates the settings of a pipeline. You must specify both a channel and a datastore activity and, optionally, as many as 23 additional activities in the pipelineActivities array.
"
- },
- "shapes": {
- "ActivityBatchSize": {
- "base": null,
- "refs": {
- "LambdaActivity$batchSize": "The number of messages passed to the Lambda function for processing.
The Lambda function must be able to process all of these messages within five minutes, which is the maximum timeout duration for Lambda functions.
"
- }
- },
- "ActivityName": {
- "base": null,
- "refs": {
- "AddAttributesActivity$name": "The name of the addAttributes activity.
",
- "AddAttributesActivity$next": "The next activity in the pipeline.
",
- "ChannelActivity$name": "The name of the channel activity.
",
- "ChannelActivity$next": "The next activity in the pipeline.
",
- "DatastoreActivity$name": "The name of the datastore activity.
",
- "DeviceRegistryEnrichActivity$name": "The name of the deviceRegistryEnrich activity.
",
- "DeviceRegistryEnrichActivity$next": "The next activity in the pipeline.
",
- "DeviceShadowEnrichActivity$name": "The name of the deviceShadowEnrich activity.
",
- "DeviceShadowEnrichActivity$next": "The next activity in the pipeline.
",
- "FilterActivity$name": "The name of the filter activity.
",
- "FilterActivity$next": "The next activity in the pipeline.
",
- "LambdaActivity$name": "The name of the lambda activity.
",
- "LambdaActivity$next": "The next activity in the pipeline.
",
- "MathActivity$name": "The name of the math activity.
",
- "MathActivity$next": "The next activity in the pipeline.
",
- "RemoveAttributesActivity$name": "The name of the removeAttributes activity.
",
- "RemoveAttributesActivity$next": "The next activity in the pipeline.
",
- "SelectAttributesActivity$name": "The name of the selectAttributes activity.
",
- "SelectAttributesActivity$next": "The next activity in the pipeline.
"
- }
- },
- "AddAttributesActivity": {
- "base": "An activity that adds other attributes based on existing attributes in the message.
",
- "refs": {
- "PipelineActivity$addAttributes": "Adds other attributes based on existing attributes in the message.
"
- }
- },
- "AttributeName": {
- "base": null,
- "refs": {
- "AttributeNameMapping$key": null,
- "AttributeNameMapping$value": null,
- "AttributeNames$member": null,
- "DeviceRegistryEnrichActivity$attribute": "The name of the attribute that is added to the message.
",
- "DeviceRegistryEnrichActivity$thingName": "The name of the IoT device whose registry information is added to the message.
",
- "DeviceShadowEnrichActivity$attribute": "The name of the attribute that is added to the message.
",
- "DeviceShadowEnrichActivity$thingName": "The name of the IoT device whose shadow information is added to the message.
",
- "MathActivity$attribute": "The name of the attribute that contains the result of the math operation.
"
- }
- },
- "AttributeNameMapping": {
- "base": null,
- "refs": {
- "AddAttributesActivity$attributes": "A list of 1-50 AttributeNameMapping objects that map an existing attribute to a new attribute.
The existing attributes remain in the message, so if you want to remove the originals, use RemoveAttributeActivity.
"
- }
- },
- "AttributeNames": {
- "base": null,
- "refs": {
- "RemoveAttributesActivity$attributes": "A list of 1-50 attributes to remove from the message.
",
- "SelectAttributesActivity$attributes": "A list of the attributes to select from the message.
"
- }
- },
- "BatchPutMessageErrorEntries": {
- "base": null,
- "refs": {
- "BatchPutMessageResponse$batchPutMessageErrorEntries": "A list of any errors encountered when sending the messages to the channel.
"
- }
- },
- "BatchPutMessageErrorEntry": {
- "base": "Contains informations about errors.
",
- "refs": {
- "BatchPutMessageErrorEntries$member": null
- }
- },
- "BatchPutMessageRequest": {
- "base": null,
- "refs": {}
- },
- "BatchPutMessageResponse": {
- "base": null,
- "refs": {}
- },
- "BucketKeyExpression": {
- "base": null,
- "refs": {
- "S3DestinationConfiguration$key": "The key of the dataset contents object in an S3 bucket. Each object has a key that is a unique identifier. Each object has exactly one key.
You can create a unique key with the following options:
Use !{iotanalytics:scheduleTime} to insert the time of a scheduled SQL query run.
Use !{iotanalytics:versionId} to insert a unique hash that identifies a dataset content.
Use !{iotanalytics:creationTime} to insert the creation time of a dataset content.
The following example creates a unique key for a CSV file: dataset/mydataset/!{iotanalytics:scheduleTime}/!{iotanalytics:versionId}.csv
If you don't use !{iotanalytics:versionId} to specify the key, you might get duplicate keys. For example, you might have two dataset contents with the same scheduleTime but different versionIds. This means that one dataset content overwrites the other.
"
- }
- },
- "BucketName": {
- "base": null,
- "refs": {
- "CustomerManagedChannelS3Storage$bucket": "The name of the S3 bucket in which channel data is stored.
",
- "CustomerManagedChannelS3StorageSummary$bucket": "The name of the S3 bucket in which channel data is stored.
",
- "CustomerManagedDatastoreS3Storage$bucket": "The name of the Amazon S3 bucket where your data is stored.
",
- "CustomerManagedDatastoreS3StorageSummary$bucket": "The name of the Amazon S3 bucket where your data is stored.
",
- "IotSiteWiseCustomerManagedDatastoreS3Storage$bucket": " The name of the Amazon S3 bucket where your data is stored.
",
- "IotSiteWiseCustomerManagedDatastoreS3StorageSummary$bucket": " The name of the Amazon S3 bucket where your data is stored.
",
- "S3DestinationConfiguration$bucket": "The name of the S3 bucket to which dataset contents are delivered.
"
- }
- },
- "CancelPipelineReprocessingRequest": {
- "base": null,
- "refs": {}
- },
- "CancelPipelineReprocessingResponse": {
- "base": null,
- "refs": {}
- },
- "Channel": {
- "base": "A collection of data from an MQTT topic. Channels archive the raw, unprocessed messages before publishing the data to a pipeline.
",
- "refs": {
- "DescribeChannelResponse$channel": "An object that contains information about the channel.
"
- }
- },
- "ChannelActivity": {
- "base": "The activity that determines the source of the messages to be processed.
",
- "refs": {
- "PipelineActivity$channel": "Determines the source of the messages to be processed.
"
- }
- },
- "ChannelArn": {
- "base": null,
- "refs": {
- "Channel$arn": "The ARN of the channel.
",
- "CreateChannelResponse$channelArn": "The ARN of the channel.
"
- }
- },
- "ChannelMessages": {
- "base": "Specifies one or more sets of channel messages.
",
- "refs": {
- "StartPipelineReprocessingRequest$channelMessages": "Specifies one or more sets of channel messages that you want to reprocess.
If you use the channelMessages object, you must not specify a value for startTime and endTime.
"
- }
- },
- "ChannelName": {
- "base": null,
- "refs": {
- "BatchPutMessageRequest$channelName": "The name of the channel where the messages are sent.
",
- "Channel$name": "The name of the channel.
",
- "ChannelActivity$channelName": "The name of the channel from which the messages are processed.
",
- "ChannelSummary$channelName": "The name of the channel.
",
- "CreateChannelRequest$channelName": "The name of the channel.
",
- "CreateChannelResponse$channelName": "The name of the channel.
",
- "DeleteChannelRequest$channelName": "The name of the channel to delete.
",
- "DescribeChannelRequest$channelName": "The name of the channel whose information is retrieved.
",
- "SampleChannelDataRequest$channelName": "The name of the channel whose message samples are retrieved.
",
- "UpdateChannelRequest$channelName": "The name of the channel to be updated.
"
- }
- },
- "ChannelStatistics": {
- "base": "Statistics information about the channel.
",
- "refs": {
- "DescribeChannelResponse$statistics": "Statistics about the channel. Included if the includeStatistics parameter is set to true in the request.
"
- }
- },
- "ChannelStatus": {
- "base": null,
- "refs": {
- "Channel$status": "The status of the channel.
",
- "ChannelSummary$status": "The status of the channel.
"
- }
- },
- "ChannelStorage": {
- "base": "Where channel data is stored. You may choose one of serviceManagedS3, customerManagedS3 storage. If not specified, the default is serviceManagedS3. This can't be changed after creation of the channel.
",
- "refs": {
- "Channel$storage": "Where channel data is stored. You can choose one of serviceManagedS3 or customerManagedS3 storage. If not specified, the default is serviceManagedS3. You can't change this storage option after the channel is created.
",
- "CreateChannelRequest$channelStorage": "Where channel data is stored. You can choose one of serviceManagedS3 or customerManagedS3 storage. If not specified, the default is serviceManagedS3. You can't change this storage option after the channel is created.
",
- "UpdateChannelRequest$channelStorage": "Where channel data is stored. You can choose one of serviceManagedS3 or customerManagedS3 storage. If not specified, the default is serviceManagedS3. You can't change this storage option after the channel is created.
"
- }
- },
- "ChannelStorageSummary": {
- "base": "Where channel data is stored.
",
- "refs": {
- "ChannelSummary$channelStorage": "Where channel data is stored.
"
- }
- },
- "ChannelSummaries": {
- "base": null,
- "refs": {
- "ListChannelsResponse$channelSummaries": "A list of ChannelSummary objects.
"
- }
- },
- "ChannelSummary": {
- "base": "A summary of information about a channel.
",
- "refs": {
- "ChannelSummaries$member": null
- }
- },
- "Column": {
- "base": "Contains information about a column that stores your data.
",
- "refs": {
- "Columns$member": null
- }
- },
- "ColumnDataType": {
- "base": null,
- "refs": {
- "Column$type": "The type of data. For more information about the supported data types, see Common data types in the Glue Developer Guide .
"
- }
- },
- "ColumnName": {
- "base": null,
- "refs": {
- "Column$name": "The name of the column.
"
- }
- },
- "Columns": {
- "base": null,
- "refs": {
- "SchemaDefinition$columns": "Specifies one or more columns that store your data.
Each schema can have up to 100 columns. Each column can have up to 100 nested types.
"
- }
- },
- "ComputeType": {
- "base": null,
- "refs": {
- "ResourceConfiguration$computeType": "The type of the compute resource used to execute the containerAction. Possible values are: ACU_1 (vCPU=4, memory=16 GiB) or ACU_2 (vCPU=8, memory=32 GiB).
"
- }
- },
- "ContainerDatasetAction": {
- "base": "Information required to run the containerAction to produce dataset contents.
",
- "refs": {
- "DatasetAction$containerAction": "Information that allows the system to run a containerized application to create the dataset contents. The application must be in a Docker container along with any required support libraries.
"
- }
- },
- "CreateChannelRequest": {
- "base": null,
- "refs": {}
- },
- "CreateChannelResponse": {
- "base": null,
- "refs": {}
- },
- "CreateDatasetContentRequest": {
- "base": null,
- "refs": {}
- },
- "CreateDatasetContentResponse": {
- "base": null,
- "refs": {}
- },
- "CreateDatasetRequest": {
- "base": null,
- "refs": {}
- },
- "CreateDatasetResponse": {
- "base": null,
- "refs": {}
- },
- "CreateDatastoreRequest": {
- "base": null,
- "refs": {}
- },
- "CreateDatastoreResponse": {
- "base": null,
- "refs": {}
- },
- "CreatePipelineRequest": {
- "base": null,
- "refs": {}
- },
- "CreatePipelineResponse": {
- "base": null,
- "refs": {}
- },
- "CustomerManagedChannelS3Storage": {
- "base": "Used to store channel data in an S3 bucket that you manage. If customer-managed storage is selected, the retentionPeriod parameter is ignored. You can't change the choice of S3 storage after the data store is created.
",
- "refs": {
- "ChannelStorage$customerManagedS3": "Used to store channel data in an S3 bucket that you manage. If customer managed storage is selected, the retentionPeriod parameter is ignored. You can't change the choice of S3 storage after the data store is created.
"
- }
- },
- "CustomerManagedChannelS3StorageSummary": {
- "base": "Used to store channel data in an S3 bucket that you manage.
",
- "refs": {
- "ChannelStorageSummary$customerManagedS3": "Used to store channel data in an S3 bucket that you manage.
"
- }
- },
- "CustomerManagedDatastoreS3Storage": {
- "base": "S3-customer-managed; When you choose customer-managed storage, the retentionPeriod parameter is ignored. You can't change the choice of Amazon S3 storage after your data store is created.
",
- "refs": {
- "DatastoreStorage$customerManagedS3": "S3-customer-managed; When you choose customer-managed storage, the retentionPeriod parameter is ignored. You can't change the choice of Amazon S3 storage after your data store is created.
"
- }
- },
- "CustomerManagedDatastoreS3StorageSummary": {
- "base": "Contains information about the data store that you manage.
",
- "refs": {
- "DatastoreStorageSummary$customerManagedS3": "Used to store data in an Amazon S3 bucket managed by IoT Analytics.
"
- }
- },
- "Dataset": {
- "base": "Information about a dataset.
",
- "refs": {
- "DescribeDatasetResponse$dataset": "An object that contains information about the dataset.
"
- }
- },
- "DatasetAction": {
- "base": "A DatasetAction object that specifies how dataset contents are automatically created.
",
- "refs": {
- "DatasetActions$member": null
- }
- },
- "DatasetActionName": {
- "base": null,
- "refs": {
- "DatasetAction$actionName": "The name of the dataset action by which dataset contents are automatically created.
",
- "DatasetActionSummary$actionName": "The name of the action that automatically creates the dataset's contents.
"
- }
- },
- "DatasetActionSummaries": {
- "base": null,
- "refs": {
- "DatasetSummary$actions": "A list of DataActionSummary objects.
"
- }
- },
- "DatasetActionSummary": {
- "base": "Information about the action that automatically creates the dataset's contents.
",
- "refs": {
- "DatasetActionSummaries$member": null
- }
- },
- "DatasetActionType": {
- "base": null,
- "refs": {
- "DatasetActionSummary$actionType": "The type of action by which the dataset's contents are automatically created.
"
- }
- },
- "DatasetActions": {
- "base": null,
- "refs": {
- "CreateDatasetRequest$actions": "A list of actions that create the dataset contents.
",
- "Dataset$actions": "The DatasetAction objects that automatically create the dataset contents.
",
- "UpdateDatasetRequest$actions": "A list of DatasetAction objects.
"
- }
- },
- "DatasetArn": {
- "base": null,
- "refs": {
- "CreateDatasetResponse$datasetArn": "The ARN of the dataset.
",
- "Dataset$arn": "The ARN of the dataset.
"
- }
- },
- "DatasetContentDeliveryDestination": {
- "base": "The destination to which dataset contents are delivered.
",
- "refs": {
- "DatasetContentDeliveryRule$destination": "The destination to which dataset contents are delivered.
"
- }
- },
- "DatasetContentDeliveryRule": {
- "base": "When dataset contents are created, they are delivered to destination specified here.
",
- "refs": {
- "DatasetContentDeliveryRules$member": null
- }
- },
- "DatasetContentDeliveryRules": {
- "base": null,
- "refs": {
- "CreateDatasetRequest$contentDeliveryRules": "When dataset contents are created, they are delivered to destinations specified here.
",
- "Dataset$contentDeliveryRules": "When dataset contents are created they are delivered to destinations specified here.
",
- "UpdateDatasetRequest$contentDeliveryRules": "When dataset contents are created, they are delivered to destinations specified here.
"
- }
- },
- "DatasetContentState": {
- "base": null,
- "refs": {
- "DatasetContentStatus$state": "The state of the dataset contents. Can be one of READY, CREATING, SUCCEEDED, or FAILED.
"
- }
- },
- "DatasetContentStatus": {
- "base": "The state of the dataset contents and the reason they are in this state.
",
- "refs": {
- "DatasetContentSummary$status": "The status of the dataset contents.
",
- "GetDatasetContentResponse$status": "The status of the dataset content.
"
- }
- },
- "DatasetContentSummaries": {
- "base": null,
- "refs": {
- "ListDatasetContentsResponse$datasetContentSummaries": "Summary information about dataset contents that have been created.
"
- }
- },
- "DatasetContentSummary": {
- "base": "Summary information about dataset contents.
",
- "refs": {
- "DatasetContentSummaries$member": null
- }
- },
- "DatasetContentVersion": {
- "base": null,
- "refs": {
- "CreateDatasetContentRequest$versionId": "The version ID of the dataset content. To specify versionId for a dataset content, the dataset must use a DeltaTimer filter.
",
- "CreateDatasetContentResponse$versionId": "The version ID of the dataset contents that are being created.
",
- "DatasetContentSummary$version": "The version of the dataset contents.
",
- "DeleteDatasetContentRequest$versionId": "The version of the dataset whose content is deleted. You can also use the strings \"$LATEST\" or \"$LATEST_SUCCEEDED\" to delete the latest or latest successfully completed data set. If not specified, \"$LATEST_SUCCEEDED\" is the default.
",
- "GetDatasetContentRequest$versionId": "The version of the dataset whose contents are retrieved. You can also use the strings \"$LATEST\" or \"$LATEST_SUCCEEDED\" to retrieve the contents of the latest or latest successfully completed dataset. If not specified, \"$LATEST_SUCCEEDED\" is the default.
"
- }
- },
- "DatasetContentVersionValue": {
- "base": "The dataset whose latest contents are used as input to the notebook or application.
",
- "refs": {
- "Variable$datasetContentVersionValue": "The value of the variable as a structure that specifies a dataset content version.
"
- }
- },
- "DatasetEntries": {
- "base": null,
- "refs": {
- "GetDatasetContentResponse$entries": "A list of DatasetEntry objects.
"
- }
- },
- "DatasetEntry": {
- "base": "The reference to a dataset entry.
",
- "refs": {
- "DatasetEntries$member": null
- }
- },
- "DatasetName": {
- "base": null,
- "refs": {
- "CreateDatasetContentRequest$datasetName": "The name of the dataset.
",
- "CreateDatasetRequest$datasetName": "The name of the dataset.
",
- "CreateDatasetResponse$datasetName": "The name of the dataset.
",
- "Dataset$name": "The name of the dataset.
",
- "DatasetContentVersionValue$datasetName": "The name of the dataset whose latest contents are used as input to the notebook or application.
",
- "DatasetSummary$datasetName": "The name of the dataset.
",
- "DeleteDatasetContentRequest$datasetName": "The name of the dataset whose content is deleted.
",
- "DeleteDatasetRequest$datasetName": "The name of the dataset to delete.
",
- "DescribeDatasetRequest$datasetName": "The name of the dataset whose information is retrieved.
",
- "GetDatasetContentRequest$datasetName": "The name of the dataset whose contents are retrieved.
",
- "ListDatasetContentsRequest$datasetName": "The name of the dataset whose contents information you want to list.
",
- "TriggeringDataset$name": "The name of the dataset whose content generation triggers the new dataset content generation.
",
- "UpdateDatasetRequest$datasetName": "The name of the dataset to update.
"
- }
- },
- "DatasetStatus": {
- "base": null,
- "refs": {
- "Dataset$status": "The status of the dataset.
",
- "DatasetSummary$status": "The status of the dataset.
"
- }
- },
- "DatasetSummaries": {
- "base": null,
- "refs": {
- "ListDatasetsResponse$datasetSummaries": "A list of DatasetSummary objects.
"
- }
- },
- "DatasetSummary": {
- "base": "A summary of information about a dataset.
",
- "refs": {
- "DatasetSummaries$member": null
- }
- },
- "DatasetTrigger": {
- "base": "The DatasetTrigger that specifies when the dataset is automatically updated.
",
- "refs": {
- "DatasetTriggers$member": null
- }
- },
- "DatasetTriggers": {
- "base": null,
- "refs": {
- "CreateDatasetRequest$triggers": "A list of triggers. A trigger causes dataset contents to be populated at a specified time interval or when another dataset's contents are created. The list of triggers can be empty or contain up to five DataSetTrigger objects.
",
- "Dataset$triggers": "The DatasetTrigger objects that specify when the dataset is automatically updated.
",
- "DatasetSummary$triggers": "A list of triggers. A trigger causes dataset content to be populated at a specified time interval or when another dataset is populated. The list of triggers can be empty or contain up to five DataSetTrigger objects
",
- "UpdateDatasetRequest$triggers": "A list of DatasetTrigger objects. The list can be empty or can contain up to five DatasetTrigger objects.
"
- }
- },
- "Datastore": {
- "base": "Information about a data store.
",
- "refs": {
- "DescribeDatastoreResponse$datastore": "Information about the data store.
"
- }
- },
- "DatastoreActivity": {
- "base": "The datastore activity that specifies where to store the processed data.
",
- "refs": {
- "PipelineActivity$datastore": "Specifies where to store the processed message data.
"
- }
- },
- "DatastoreArn": {
- "base": null,
- "refs": {
- "CreateDatastoreResponse$datastoreArn": "The ARN of the data store.
",
- "Datastore$arn": "The ARN of the data store.
"
- }
- },
- "DatastoreIotSiteWiseMultiLayerStorage": {
- "base": " Used to store data used by IoT SiteWise in an Amazon S3 bucket that you manage. You can't change the choice of Amazon S3 storage after your data store is created.
",
- "refs": {
- "DatastoreStorage$iotSiteWiseMultiLayerStorage": " Used to store data used by IoT SiteWise in an Amazon S3 bucket that you manage. You can't change the choice of Amazon S3 storage after your data store is created.
"
- }
- },
- "DatastoreIotSiteWiseMultiLayerStorageSummary": {
- "base": " Contains information about the data store that you manage, which stores data used by IoT SiteWise.
",
- "refs": {
- "DatastoreStorageSummary$iotSiteWiseMultiLayerStorage": " Used to store data used by IoT SiteWise in an Amazon S3 bucket that you manage.
"
- }
- },
- "DatastoreName": {
- "base": null,
- "refs": {
- "CreateDatastoreRequest$datastoreName": "The name of the data store.
",
- "CreateDatastoreResponse$datastoreName": "The name of the data store.
",
- "Datastore$name": "The name of the data store.
",
- "DatastoreActivity$datastoreName": "The name of the data store where processed messages are stored.
",
- "DatastoreSummary$datastoreName": "The name of the data store.
",
- "DeleteDatastoreRequest$datastoreName": "The name of the data store to delete.
",
- "DescribeDatastoreRequest$datastoreName": "The name of the data store
",
- "UpdateDatastoreRequest$datastoreName": "The name of the data store to be updated.
"
- }
- },
- "DatastorePartition": {
- "base": " A single dimension to partition a data store. The dimension must be an AttributePartition or a TimestampPartition.
",
- "refs": {
- "Partitions$member": null
- }
- },
- "DatastorePartitions": {
- "base": " Contains information about the partition dimensions in a data store.
",
- "refs": {
- "CreateDatastoreRequest$datastorePartitions": " Contains information about the partition dimensions in a data store.
",
- "Datastore$datastorePartitions": " Contains information about the partition dimensions in a data store.
",
- "DatastoreSummary$datastorePartitions": " Contains information about the partition dimensions in a data store.
"
- }
- },
- "DatastoreStatistics": {
- "base": "Statistical information about the data store.
",
- "refs": {
- "DescribeDatastoreResponse$statistics": "Additional statistical information about the data store. Included if the includeStatistics parameter is set to true in the request.
"
- }
- },
- "DatastoreStatus": {
- "base": null,
- "refs": {
- "Datastore$status": "The status of a data store:
CREATING The data store is being created.
ACTIVE The data store has been created and can be used.
DELETING The data store is being deleted.
",
- "DatastoreSummary$status": "The status of the data store.
"
- }
- },
- "DatastoreStorage": {
- "base": "Where data in a data store is stored.. You can choose serviceManagedS3 storage, customerManagedS3 storage, or iotSiteWiseMultiLayerStorage storage. The default is serviceManagedS3. You can't change the choice of Amazon S3 storage after your data store is created.
",
- "refs": {
- "CreateDatastoreRequest$datastoreStorage": "Where data in a data store is stored.. You can choose serviceManagedS3 storage, customerManagedS3 storage, or iotSiteWiseMultiLayerStorage storage. The default is serviceManagedS3. You can't change the choice of Amazon S3 storage after your data store is created.
",
- "Datastore$storage": "Where data in a data store is stored.. You can choose serviceManagedS3 storage, customerManagedS3 storage, or iotSiteWiseMultiLayerStorage storage. The default is serviceManagedS3. You can't change the choice of Amazon S3 storage after your data store is created.
",
- "UpdateDatastoreRequest$datastoreStorage": "Where data in a data store is stored.. You can choose serviceManagedS3 storage, customerManagedS3 storage, or iotSiteWiseMultiLayerStorage storage. The default is serviceManagedS3. You can't change the choice of Amazon S3 storage after your data store is created.
"
- }
- },
- "DatastoreStorageSummary": {
- "base": "Contains information about your data store.
",
- "refs": {
- "DatastoreSummary$datastoreStorage": "Where data in a data store is stored.
"
- }
- },
- "DatastoreSummaries": {
- "base": null,
- "refs": {
- "ListDatastoresResponse$datastoreSummaries": "A list of DatastoreSummary objects.
"
- }
- },
- "DatastoreSummary": {
- "base": "A summary of information about a data store.
",
- "refs": {
- "DatastoreSummaries$member": null
- }
- },
- "DeleteChannelRequest": {
- "base": null,
- "refs": {}
- },
- "DeleteDatasetContentRequest": {
- "base": null,
- "refs": {}
- },
- "DeleteDatasetRequest": {
- "base": null,
- "refs": {}
- },
- "DeleteDatastoreRequest": {
- "base": null,
- "refs": {}
- },
- "DeletePipelineRequest": {
- "base": null,
- "refs": {}
- },
- "DeltaTime": {
- "base": "Used to limit data to that which has arrived since the last execution of the action.
",
- "refs": {
- "QueryFilter$deltaTime": "Used to limit data to that which has arrived since the last execution of the action.
"
- }
- },
- "DeltaTimeSessionWindowConfiguration": {
- "base": "A structure that contains the configuration information of a delta time session window.
DeltaTime specifies a time interval. You can use DeltaTime to create dataset contents with data that has arrived in the data store since the last execution. For an example of DeltaTime, see Creating a SQL dataset with a delta window (CLI) in the IoT Analytics User Guide .
",
- "refs": {
- "LateDataRuleConfiguration$deltaTimeSessionWindowConfiguration": "The information needed to configure a delta time session window.
"
- }
- },
- "DescribeChannelRequest": {
- "base": null,
- "refs": {}
- },
- "DescribeChannelResponse": {
- "base": null,
- "refs": {}
- },
- "DescribeDatasetRequest": {
- "base": null,
- "refs": {}
- },
- "DescribeDatasetResponse": {
- "base": null,
- "refs": {}
- },
- "DescribeDatastoreRequest": {
- "base": null,
- "refs": {}
- },
- "DescribeDatastoreResponse": {
- "base": null,
- "refs": {}
- },
- "DescribeLoggingOptionsRequest": {
- "base": null,
- "refs": {}
- },
- "DescribeLoggingOptionsResponse": {
- "base": null,
- "refs": {}
- },
- "DescribePipelineRequest": {
- "base": null,
- "refs": {}
- },
- "DescribePipelineResponse": {
- "base": null,
- "refs": {}
- },
- "DeviceRegistryEnrichActivity": {
- "base": "An activity that adds data from the IoT device registry to your message.
",
- "refs": {
- "PipelineActivity$deviceRegistryEnrich": "Adds data from the IoT device registry to your message.
"
- }
- },
- "DeviceShadowEnrichActivity": {
- "base": "An activity that adds information from the IoT Device Shadow service to a message.
",
- "refs": {
- "PipelineActivity$deviceShadowEnrich": "Adds information from the IoT Device Shadow service to a message.
"
- }
- },
- "DoubleValue": {
- "base": null,
- "refs": {
- "Variable$doubleValue": "The value of the variable as a double (numeric).
"
- }
- },
- "EndTime": {
- "base": null,
- "refs": {
- "SampleChannelDataRequest$endTime": "The end of the time window from which sample messages are retrieved.
",
- "StartPipelineReprocessingRequest$endTime": "The end time (exclusive) of raw message data that is reprocessed.
If you specify a value for the endTime parameter, you must not use the channelMessages object.
"
- }
- },
- "EntryName": {
- "base": null,
- "refs": {
- "DatasetContentDeliveryRule$entryName": "The name of the dataset content delivery rules entry.
",
- "DatasetEntry$entryName": "The name of the dataset item.
"
- }
- },
- "ErrorCode": {
- "base": null,
- "refs": {
- "BatchPutMessageErrorEntry$errorCode": "The code associated with the error.
"
- }
- },
- "ErrorMessage": {
- "base": null,
- "refs": {
- "BatchPutMessageErrorEntry$errorMessage": "The message associated with the error.
"
- }
- },
- "EstimatedResourceSize": {
- "base": "The estimated size of the resource.
",
- "refs": {
- "ChannelStatistics$size": "The estimated size of the channel.
",
- "DatastoreStatistics$size": "The estimated size of the data store.
"
- }
- },
- "FileFormatConfiguration": {
- "base": "Contains the configuration information of file formats. IoT Analytics data stores support JSON and Parquet .
The default file format is JSON. You can specify only one format.
You can't change the file format after you create the data store.
",
- "refs": {
- "CreateDatastoreRequest$fileFormatConfiguration": "Contains the configuration information of file formats. IoT Analytics data stores support JSON and Parquet .
The default file format is JSON. You can specify only one format.
You can't change the file format after you create the data store.
",
- "Datastore$fileFormatConfiguration": "Contains the configuration information of file formats. IoT Analytics data stores support JSON and Parquet .
The default file format is JSON. You can specify only one format.
You can't change the file format after you create the data store.
",
- "UpdateDatastoreRequest$fileFormatConfiguration": "Contains the configuration information of file formats. IoT Analytics data stores support JSON and Parquet .
The default file format is JSON. You can specify only one format.
You can't change the file format after you create the data store.
"
- }
- },
- "FileFormatType": {
- "base": null,
- "refs": {
- "DatastoreSummary$fileFormatType": "The file format of the data in the data store.
"
- }
- },
- "FilterActivity": {
- "base": "An activity that filters a message based on its attributes.
",
- "refs": {
- "PipelineActivity$filter": "Filters a message based on its attributes.
"
- }
- },
- "FilterExpression": {
- "base": null,
- "refs": {
- "FilterActivity$filter": "An expression that looks like a SQL WHERE clause that must return a Boolean value. Messages that satisfy the condition are passed to the next activity.
"
- }
- },
- "GetDatasetContentRequest": {
- "base": null,
- "refs": {}
- },
- "GetDatasetContentResponse": {
- "base": null,
- "refs": {}
- },
- "GlueConfiguration": {
- "base": "Configuration information for coordination with Glue, a fully managed extract, transform and load (ETL) service.
",
- "refs": {
- "S3DestinationConfiguration$glueConfiguration": "Configuration information for coordination with Glue, a fully managed extract, transform and load (ETL) service.
"
- }
- },
- "GlueDatabaseName": {
- "base": null,
- "refs": {
- "GlueConfiguration$databaseName": "The name of the database in your Glue Data Catalog in which the table is located. An Glue Data Catalog database contains metadata tables.
"
- }
- },
- "GlueTableName": {
- "base": null,
- "refs": {
- "GlueConfiguration$tableName": "The name of the table in your Glue Data Catalog that is used to perform the ETL operations. An Glue Data Catalog table contains partitioned data and descriptions of data sources and targets.
"
- }
- },
- "Image": {
- "base": null,
- "refs": {
- "ContainerDatasetAction$image": "The ARN of the Docker container stored in your account. The Docker container contains an application and required support libraries and is used to generate dataset contents.
"
- }
- },
- "IncludeStatisticsFlag": {
- "base": null,
- "refs": {
- "DescribeChannelRequest$includeStatistics": "If true, additional statistical information about the channel is included in the response. This feature can't be used with a channel whose S3 storage is customer-managed.
",
- "DescribeDatastoreRequest$includeStatistics": "If true, additional statistical information about the data store is included in the response. This feature can't be used with a data store whose S3 storage is customer-managed.
"
- }
- },
- "InternalFailureException": {
- "base": "There was an internal failure.
",
- "refs": {}
- },
- "InvalidRequestException": {
- "base": "The request was not valid.
",
- "refs": {}
- },
- "IotEventsDestinationConfiguration": {
- "base": "Configuration information for delivery of dataset contents to IoT Events.
",
- "refs": {
- "DatasetContentDeliveryDestination$iotEventsDestinationConfiguration": "Configuration information for delivery of dataset contents to IoT Events.
"
- }
- },
- "IotEventsInputName": {
- "base": null,
- "refs": {
- "IotEventsDestinationConfiguration$inputName": "The name of the IoT Events input to which dataset contents are delivered.
"
- }
- },
- "IotSiteWiseCustomerManagedDatastoreS3Storage": {
- "base": " Used to store data used by IoT SiteWise in an Amazon S3 bucket that you manage. You can't change the choice of Amazon S3 storage after your data store is created.
",
- "refs": {
- "DatastoreIotSiteWiseMultiLayerStorage$customerManagedS3Storage": " Used to store data used by IoT SiteWise in an Amazon S3 bucket that you manage.
"
- }
- },
- "IotSiteWiseCustomerManagedDatastoreS3StorageSummary": {
- "base": " Contains information about the data store that you manage, which stores data used by IoT SiteWise.
",
- "refs": {
- "DatastoreIotSiteWiseMultiLayerStorageSummary$customerManagedS3Storage": "Used to store data used by IoT SiteWise in an Amazon S3 bucket that you manage.
"
- }
- },
- "JsonConfiguration": {
- "base": "Contains the configuration information of the JSON format.
",
- "refs": {
- "FileFormatConfiguration$jsonConfiguration": "Contains the configuration information of the JSON format.
"
- }
- },
- "LambdaActivity": {
- "base": "An activity that runs a Lambda function to modify the message.
",
- "refs": {
- "PipelineActivity$lambda": "Runs a Lambda function to modify the message.
"
- }
- },
- "LambdaName": {
- "base": null,
- "refs": {
- "LambdaActivity$lambdaName": "The name of the Lambda function that is run on the message.
"
- }
- },
- "LateDataRule": {
- "base": "A structure that contains the name and configuration information of a late data rule.
",
- "refs": {
- "LateDataRules$member": null
- }
- },
- "LateDataRuleConfiguration": {
- "base": "The information needed to configure a delta time session window.
",
- "refs": {
- "LateDataRule$ruleConfiguration": "The information needed to configure the late data rule.
"
- }
- },
- "LateDataRuleName": {
- "base": null,
- "refs": {
- "LateDataRule$ruleName": "The name of the late data rule.
"
- }
- },
- "LateDataRules": {
- "base": null,
- "refs": {
- "CreateDatasetRequest$lateDataRules": "A list of data rules that send notifications to CloudWatch, when data arrives late. To specify lateDataRules, the dataset must use a DeltaTimer filter.
",
- "Dataset$lateDataRules": "A list of data rules that send notifications to CloudWatch, when data arrives late. To specify lateDataRules, the dataset must use a DeltaTimer filter.
",
- "UpdateDatasetRequest$lateDataRules": "A list of data rules that send notifications to CloudWatch, when data arrives late. To specify lateDataRules, the dataset must use a DeltaTimer filter.
"
- }
- },
- "LimitExceededException": {
- "base": "The command caused an internal limit to be exceeded.
",
- "refs": {}
- },
- "ListChannelsRequest": {
- "base": null,
- "refs": {}
- },
- "ListChannelsResponse": {
- "base": null,
- "refs": {}
- },
- "ListDatasetContentsRequest": {
- "base": null,
- "refs": {}
- },
- "ListDatasetContentsResponse": {
- "base": null,
- "refs": {}
- },
- "ListDatasetsRequest": {
- "base": null,
- "refs": {}
- },
- "ListDatasetsResponse": {
- "base": null,
- "refs": {}
- },
- "ListDatastoresRequest": {
- "base": null,
- "refs": {}
- },
- "ListDatastoresResponse": {
- "base": null,
- "refs": {}
- },
- "ListPipelinesRequest": {
- "base": null,
- "refs": {}
- },
- "ListPipelinesResponse": {
- "base": null,
- "refs": {}
- },
- "ListTagsForResourceRequest": {
- "base": null,
- "refs": {}
- },
- "ListTagsForResourceResponse": {
- "base": null,
- "refs": {}
- },
- "LogResult": {
- "base": null,
- "refs": {
- "RunPipelineActivityResponse$logResult": "In case the pipeline activity fails, the log message that is generated.
"
- }
- },
- "LoggingEnabled": {
- "base": null,
- "refs": {
- "LoggingOptions$enabled": "If true, logging is enabled for IoT Analytics.
"
- }
- },
- "LoggingLevel": {
- "base": null,
- "refs": {
- "LoggingOptions$level": "The logging level. Currently, only ERROR is supported.
"
- }
- },
- "LoggingOptions": {
- "base": "Information about logging options.
",
- "refs": {
- "DescribeLoggingOptionsResponse$loggingOptions": "The current settings of the IoT Analytics logging options.
",
- "PutLoggingOptionsRequest$loggingOptions": "The new values of the IoT Analytics logging options.
"
- }
- },
- "MathActivity": {
- "base": "An activity that computes an arithmetic expression using the message's attributes.
",
- "refs": {
- "PipelineActivity$math": "Computes an arithmetic expression using the message's attributes and adds it to the message.
"
- }
- },
- "MathExpression": {
- "base": null,
- "refs": {
- "MathActivity$math": "An expression that uses one or more existing attributes and must return an integer value.
"
- }
- },
- "MaxMessages": {
- "base": null,
- "refs": {
- "SampleChannelDataRequest$maxMessages": "The number of sample messages to be retrieved. The limit is 10. The default is also 10.
"
- }
- },
- "MaxResults": {
- "base": null,
- "refs": {
- "ListChannelsRequest$maxResults": "The maximum number of results to return in this request.
The default value is 100.
",
- "ListDatasetContentsRequest$maxResults": "The maximum number of results to return in this request.
",
- "ListDatasetsRequest$maxResults": "The maximum number of results to return in this request.
The default value is 100.
",
- "ListDatastoresRequest$maxResults": "The maximum number of results to return in this request.
The default value is 100.
",
- "ListPipelinesRequest$maxResults": "The maximum number of results to return in this request.
The default value is 100.
"
- }
- },
- "MaxVersions": {
- "base": null,
- "refs": {
- "VersioningConfiguration$maxVersions": "How many versions of dataset contents are kept. The unlimited parameter must be false.
"
- }
- },
- "Message": {
- "base": "Information about a message.
",
- "refs": {
- "Messages$member": null
- }
- },
- "MessageId": {
- "base": null,
- "refs": {
- "BatchPutMessageErrorEntry$messageId": "The ID of the message that caused the error. See the value corresponding to the messageId key in the message object.
",
- "Message$messageId": "The ID you want to assign to the message. Each messageId must be unique within each batch sent.
"
- }
- },
- "MessagePayload": {
- "base": null,
- "refs": {
- "Message$payload": "The payload of the message. This can be a JSON string or a base64-encoded string representing binary data, in which case you must decode it by means of a pipeline activity.
",
- "MessagePayloads$member": null
- }
- },
- "MessagePayloads": {
- "base": null,
- "refs": {
- "RunPipelineActivityRequest$payloads": "The sample message payloads on which the pipeline activity is run.
",
- "RunPipelineActivityResponse$payloads": "The enriched or transformed sample message payloads as base64-encoded strings. (The results of running the pipeline activity on each input sample message payload, encoded in base64.)
",
- "SampleChannelDataResponse$payloads": "The list of message samples. Each sample message is returned as a base64-encoded string.
"
- }
- },
- "Messages": {
- "base": null,
- "refs": {
- "BatchPutMessageRequest$messages": "The list of messages to be sent. Each message has the format: { \"messageId\": \"string\", \"payload\": \"string\"}.
The field names of message payloads (data) that you send to IoT Analytics:
Must contain only alphanumeric characters and underscores (_). No other special characters are allowed.
Must begin with an alphabetic character or single underscore (_).
Cannot contain hyphens (-).
In regular expression terms: \"^[A-Za-z_]([A-Za-z0-9]*|[A-Za-z0-9][A-Za-z0-9_]*)$\".
Cannot be more than 255 characters.
Are case insensitive. (Fields named foo and FOO in the same payload are considered duplicates.)
For example, {\"temp_01\": 29} or {\"_temp_01\": 29} are valid, but {\"temp-01\": 29}, {\"01_temp\": 29} or {\"__temp_01\": 29} are invalid in message payloads.
"
- }
- },
- "NextToken": {
- "base": null,
- "refs": {
- "ListChannelsRequest$nextToken": "The token for the next set of results.
",
- "ListChannelsResponse$nextToken": "The token to retrieve the next set of results, or null if there are no more results.
",
- "ListDatasetContentsRequest$nextToken": "The token for the next set of results.
",
- "ListDatasetContentsResponse$nextToken": "The token to retrieve the next set of results, or null if there are no more results.
",
- "ListDatasetsRequest$nextToken": "The token for the next set of results.
",
- "ListDatasetsResponse$nextToken": "The token to retrieve the next set of results, or null if there are no more results.
",
- "ListDatastoresRequest$nextToken": "The token for the next set of results.
",
- "ListDatastoresResponse$nextToken": "The token to retrieve the next set of results, or null if there are no more results.
",
- "ListPipelinesRequest$nextToken": "The token for the next set of results.
",
- "ListPipelinesResponse$nextToken": "The token to retrieve the next set of results, or null if there are no more results.
"
- }
- },
- "OffsetSeconds": {
- "base": null,
- "refs": {
- "DeltaTime$offsetSeconds": "The number of seconds of estimated in-flight lag time of message data. When you create dataset contents using message data from a specified timeframe, some message data might still be in flight when processing begins, and so do not arrive in time to be processed. Use this field to make allowances for the in flight time of your message data, so that data not processed from a previous timeframe is included with the next timeframe. Otherwise, missed message data would be excluded from processing during the next timeframe too, because its timestamp places it within the previous timeframe.
"
- }
- },
- "OutputFileName": {
- "base": null,
- "refs": {
- "OutputFileUriValue$fileName": "The URI of the location where dataset contents are stored, usually the URI of a file in an S3 bucket.
"
- }
- },
- "OutputFileUriValue": {
- "base": "The value of the variable as a structure that specifies an output file URI.
",
- "refs": {
- "Variable$outputFileUriValue": "The value of the variable as a structure that specifies an output file URI.
"
- }
- },
- "ParquetConfiguration": {
- "base": "Contains the configuration information of the Parquet format.
",
- "refs": {
- "FileFormatConfiguration$parquetConfiguration": "Contains the configuration information of the Parquet format.
"
- }
- },
- "Partition": {
- "base": " A partition dimension defined by an attribute.
",
- "refs": {
- "DatastorePartition$attributePartition": " A partition dimension defined by an attributeName.
"
- }
- },
- "PartitionAttributeName": {
- "base": null,
- "refs": {
- "Partition$attributeName": " The name of the attribute that defines a partition dimension.
",
- "TimestampPartition$attributeName": " The attribute name of the partition defined by a timestamp.
"
- }
- },
- "Partitions": {
- "base": null,
- "refs": {
- "DatastorePartitions$partitions": " A list of partition dimensions in a data store.
"
- }
- },
- "Pipeline": {
- "base": "Contains information about a pipeline.
",
- "refs": {
- "DescribePipelineResponse$pipeline": "A Pipeline object that contains information about the pipeline.
"
- }
- },
- "PipelineActivities": {
- "base": null,
- "refs": {
- "CreatePipelineRequest$pipelineActivities": "A list of PipelineActivity objects. Activities perform transformations on your messages, such as removing, renaming or adding message attributes; filtering messages based on attribute values; invoking your Lambda functions on messages for advanced processing; or performing mathematical transformations to normalize device data.
The list can be 2-25 PipelineActivity objects and must contain both a channel and a datastore activity. Each entry in the list must contain only one activity. For example:
pipelineActivities = [ { \"channel\": { ... } }, { \"lambda\": { ... } }, ... ]
",
- "Pipeline$activities": "The activities that perform transformations on the messages.
",
- "UpdatePipelineRequest$pipelineActivities": "A list of PipelineActivity objects. Activities perform transformations on your messages, such as removing, renaming or adding message attributes; filtering messages based on attribute values; invoking your Lambda functions on messages for advanced processing; or performing mathematical transformations to normalize device data.
The list can be 2-25 PipelineActivity objects and must contain both a channel and a datastore activity. Each entry in the list must contain only one activity. For example:
pipelineActivities = [ { \"channel\": { ... } }, { \"lambda\": { ... } }, ... ]
"
- }
- },
- "PipelineActivity": {
- "base": "An activity that performs a transformation on a message.
",
- "refs": {
- "PipelineActivities$member": null,
- "RunPipelineActivityRequest$pipelineActivity": "The pipeline activity that is run. This must not be a channel activity or a data store activity because these activities are used in a pipeline only to load the original message and to store the (possibly) transformed message. If a Lambda activity is specified, only short-running Lambda functions (those with a timeout of 30 seconds or less) can be used.
"
- }
- },
- "PipelineArn": {
- "base": null,
- "refs": {
- "CreatePipelineResponse$pipelineArn": "The ARN of the pipeline.
",
- "Pipeline$arn": "The ARN of the pipeline.
"
- }
- },
- "PipelineName": {
- "base": null,
- "refs": {
- "CancelPipelineReprocessingRequest$pipelineName": "The name of pipeline for which data reprocessing is canceled.
",
- "CreatePipelineRequest$pipelineName": "The name of the pipeline.
",
- "CreatePipelineResponse$pipelineName": "The name of the pipeline.
",
- "DeletePipelineRequest$pipelineName": "The name of the pipeline to delete.
",
- "DescribePipelineRequest$pipelineName": "The name of the pipeline whose information is retrieved.
",
- "Pipeline$name": "The name of the pipeline.
",
- "PipelineSummary$pipelineName": "The name of the pipeline.
",
- "StartPipelineReprocessingRequest$pipelineName": "The name of the pipeline on which to start reprocessing.
",
- "UpdatePipelineRequest$pipelineName": "The name of the pipeline to update.
"
- }
- },
- "PipelineSummaries": {
- "base": null,
- "refs": {
- "ListPipelinesResponse$pipelineSummaries": "A list of PipelineSummary objects.
"
- }
- },
- "PipelineSummary": {
- "base": "A summary of information about a pipeline.
",
- "refs": {
- "PipelineSummaries$member": null
- }
- },
- "PresignedURI": {
- "base": null,
- "refs": {
- "DatasetEntry$dataURI": "The presigned URI of the dataset item.
"
- }
- },
- "PutLoggingOptionsRequest": {
- "base": null,
- "refs": {}
- },
- "QueryFilter": {
- "base": "Information that is used to filter message data, to segregate it according to the timeframe in which it arrives.
",
- "refs": {
- "QueryFilters$member": null
- }
- },
- "QueryFilters": {
- "base": null,
- "refs": {
- "SqlQueryDatasetAction$filters": "Prefilters applied to message data.
"
- }
- },
- "Reason": {
- "base": null,
- "refs": {
- "DatasetContentStatus$reason": "The reason the dataset contents are in this state.
"
- }
- },
- "RemoveAttributesActivity": {
- "base": "An activity that removes attributes from a message.
",
- "refs": {
- "PipelineActivity$removeAttributes": "Removes attributes from a message.
"
- }
- },
- "ReprocessingId": {
- "base": null,
- "refs": {
- "CancelPipelineReprocessingRequest$reprocessingId": "The ID of the reprocessing task (returned by StartPipelineReprocessing).
",
- "ReprocessingSummary$id": "The reprocessingId returned by StartPipelineReprocessing.
",
- "StartPipelineReprocessingResponse$reprocessingId": "The ID of the pipeline reprocessing activity that was started.
"
- }
- },
- "ReprocessingStatus": {
- "base": null,
- "refs": {
- "ReprocessingSummary$status": "The status of the pipeline reprocessing.
"
- }
- },
- "ReprocessingSummaries": {
- "base": null,
- "refs": {
- "Pipeline$reprocessingSummaries": "A summary of information about the pipeline reprocessing.
",
- "PipelineSummary$reprocessingSummaries": "A summary of information about the pipeline reprocessing.
"
- }
- },
- "ReprocessingSummary": {
- "base": "Information about pipeline reprocessing.
",
- "refs": {
- "ReprocessingSummaries$member": null
- }
- },
- "ResourceAlreadyExistsException": {
- "base": "A resource with the same name already exists.
",
- "refs": {}
- },
- "ResourceArn": {
- "base": null,
- "refs": {
- "ListTagsForResourceRequest$resourceArn": "The ARN of the resource whose tags you want to list.
",
- "TagResourceRequest$resourceArn": "The ARN of the resource whose tags you want to modify.
",
- "UntagResourceRequest$resourceArn": "The ARN of the resource whose tags you want to remove.
"
- }
- },
- "ResourceConfiguration": {
- "base": "The configuration of the resource used to execute the containerAction.
",
- "refs": {
- "ContainerDatasetAction$resourceConfiguration": "Configuration of the resource that executes the containerAction.
"
- }
- },
- "ResourceNotFoundException": {
- "base": "A resource with the specified name could not be found.
",
- "refs": {}
- },
- "RetentionPeriod": {
- "base": "How long, in days, message data is kept.
",
- "refs": {
- "Channel$retentionPeriod": "How long, in days, message data is kept for the channel.
",
- "CreateChannelRequest$retentionPeriod": "How long, in days, message data is kept for the channel. When customerManagedS3 storage is selected, this parameter is ignored.
",
- "CreateChannelResponse$retentionPeriod": "How long, in days, message data is kept for the channel.
",
- "CreateDatasetRequest$retentionPeriod": "Optional. How long, in days, versions of dataset contents are kept for the dataset. If not specified or set to null, versions of dataset contents are retained for at most 90 days. The number of versions of dataset contents retained is determined by the versioningConfiguration parameter. For more information, see Keeping Multiple Versions of IoT Analytics datasets in the IoT Analytics User Guide .
",
- "CreateDatasetResponse$retentionPeriod": "How long, in days, dataset contents are kept for the dataset.
",
- "CreateDatastoreRequest$retentionPeriod": "How long, in days, message data is kept for the data store. When customerManagedS3 storage is selected, this parameter is ignored.
",
- "CreateDatastoreResponse$retentionPeriod": "How long, in days, message data is kept for the data store.
",
- "Dataset$retentionPeriod": "Optional. How long, in days, message data is kept for the dataset.
",
- "Datastore$retentionPeriod": "How long, in days, message data is kept for the data store. When customerManagedS3 storage is selected, this parameter is ignored.
",
- "UpdateChannelRequest$retentionPeriod": "How long, in days, message data is kept for the channel. The retention period can't be updated if the channel's Amazon S3 storage is customer-managed.
",
- "UpdateDatasetRequest$retentionPeriod": "How long, in days, dataset contents are kept for the dataset.
",
- "UpdateDatastoreRequest$retentionPeriod": "How long, in days, message data is kept for the data store. The retention period can't be updated if the data store's Amazon S3 storage is customer-managed.
"
- }
- },
- "RetentionPeriodInDays": {
- "base": null,
- "refs": {
- "RetentionPeriod$numberOfDays": "The number of days that message data is kept. The unlimited parameter must be false.
"
- }
- },
- "RoleArn": {
- "base": null,
- "refs": {
- "ContainerDatasetAction$executionRoleArn": "The ARN of the role that gives permission to the system to access required resources to run the containerAction. This includes, at minimum, permission to retrieve the dataset contents that are the input to the containerized application.
",
- "CustomerManagedChannelS3Storage$roleArn": "The ARN of the role that grants IoT Analytics permission to interact with your Amazon S3 resources.
",
- "CustomerManagedChannelS3StorageSummary$roleArn": "The ARN of the role that grants IoT Analytics permission to interact with your Amazon S3 resources.
",
- "CustomerManagedDatastoreS3Storage$roleArn": "The ARN of the role that grants IoT Analytics permission to interact with your Amazon S3 resources.
",
- "CustomerManagedDatastoreS3StorageSummary$roleArn": "The ARN of the role that grants IoT Analytics permission to interact with your Amazon S3 resources.
",
- "DeviceRegistryEnrichActivity$roleArn": "The ARN of the role that allows access to the device's registry information.
",
- "DeviceShadowEnrichActivity$roleArn": "The ARN of the role that allows access to the device's shadow.
",
- "IotEventsDestinationConfiguration$roleArn": "The ARN of the role that grants IoT Analytics permission to deliver dataset contents to an IoT Events input.
",
- "LoggingOptions$roleArn": "The ARN of the role that grants permission to IoT Analytics to perform logging.
",
- "S3DestinationConfiguration$roleArn": "The ARN of the role that grants IoT Analytics permission to interact with your Amazon S3 and Glue resources.
"
- }
- },
- "RunPipelineActivityRequest": {
- "base": null,
- "refs": {}
- },
- "RunPipelineActivityResponse": {
- "base": null,
- "refs": {}
- },
- "S3DestinationConfiguration": {
- "base": "Configuration information for delivery of dataset contents to Amazon Simple Storage Service (Amazon S3).
",
- "refs": {
- "DatasetContentDeliveryDestination$s3DestinationConfiguration": "Configuration information for delivery of dataset contents to Amazon S3.
"
- }
- },
- "S3KeyPrefix": {
- "base": null,
- "refs": {
- "CustomerManagedChannelS3Storage$keyPrefix": "(Optional) The prefix used to create the keys of the channel data objects. Each object in an S3 bucket has a key that is its unique identifier in the bucket. Each object in a bucket has exactly one key. The prefix must end with a forward slash (/).
",
- "CustomerManagedChannelS3StorageSummary$keyPrefix": "(Optional) The prefix used to create the keys of the channel data objects. Each object in an S3 bucket has a key that is its unique identifier within the bucket (each object in a bucket has exactly one key). The prefix must end with a forward slash (/).
",
- "CustomerManagedDatastoreS3Storage$keyPrefix": "(Optional) The prefix used to create the keys of the data store data objects. Each object in an Amazon S3 bucket has a key that is its unique identifier in the bucket. Each object in a bucket has exactly one key. The prefix must end with a forward slash (/).
",
- "CustomerManagedDatastoreS3StorageSummary$keyPrefix": "(Optional) The prefix used to create the keys of the data store data objects. Each object in an Amazon S3 bucket has a key that is its unique identifier in the bucket. Each object in a bucket has exactly one key. The prefix must end with a forward slash (/).
",
- "IotSiteWiseCustomerManagedDatastoreS3Storage$keyPrefix": " (Optional) The prefix used to create the keys of the data store data objects. Each object in an Amazon S3 bucket has a key that is its unique identifier in the bucket. Each object in a bucket has exactly one key. The prefix must end with a forward slash (/).
",
- "IotSiteWiseCustomerManagedDatastoreS3StorageSummary$keyPrefix": " (Optional) The prefix used to create the keys of the data store data objects. Each object in an Amazon S3 bucket has a key that is its unique identifier in the bucket. Each object in a bucket has exactly one key. The prefix must end with a forward slash (/).
"
- }
- },
- "S3PathChannelMessage": {
- "base": null,
- "refs": {
- "S3PathChannelMessages$member": null
- }
- },
- "S3PathChannelMessages": {
- "base": null,
- "refs": {
- "ChannelMessages$s3Paths": "Specifies one or more keys that identify the Amazon Simple Storage Service (Amazon S3) objects that save your channel messages.
You must use the full path for the key.
Example path: channel/mychannel/__dt=2020-02-29 00:00:00/1582940490000_1582940520000_123456789012_mychannel_0_2118.0.json.gz
"
- }
- },
- "SampleChannelDataRequest": {
- "base": null,
- "refs": {}
- },
- "SampleChannelDataResponse": {
- "base": null,
- "refs": {}
- },
- "Schedule": {
- "base": "The schedule for when to trigger an update.
",
- "refs": {
- "DatasetTrigger$schedule": "The Schedule when the trigger is initiated.
"
- }
- },
- "ScheduleExpression": {
- "base": null,
- "refs": {
- "Schedule$expression": "The expression that defines when to trigger an update. For more information, see Schedule Expressions for Rules in the Amazon CloudWatch Events User Guide .
"
- }
- },
- "SchemaDefinition": {
- "base": "Information needed to define a schema.
",
- "refs": {
- "ParquetConfiguration$schemaDefinition": "Information needed to define a schema.
"
- }
- },
- "SelectAttributesActivity": {
- "base": "Used to create a new message using only the specified attributes from the original message.
",
- "refs": {
- "PipelineActivity$selectAttributes": "Used to create a new message using only the specified attributes from the original message.
"
- }
- },
- "ServiceManagedChannelS3Storage": {
- "base": "Used to store channel data in an S3 bucket managed by IoT Analytics. You can't change the choice of S3 storage after the data store is created.
",
- "refs": {
- "ChannelStorage$serviceManagedS3": "Used to store channel data in an S3 bucket managed by IoT Analytics. You can't change the choice of S3 storage after the data store is created.
"
- }
- },
- "ServiceManagedChannelS3StorageSummary": {
- "base": "Used to store channel data in an S3 bucket managed by IoT Analytics.
",
- "refs": {
- "ChannelStorageSummary$serviceManagedS3": "Used to store channel data in an S3 bucket managed by IoT Analytics.
"
- }
- },
- "ServiceManagedDatastoreS3Storage": {
- "base": "Used to store data in an Amazon S3 bucket managed by IoT Analytics. You can't change the choice of Amazon S3 storage after your data store is created.
",
- "refs": {
- "DatastoreStorage$serviceManagedS3": "Used to store data in an Amazon S3 bucket managed by IoT Analytics. You can't change the choice of Amazon S3 storage after your data store is created.
"
- }
- },
- "ServiceManagedDatastoreS3StorageSummary": {
- "base": "Contains information about the data store that is managed by IoT Analytics.
",
- "refs": {
- "DatastoreStorageSummary$serviceManagedS3": "Used to store data in an Amazon S3 bucket managed by IoT Analytics.
"
- }
- },
- "ServiceUnavailableException": {
- "base": "The service is temporarily unavailable.
",
- "refs": {}
- },
- "SessionTimeoutInMinutes": {
- "base": null,
- "refs": {
- "DeltaTimeSessionWindowConfiguration$timeoutInMinutes": "A time interval. You can use timeoutInMinutes so that IoT Analytics can batch up late data notifications that have been generated since the last execution. IoT Analytics sends one batch of notifications to Amazon CloudWatch Events at one time.
For more information about how to write a timestamp expression, see Date and Time Functions and Operators , in the Presto 0.172 Documentation .
"
- }
- },
- "SizeInBytes": {
- "base": null,
- "refs": {
- "EstimatedResourceSize$estimatedSizeInBytes": "The estimated size of the resource, in bytes.
"
- }
- },
- "SqlQuery": {
- "base": null,
- "refs": {
- "SqlQueryDatasetAction$sqlQuery": "A SQL query string.
"
- }
- },
- "SqlQueryDatasetAction": {
- "base": "The SQL query to modify the message.
",
- "refs": {
- "DatasetAction$queryAction": "An SqlQueryDatasetAction object that uses an SQL query to automatically create dataset contents.
"
- }
- },
- "StartPipelineReprocessingRequest": {
- "base": null,
- "refs": {}
- },
- "StartPipelineReprocessingResponse": {
- "base": null,
- "refs": {}
- },
- "StartTime": {
- "base": null,
- "refs": {
- "SampleChannelDataRequest$startTime": "The start of the time window from which sample messages are retrieved.
",
- "StartPipelineReprocessingRequest$startTime": "The start time (inclusive) of raw message data that is reprocessed.
If you specify a value for the startTime parameter, you must not use the channelMessages object.
"
- }
- },
- "StringValue": {
- "base": null,
- "refs": {
- "Variable$stringValue": "The value of the variable as a string.
"
- }
- },
- "Tag": {
- "base": "A set of key-value pairs that are used to manage the resource.
",
- "refs": {
- "TagList$member": null
- }
- },
- "TagKey": {
- "base": null,
- "refs": {
- "Tag$key": "The tag's key.
",
- "TagKeyList$member": null
- }
- },
- "TagKeyList": {
- "base": null,
- "refs": {
- "UntagResourceRequest$tagKeys": "The keys of those tags which you want to remove.
"
- }
- },
- "TagList": {
- "base": null,
- "refs": {
- "CreateChannelRequest$tags": "Metadata which can be used to manage the channel.
",
- "CreateDatasetRequest$tags": "Metadata which can be used to manage the dataset.
",
- "CreateDatastoreRequest$tags": "Metadata which can be used to manage the data store.
",
- "CreatePipelineRequest$tags": "Metadata which can be used to manage the pipeline.
",
- "ListTagsForResourceResponse$tags": "The tags (metadata) that you have assigned to the resource.
",
- "TagResourceRequest$tags": "The new or modified tags for the resource.
"
- }
- },
- "TagResourceRequest": {
- "base": null,
- "refs": {}
- },
- "TagResourceResponse": {
- "base": null,
- "refs": {}
- },
- "TagValue": {
- "base": null,
- "refs": {
- "Tag$value": "The tag's value.
"
- }
- },
- "ThrottlingException": {
- "base": "The request was denied due to request throttling.
",
- "refs": {}
- },
- "TimeExpression": {
- "base": null,
- "refs": {
- "DeltaTime$timeExpression": "An expression by which the time of the message data might be determined. This can be the name of a timestamp field or a SQL expression that is used to derive the time the message data was generated.
"
- }
- },
- "Timestamp": {
- "base": null,
- "refs": {
- "Channel$creationTime": "When the channel was created.
",
- "Channel$lastUpdateTime": "When the channel was last updated.
",
- "Channel$lastMessageArrivalTime": "The last time when a new message arrived in the channel.
IoT Analytics updates this value at most once per minute for one channel. Hence, the lastMessageArrivalTime value is an approximation.
This feature only applies to messages that arrived in the data store after October 23, 2020.
",
- "ChannelSummary$creationTime": "When the channel was created.
",
- "ChannelSummary$lastUpdateTime": "The last time the channel was updated.
",
- "ChannelSummary$lastMessageArrivalTime": "The last time when a new message arrived in the channel.
IoT Analytics updates this value at most once per minute for one channel. Hence, the lastMessageArrivalTime value is an approximation.
This feature only applies to messages that arrived in the data store after October 23, 2020.
",
- "Dataset$creationTime": "When the dataset was created.
",
- "Dataset$lastUpdateTime": "The last time the dataset was updated.
",
- "DatasetContentSummary$creationTime": "The actual time the creation of the dataset contents was started.
",
- "DatasetContentSummary$scheduleTime": "The time the creation of the dataset contents was scheduled to start.
",
- "DatasetContentSummary$completionTime": "The time the dataset content status was updated to SUCCEEDED or FAILED.
",
- "DatasetSummary$creationTime": "The time the dataset was created.
",
- "DatasetSummary$lastUpdateTime": "The last time the dataset was updated.
",
- "Datastore$creationTime": "When the data store was created.
",
- "Datastore$lastUpdateTime": "The last time the data store was updated.
",
- "Datastore$lastMessageArrivalTime": "The last time when a new message arrived in the data store.
IoT Analytics updates this value at most once per minute for Amazon Simple Storage Service one data store. Hence, the lastMessageArrivalTime value is an approximation.
This feature only applies to messages that arrived in the data store after October 23, 2020.
",
- "DatastoreSummary$creationTime": "When the data store was created.
",
- "DatastoreSummary$lastUpdateTime": "The last time the data store was updated.
",
- "DatastoreSummary$lastMessageArrivalTime": "The last time when a new message arrived in the data store.
IoT Analytics updates this value at most once per minute for Amazon Simple Storage Service one data store. Hence, the lastMessageArrivalTime value is an approximation.
This feature only applies to messages that arrived in the data store after October 23, 2020.
",
- "EstimatedResourceSize$estimatedOn": "The time when the estimate of the size of the resource was made.
",
- "GetDatasetContentResponse$timestamp": "The time when the request was made.
",
- "ListDatasetContentsRequest$scheduledOnOrAfter": "A filter to limit results to those dataset contents whose creation is scheduled on or after the given time. See the field triggers.schedule in the CreateDataset request. (timestamp)
",
- "ListDatasetContentsRequest$scheduledBefore": "A filter to limit results to those dataset contents whose creation is scheduled before the given time. See the field triggers.schedule in the CreateDataset request. (timestamp)
",
- "Pipeline$creationTime": "When the pipeline was created.
",
- "Pipeline$lastUpdateTime": "The last time the pipeline was updated.
",
- "PipelineSummary$creationTime": "When the pipeline was created.
",
- "PipelineSummary$lastUpdateTime": "When the pipeline was last updated.
",
- "ReprocessingSummary$creationTime": "The time the pipeline reprocessing was created.
"
- }
- },
- "TimestampFormat": {
- "base": null,
- "refs": {
- "TimestampPartition$timestampFormat": " The timestamp format of a partition defined by a timestamp. The default format is seconds since epoch (January 1, 1970 at midnight UTC time).
"
- }
- },
- "TimestampPartition": {
- "base": " A partition dimension defined by a timestamp attribute.
",
- "refs": {
- "DatastorePartition$timestampPartition": " A partition dimension defined by a timestamp attribute.
"
- }
- },
- "TriggeringDataset": {
- "base": "Information about the dataset whose content generation triggers the new dataset content generation.
",
- "refs": {
- "DatasetTrigger$dataset": "The dataset whose content creation triggers the creation of this dataset's contents.
"
- }
- },
- "UnlimitedRetentionPeriod": {
- "base": null,
- "refs": {
- "RetentionPeriod$unlimited": "If true, message data is kept indefinitely.
"
- }
- },
- "UnlimitedVersioning": {
- "base": null,
- "refs": {
- "VersioningConfiguration$unlimited": "If true, unlimited versions of dataset contents are kept.
"
- }
- },
- "UntagResourceRequest": {
- "base": null,
- "refs": {}
- },
- "UntagResourceResponse": {
- "base": null,
- "refs": {}
- },
- "UpdateChannelRequest": {
- "base": null,
- "refs": {}
- },
- "UpdateDatasetRequest": {
- "base": null,
- "refs": {}
- },
- "UpdateDatastoreRequest": {
- "base": null,
- "refs": {}
- },
- "UpdatePipelineRequest": {
- "base": null,
- "refs": {}
- },
- "Variable": {
- "base": "An instance of a variable to be passed to the containerAction execution. Each variable must have a name and a value given by one of stringValue, datasetContentVersionValue, or outputFileUriValue.
",
- "refs": {
- "Variables$member": null
- }
- },
- "VariableName": {
- "base": null,
- "refs": {
- "Variable$name": "The name of the variable.
"
- }
- },
- "Variables": {
- "base": null,
- "refs": {
- "ContainerDatasetAction$variables": "The values of variables used in the context of the execution of the containerized application (basically, parameters passed to the application). Each variable must have a name and a value given by one of stringValue, datasetContentVersionValue, or outputFileUriValue.
"
- }
- },
- "VersioningConfiguration": {
- "base": "Information about the versioning of dataset contents.
",
- "refs": {
- "CreateDatasetRequest$versioningConfiguration": "Optional. How many versions of dataset contents are kept. If not specified or set to null, only the latest version plus the latest succeeded version (if they are different) are kept for the time period specified by the retentionPeriod parameter. For more information, see Keeping Multiple Versions of IoT Analytics datasets in the IoT Analytics User Guide .
",
- "Dataset$versioningConfiguration": "Optional. How many versions of dataset contents are kept. If not specified or set to null, only the latest version plus the latest succeeded version (if they are different) are kept for the time period specified by the retentionPeriod parameter. For more information, see Keeping Multiple Versions of IoT Analytics datasets in the IoT Analytics User Guide .
",
- "UpdateDatasetRequest$versioningConfiguration": "Optional. How many versions of dataset contents are kept. If not specified or set to null, only the latest version plus the latest succeeded version (if they are different) are kept for the time period specified by the retentionPeriod parameter. For more information, see Keeping Multiple Versions of IoT Analytics datasets in the IoT Analytics User Guide .
"
- }
- },
- "VolumeSizeInGB": {
- "base": null,
- "refs": {
- "ResourceConfiguration$volumeSizeInGB": "The size, in GB, of the persistent storage available to the resource instance used to execute the containerAction (min: 1, max: 50).
"
- }
- },
- "errorMessage": {
- "base": null,
- "refs": {
- "InternalFailureException$message": null,
- "InvalidRequestException$message": null,
- "LimitExceededException$message": null,
- "ResourceAlreadyExistsException$message": null,
- "ResourceNotFoundException$message": null,
- "ServiceUnavailableException$message": null,
- "ThrottlingException$message": null
- }
- },
- "resourceArn": {
- "base": null,
- "refs": {
- "ResourceAlreadyExistsException$resourceArn": "The ARN of the resource.
"
- }
- },
- "resourceId": {
- "base": null,
- "refs": {
- "ResourceAlreadyExistsException$resourceId": "The ID of the resource.
"
- }
- }
- }
-}
diff --git a/apis/iotanalytics/2017-11-27/endpoint-rule-set-1.json b/apis/iotanalytics/2017-11-27/endpoint-rule-set-1.json
deleted file mode 100644
index bdc5ba9f524..00000000000
--- a/apis/iotanalytics/2017-11-27/endpoint-rule-set-1.json
+++ /dev/null
@@ -1,314 +0,0 @@
-{
- "version": "1.0",
- "parameters": {
- "Region": {
- "builtIn": "AWS::Region",
- "required": false,
- "documentation": "The AWS region used to dispatch the request.",
- "type": "string"
- },
- "UseDualStack": {
- "builtIn": "AWS::UseDualStack",
- "required": true,
- "default": false,
- "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.",
- "type": "boolean"
- },
- "UseFIPS": {
- "builtIn": "AWS::UseFIPS",
- "required": true,
- "default": false,
- "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.",
- "type": "boolean"
- },
- "Endpoint": {
- "builtIn": "SDK::Endpoint",
- "required": false,
- "documentation": "Override the endpoint used to send this request",
- "type": "string"
- }
- },
- "rules": [
- {
- "conditions": [
- {
- "fn": "isSet",
- "argv": [
- {
- "ref": "Endpoint"
- }
- ]
- }
- ],
- "rules": [
- {
- "conditions": [
- {
- "fn": "booleanEquals",
- "argv": [
- {
- "ref": "UseFIPS"
- },
- true
- ]
- }
- ],
- "error": "Invalid Configuration: FIPS and custom endpoint are not supported",
- "type": "error"
- },
- {
- "conditions": [
- {
- "fn": "booleanEquals",
- "argv": [
- {
- "ref": "UseDualStack"
- },
- true
- ]
- }
- ],
- "error": "Invalid Configuration: Dualstack and custom endpoint are not supported",
- "type": "error"
- },
- {
- "conditions": [],
- "endpoint": {
- "url": {
- "ref": "Endpoint"
- },
- "properties": {},
- "headers": {}
- },
- "type": "endpoint"
- }
- ],
- "type": "tree"
- },
- {
- "conditions": [
- {
- "fn": "isSet",
- "argv": [
- {
- "ref": "Region"
- }
- ]
- }
- ],
- "rules": [
- {
- "conditions": [
- {
- "fn": "aws.partition",
- "argv": [
- {
- "ref": "Region"
- }
- ],
- "assign": "PartitionResult"
- }
- ],
- "rules": [
- {
- "conditions": [
- {
- "fn": "booleanEquals",
- "argv": [
- {
- "ref": "UseFIPS"
- },
- true
- ]
- },
- {
- "fn": "booleanEquals",
- "argv": [
- {
- "ref": "UseDualStack"
- },
- true
- ]
- }
- ],
- "rules": [
- {
- "conditions": [
- {
- "fn": "booleanEquals",
- "argv": [
- true,
- {
- "fn": "getAttr",
- "argv": [
- {
- "ref": "PartitionResult"
- },
- "supportsFIPS"
- ]
- }
- ]
- },
- {
- "fn": "booleanEquals",
- "argv": [
- true,
- {
- "fn": "getAttr",
- "argv": [
- {
- "ref": "PartitionResult"
- },
- "supportsDualStack"
- ]
- }
- ]
- }
- ],
- "rules": [
- {
- "conditions": [],
- "endpoint": {
- "url": "https://iotanalytics-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",
- "properties": {},
- "headers": {}
- },
- "type": "endpoint"
- }
- ],
- "type": "tree"
- },
- {
- "conditions": [],
- "error": "FIPS and DualStack are enabled, but this partition does not support one or both",
- "type": "error"
- }
- ],
- "type": "tree"
- },
- {
- "conditions": [
- {
- "fn": "booleanEquals",
- "argv": [
- {
- "ref": "UseFIPS"
- },
- true
- ]
- }
- ],
- "rules": [
- {
- "conditions": [
- {
- "fn": "booleanEquals",
- "argv": [
- {
- "fn": "getAttr",
- "argv": [
- {
- "ref": "PartitionResult"
- },
- "supportsFIPS"
- ]
- },
- true
- ]
- }
- ],
- "rules": [
- {
- "conditions": [],
- "endpoint": {
- "url": "https://iotanalytics-fips.{Region}.{PartitionResult#dnsSuffix}",
- "properties": {},
- "headers": {}
- },
- "type": "endpoint"
- }
- ],
- "type": "tree"
- },
- {
- "conditions": [],
- "error": "FIPS is enabled but this partition does not support FIPS",
- "type": "error"
- }
- ],
- "type": "tree"
- },
- {
- "conditions": [
- {
- "fn": "booleanEquals",
- "argv": [
- {
- "ref": "UseDualStack"
- },
- true
- ]
- }
- ],
- "rules": [
- {
- "conditions": [
- {
- "fn": "booleanEquals",
- "argv": [
- true,
- {
- "fn": "getAttr",
- "argv": [
- {
- "ref": "PartitionResult"
- },
- "supportsDualStack"
- ]
- }
- ]
- }
- ],
- "rules": [
- {
- "conditions": [],
- "endpoint": {
- "url": "https://iotanalytics.{Region}.{PartitionResult#dualStackDnsSuffix}",
- "properties": {},
- "headers": {}
- },
- "type": "endpoint"
- }
- ],
- "type": "tree"
- },
- {
- "conditions": [],
- "error": "DualStack is enabled but this partition does not support DualStack",
- "type": "error"
- }
- ],
- "type": "tree"
- },
- {
- "conditions": [],
- "endpoint": {
- "url": "https://iotanalytics.{Region}.{PartitionResult#dnsSuffix}",
- "properties": {},
- "headers": {}
- },
- "type": "endpoint"
- }
- ],
- "type": "tree"
- }
- ],
- "type": "tree"
- },
- {
- "conditions": [],
- "error": "Invalid Configuration: Missing Region",
- "type": "error"
- }
- ]
-}
\ No newline at end of file
diff --git a/apis/iotanalytics/2017-11-27/endpoint-tests-1.json b/apis/iotanalytics/2017-11-27/endpoint-tests-1.json
deleted file mode 100644
index 211ee3a3896..00000000000
--- a/apis/iotanalytics/2017-11-27/endpoint-tests-1.json
+++ /dev/null
@@ -1,361 +0,0 @@
-{
- "testCases": [
- {
- "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled",
- "expect": {
- "endpoint": {
- "url": "https://iotanalytics.ap-northeast-1.amazonaws.com"
- }
- },
- "params": {
- "Region": "ap-northeast-1",
- "UseFIPS": false,
- "UseDualStack": false
- }
- },
- {
- "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled",
- "expect": {
- "endpoint": {
- "url": "https://iotanalytics.ap-south-1.amazonaws.com"
- }
- },
- "params": {
- "Region": "ap-south-1",
- "UseFIPS": false,
- "UseDualStack": false
- }
- },
- {
- "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled",
- "expect": {
- "endpoint": {
- "url": "https://iotanalytics.ap-southeast-2.amazonaws.com"
- }
- },
- "params": {
- "Region": "ap-southeast-2",
- "UseFIPS": false,
- "UseDualStack": false
- }
- },
- {
- "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled",
- "expect": {
- "endpoint": {
- "url": "https://iotanalytics.eu-central-1.amazonaws.com"
- }
- },
- "params": {
- "Region": "eu-central-1",
- "UseFIPS": false,
- "UseDualStack": false
- }
- },
- {
- "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled",
- "expect": {
- "endpoint": {
- "url": "https://iotanalytics.eu-west-1.amazonaws.com"
- }
- },
- "params": {
- "Region": "eu-west-1",
- "UseFIPS": false,
- "UseDualStack": false
- }
- },
- {
- "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled",
- "expect": {
- "endpoint": {
- "url": "https://iotanalytics.us-east-1.amazonaws.com"
- }
- },
- "params": {
- "Region": "us-east-1",
- "UseFIPS": false,
- "UseDualStack": false
- }
- },
- {
- "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled",
- "expect": {
- "endpoint": {
- "url": "https://iotanalytics.us-east-2.amazonaws.com"
- }
- },
- "params": {
- "Region": "us-east-2",
- "UseFIPS": false,
- "UseDualStack": false
- }
- },
- {
- "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled",
- "expect": {
- "endpoint": {
- "url": "https://iotanalytics.us-west-2.amazonaws.com"
- }
- },
- "params": {
- "Region": "us-west-2",
- "UseFIPS": false,
- "UseDualStack": false
- }
- },
- {
- "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled",
- "expect": {
- "endpoint": {
- "url": "https://iotanalytics-fips.us-east-1.api.aws"
- }
- },
- "params": {
- "Region": "us-east-1",
- "UseFIPS": true,
- "UseDualStack": true
- }
- },
- {
- "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled",
- "expect": {
- "endpoint": {
- "url": "https://iotanalytics-fips.us-east-1.amazonaws.com"
- }
- },
- "params": {
- "Region": "us-east-1",
- "UseFIPS": true,
- "UseDualStack": false
- }
- },
- {
- "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled",
- "expect": {
- "endpoint": {
- "url": "https://iotanalytics.us-east-1.api.aws"
- }
- },
- "params": {
- "Region": "us-east-1",
- "UseFIPS": false,
- "UseDualStack": true
- }
- },
- {
- "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled",
- "expect": {
- "endpoint": {
- "url": "https://iotanalytics.cn-north-1.amazonaws.com.cn"
- }
- },
- "params": {
- "Region": "cn-north-1",
- "UseFIPS": false,
- "UseDualStack": false
- }
- },
- {
- "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled",
- "expect": {
- "endpoint": {
- "url": "https://iotanalytics-fips.cn-north-1.api.amazonwebservices.com.cn"
- }
- },
- "params": {
- "Region": "cn-north-1",
- "UseFIPS": true,
- "UseDualStack": true
- }
- },
- {
- "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled",
- "expect": {
- "endpoint": {
- "url": "https://iotanalytics-fips.cn-north-1.amazonaws.com.cn"
- }
- },
- "params": {
- "Region": "cn-north-1",
- "UseFIPS": true,
- "UseDualStack": false
- }
- },
- {
- "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled",
- "expect": {
- "endpoint": {
- "url": "https://iotanalytics.cn-north-1.api.amazonwebservices.com.cn"
- }
- },
- "params": {
- "Region": "cn-north-1",
- "UseFIPS": false,
- "UseDualStack": true
- }
- },
- {
- "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled",
- "expect": {
- "endpoint": {
- "url": "https://iotanalytics-fips.us-gov-east-1.api.aws"
- }
- },
- "params": {
- "Region": "us-gov-east-1",
- "UseFIPS": true,
- "UseDualStack": true
- }
- },
- {
- "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled",
- "expect": {
- "endpoint": {
- "url": "https://iotanalytics-fips.us-gov-east-1.amazonaws.com"
- }
- },
- "params": {
- "Region": "us-gov-east-1",
- "UseFIPS": true,
- "UseDualStack": false
- }
- },
- {
- "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled",
- "expect": {
- "endpoint": {
- "url": "https://iotanalytics.us-gov-east-1.api.aws"
- }
- },
- "params": {
- "Region": "us-gov-east-1",
- "UseFIPS": false,
- "UseDualStack": true
- }
- },
- {
- "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled",
- "expect": {
- "endpoint": {
- "url": "https://iotanalytics.us-gov-east-1.amazonaws.com"
- }
- },
- "params": {
- "Region": "us-gov-east-1",
- "UseFIPS": false,
- "UseDualStack": false
- }
- },
- {
- "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled",
- "expect": {
- "endpoint": {
- "url": "https://iotanalytics-fips.us-iso-east-1.c2s.ic.gov"
- }
- },
- "params": {
- "Region": "us-iso-east-1",
- "UseFIPS": true,
- "UseDualStack": false
- }
- },
- {
- "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled",
- "expect": {
- "endpoint": {
- "url": "https://iotanalytics.us-iso-east-1.c2s.ic.gov"
- }
- },
- "params": {
- "Region": "us-iso-east-1",
- "UseFIPS": false,
- "UseDualStack": false
- }
- },
- {
- "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled",
- "expect": {
- "endpoint": {
- "url": "https://iotanalytics-fips.us-isob-east-1.sc2s.sgov.gov"
- }
- },
- "params": {
- "Region": "us-isob-east-1",
- "UseFIPS": true,
- "UseDualStack": false
- }
- },
- {
- "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled",
- "expect": {
- "endpoint": {
- "url": "https://iotanalytics.us-isob-east-1.sc2s.sgov.gov"
- }
- },
- "params": {
- "Region": "us-isob-east-1",
- "UseFIPS": false,
- "UseDualStack": false
- }
- },
- {
- "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled",
- "expect": {
- "endpoint": {
- "url": "https://example.com"
- }
- },
- "params": {
- "Region": "us-east-1",
- "UseFIPS": false,
- "UseDualStack": false,
- "Endpoint": "https://example.com"
- }
- },
- {
- "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled",
- "expect": {
- "endpoint": {
- "url": "https://example.com"
- }
- },
- "params": {
- "UseFIPS": false,
- "UseDualStack": false,
- "Endpoint": "https://example.com"
- }
- },
- {
- "documentation": "For custom endpoint with fips enabled and dualstack disabled",
- "expect": {
- "error": "Invalid Configuration: FIPS and custom endpoint are not supported"
- },
- "params": {
- "Region": "us-east-1",
- "UseFIPS": true,
- "UseDualStack": false,
- "Endpoint": "https://example.com"
- }
- },
- {
- "documentation": "For custom endpoint with fips disabled and dualstack enabled",
- "expect": {
- "error": "Invalid Configuration: Dualstack and custom endpoint are not supported"
- },
- "params": {
- "Region": "us-east-1",
- "UseFIPS": false,
- "UseDualStack": true,
- "Endpoint": "https://example.com"
- }
- },
- {
- "documentation": "Missing region",
- "expect": {
- "error": "Invalid Configuration: Missing Region"
- }
- }
- ],
- "version": "1.0"
-}
\ No newline at end of file
diff --git a/apis/iotanalytics/2017-11-27/examples-1.json b/apis/iotanalytics/2017-11-27/examples-1.json
deleted file mode 100644
index 2fb77604d1b..00000000000
--- a/apis/iotanalytics/2017-11-27/examples-1.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "version": "1.0",
- "examples": {}
-}
diff --git a/apis/iotanalytics/2017-11-27/paginators-1.json b/apis/iotanalytics/2017-11-27/paginators-1.json
deleted file mode 100644
index c234bf079e9..00000000000
--- a/apis/iotanalytics/2017-11-27/paginators-1.json
+++ /dev/null
@@ -1,29 +0,0 @@
-{
- "pagination": {
- "ListChannels": {
- "input_token": "nextToken",
- "output_token": "nextToken",
- "limit_key": "maxResults"
- },
- "ListDatasetContents": {
- "input_token": "nextToken",
- "output_token": "nextToken",
- "limit_key": "maxResults"
- },
- "ListDatasets": {
- "input_token": "nextToken",
- "output_token": "nextToken",
- "limit_key": "maxResults"
- },
- "ListDatastores": {
- "input_token": "nextToken",
- "output_token": "nextToken",
- "limit_key": "maxResults"
- },
- "ListPipelines": {
- "input_token": "nextToken",
- "output_token": "nextToken",
- "limit_key": "maxResults"
- }
- }
-}
diff --git a/gems/aws-sdk-cloudwatchevidently/CHANGELOG.md b/gems/aws-sdk-cloudwatchevidently/CHANGELOG.md
deleted file mode 100644
index a2f7fae5150..00000000000
--- a/gems/aws-sdk-cloudwatchevidently/CHANGELOG.md
+++ /dev/null
@@ -1,277 +0,0 @@
-Unreleased Changes
-------------------
-
-* Feature - CloudWatch Evidently has been removed from the SDK because it has been discontinued.
-
-1.53.0 (2026-01-26)
-------------------
-
-* Feature - Deprecate all Evidently API for AWS CloudWatch Evidently deprecation
-
-1.52.0 (2026-01-16)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.51.0 (2026-01-08)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.50.0 (2026-01-05)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.49.0 (2025-11-21)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.48.0 (2025-10-21)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.47.0 (2025-10-17)
-------------------
-
-* Feature - Update endpoint ruleset parameters casing
-
-1.46.0 (2025-08-27)
-------------------
-
-* Feature - Remove incorrect endpoint tests
-
-1.45.0 (2025-08-26)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.44.0 (2025-08-04)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.43.0 (2025-07-31)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.42.0 (2025-07-21)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.41.0 (2025-06-02)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.40.0 (2025-05-12)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.39.0 (2025-05-01)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.38.0 (2025-02-18)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.37.0 (2025-02-06)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.36.0 (2025-01-15)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.35.0 (2024-10-18)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.34.0 (2024-09-24)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.33.0 (2024-09-23)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.32.0 (2024-09-20)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.31.0 (2024-09-11)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.30.0 (2024-09-10)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.29.0 (2024-09-03)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.28.0 (2024-07-02)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.27.0 (2024-06-25)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.26.0 (2024-06-24)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.25.0 (2024-06-05)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.24.0 (2024-05-13)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.23.0 (2024-04-25)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.22.0 (2024-01-26)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.21.0 (2023-11-28)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.20.0 (2023-11-22)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.19.0 (2023-09-27)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.18.0 (2023-09-19)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.17.0 (2023-07-11)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.16.0 (2023-07-06)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.15.0 (2023-06-28)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.14.0 (2023-06-15)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.13.0 (2023-05-31)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.12.0 (2023-03-09)
-------------------
-
-* Feature - Updated entity override documentation
-
-1.11.0 (2023-02-09)
-------------------
-
-* Feature - Updated entity overrides parameter to accept up to 2500 overrides or a total of 40KB.
-
-1.10.0 (2023-01-18)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-* Issue - Replace runtime endpoint resolution approach with generated ruby code.
-
-1.9.0 (2022-10-25)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.8.0 (2022-09-13)
-------------------
-
-* Feature - This release adds support for the client-side evaluation - powered by AWS AppConfig feature.
-
-1.7.0 (2022-07-15)
-------------------
-
-* Feature - This release adds support for the new segmentation feature.
-
-1.6.0 (2022-05-09)
-------------------
-
-* Feature - Add detail message inside GetExperimentResults API response to indicate experiment result availability
-
-1.5.0 (2022-02-24)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.4.0 (2022-02-17)
-------------------
-
-* Feature - Add support for filtering list of experiments and launches by status
-
-1.3.0 (2022-02-03)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.2.0 (2021-12-21)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.1.0 (2021-11-30)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.0.0 (2021-11-29)
-------------------
-
-* Feature - Initial release of `aws-sdk-cloudwatchevidently`.
-
diff --git a/gems/aws-sdk-cloudwatchevidently/LICENSE.txt b/gems/aws-sdk-cloudwatchevidently/LICENSE.txt
deleted file mode 100644
index d6456956733..00000000000
--- a/gems/aws-sdk-cloudwatchevidently/LICENSE.txt
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/gems/aws-sdk-cloudwatchevidently/VERSION b/gems/aws-sdk-cloudwatchevidently/VERSION
deleted file mode 100644
index 3f4830156cb..00000000000
--- a/gems/aws-sdk-cloudwatchevidently/VERSION
+++ /dev/null
@@ -1 +0,0 @@
-1.53.0
diff --git a/gems/aws-sdk-cloudwatchevidently/aws-sdk-cloudwatchevidently.gemspec b/gems/aws-sdk-cloudwatchevidently/aws-sdk-cloudwatchevidently.gemspec
deleted file mode 100644
index 8d7cdd34b56..00000000000
--- a/gems/aws-sdk-cloudwatchevidently/aws-sdk-cloudwatchevidently.gemspec
+++ /dev/null
@@ -1,32 +0,0 @@
-# frozen_string_literal: true
-
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-Gem::Specification.new do |spec|
-
- spec.name = 'aws-sdk-cloudwatchevidently'
- spec.version = File.read(File.expand_path('../VERSION', __FILE__)).strip
- spec.summary = 'AWS SDK for Ruby - Amazon CloudWatch Evidently'
- spec.description = 'Official AWS Ruby gem for Amazon CloudWatch Evidently. This gem is part of the AWS SDK for Ruby.'
- spec.author = 'Amazon Web Services'
- spec.homepage = 'https://github.com/aws/aws-sdk-ruby'
- spec.license = 'Apache-2.0'
- spec.email = ['aws-dr-rubygems@amazon.com']
- spec.require_paths = ['lib']
- spec.files = Dir["LICENSE.txt", "CHANGELOG.md", "VERSION", "lib/**/*.rb", "sig/**/*.rbs"]
-
- spec.metadata = {
- 'source_code_uri' => 'https://github.com/aws/aws-sdk-ruby/tree/version-3/gems/aws-sdk-cloudwatchevidently',
- 'changelog_uri' => 'https://github.com/aws/aws-sdk-ruby/tree/version-3/gems/aws-sdk-cloudwatchevidently/CHANGELOG.md'
- }
-
- spec.add_dependency('aws-sdk-core', '~> 3', '>= 3.241.4')
- spec.add_dependency('aws-sigv4', '~> 1.5')
-
- spec.required_ruby_version = '>= 2.7'
-end
diff --git a/gems/aws-sdk-cloudwatchevidently/features/env.rb b/gems/aws-sdk-cloudwatchevidently/features/env.rb
deleted file mode 100644
index 6d5e185e001..00000000000
--- a/gems/aws-sdk-cloudwatchevidently/features/env.rb
+++ /dev/null
@@ -1,18 +0,0 @@
-# frozen_string_literal: true
-
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-$:.unshift(File.expand_path('../../lib', __FILE__))
-$:.unshift(File.expand_path('../../../aws-sdk-core/features', __FILE__))
-$:.unshift(File.expand_path('../../../aws-sdk-core/lib', __FILE__))
-$:.unshift(File.expand_path('../../../aws-sigv4/lib', __FILE__))
-
-require 'features_helper'
-require 'aws-sdk-cloudwatchevidently'
-
-Aws::CloudWatchEvidently::Client.add_plugin(ApiCallTracker)
diff --git a/gems/aws-sdk-cloudwatchevidently/features/step_definitions.rb b/gems/aws-sdk-cloudwatchevidently/features/step_definitions.rb
deleted file mode 100644
index a26505fe5c5..00000000000
--- a/gems/aws-sdk-cloudwatchevidently/features/step_definitions.rb
+++ /dev/null
@@ -1,8 +0,0 @@
-Before("@cloudwatchevidently") do
- @service = Aws::CloudWatchEvidently::Resource.new
- @client = @service.client
-end
-
-After("@cloudwatchevidently") do
- # shared cleanup logic
-end
diff --git a/gems/aws-sdk-cloudwatchevidently/lib/aws-sdk-cloudwatchevidently.rb b/gems/aws-sdk-cloudwatchevidently/lib/aws-sdk-cloudwatchevidently.rb
deleted file mode 100644
index faa0b4c7764..00000000000
--- a/gems/aws-sdk-cloudwatchevidently/lib/aws-sdk-cloudwatchevidently.rb
+++ /dev/null
@@ -1,61 +0,0 @@
-# frozen_string_literal: true
-
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-
-require 'aws-sdk-core'
-require 'aws-sigv4'
-
-Aws::Plugins::GlobalConfiguration.add_identifier(:cloudwatchevidently)
-
-# This module provides support for Amazon CloudWatch Evidently. This module is available in the
-# `aws-sdk-cloudwatchevidently` gem.
-#
-# # Client
-#
-# The {Client} class provides one method for each API operation. Operation
-# methods each accept a hash of request parameters and return a response
-# structure.
-#
-# cloud_watch_evidently = Aws::CloudWatchEvidently::Client.new
-# resp = cloud_watch_evidently.batch_evaluate_feature(params)
-#
-# See {Client} for more information.
-#
-# # Errors
-#
-# Errors returned from Amazon CloudWatch Evidently are defined in the
-# {Errors} module and all extend {Errors::ServiceError}.
-#
-# begin
-# # do stuff
-# rescue Aws::CloudWatchEvidently::Errors::ServiceError
-# # rescues all Amazon CloudWatch Evidently API errors
-# end
-#
-# See {Errors} for more information.
-#
-# @!group service
-module Aws::CloudWatchEvidently
- autoload :Types, 'aws-sdk-cloudwatchevidently/types'
- autoload :ClientApi, 'aws-sdk-cloudwatchevidently/client_api'
- module Plugins
- autoload :Endpoints, 'aws-sdk-cloudwatchevidently/plugins/endpoints.rb'
- end
- autoload :Client, 'aws-sdk-cloudwatchevidently/client'
- autoload :Errors, 'aws-sdk-cloudwatchevidently/errors'
- autoload :Resource, 'aws-sdk-cloudwatchevidently/resource'
- autoload :EndpointParameters, 'aws-sdk-cloudwatchevidently/endpoint_parameters'
- autoload :EndpointProvider, 'aws-sdk-cloudwatchevidently/endpoint_provider'
- autoload :Endpoints, 'aws-sdk-cloudwatchevidently/endpoints'
-
- GEM_VERSION = '1.53.0'
-
-end
-
-require_relative 'aws-sdk-cloudwatchevidently/customizations'
diff --git a/gems/aws-sdk-cloudwatchevidently/lib/aws-sdk-cloudwatchevidently/client.rb b/gems/aws-sdk-cloudwatchevidently/lib/aws-sdk-cloudwatchevidently/client.rb
deleted file mode 100644
index ddde0d75981..00000000000
--- a/gems/aws-sdk-cloudwatchevidently/lib/aws-sdk-cloudwatchevidently/client.rb
+++ /dev/null
@@ -1,3257 +0,0 @@
-# frozen_string_literal: true
-
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-require 'seahorse/client/plugins/content_length'
-require 'aws-sdk-core/plugins/credentials_configuration'
-require 'aws-sdk-core/plugins/logging'
-require 'aws-sdk-core/plugins/param_converter'
-require 'aws-sdk-core/plugins/param_validator'
-require 'aws-sdk-core/plugins/user_agent'
-require 'aws-sdk-core/plugins/helpful_socket_errors'
-require 'aws-sdk-core/plugins/retry_errors'
-require 'aws-sdk-core/plugins/global_configuration'
-require 'aws-sdk-core/plugins/regional_endpoint'
-require 'aws-sdk-core/plugins/endpoint_discovery'
-require 'aws-sdk-core/plugins/endpoint_pattern'
-require 'aws-sdk-core/plugins/response_paging'
-require 'aws-sdk-core/plugins/stub_responses'
-require 'aws-sdk-core/plugins/idempotency_token'
-require 'aws-sdk-core/plugins/invocation_id'
-require 'aws-sdk-core/plugins/jsonvalue_converter'
-require 'aws-sdk-core/plugins/client_metrics_plugin'
-require 'aws-sdk-core/plugins/client_metrics_send_plugin'
-require 'aws-sdk-core/plugins/transfer_encoding'
-require 'aws-sdk-core/plugins/http_checksum'
-require 'aws-sdk-core/plugins/checksum_algorithm'
-require 'aws-sdk-core/plugins/request_compression'
-require 'aws-sdk-core/plugins/defaults_mode'
-require 'aws-sdk-core/plugins/recursion_detection'
-require 'aws-sdk-core/plugins/telemetry'
-require 'aws-sdk-core/plugins/sign'
-require 'aws-sdk-core/plugins/protocols/rest_json'
-
-module Aws::CloudWatchEvidently
- # An API client for CloudWatchEvidently. To construct a client, you need to configure a `:region` and `:credentials`.
- #
- # client = Aws::CloudWatchEvidently::Client.new(
- # region: region_name,
- # credentials: credentials,
- # # ...
- # )
- #
- # For details on configuring region and credentials see
- # the [developer guide](/sdk-for-ruby/v3/developer-guide/setup-config.html).
- #
- # See {#initialize} for a full list of supported configuration options.
- class Client < Seahorse::Client::Base
-
- include Aws::ClientStubs
-
- @identifier = :cloudwatchevidently
-
- set_api(ClientApi::API)
-
- add_plugin(Seahorse::Client::Plugins::ContentLength)
- add_plugin(Aws::Plugins::CredentialsConfiguration)
- add_plugin(Aws::Plugins::Logging)
- add_plugin(Aws::Plugins::ParamConverter)
- add_plugin(Aws::Plugins::ParamValidator)
- add_plugin(Aws::Plugins::UserAgent)
- add_plugin(Aws::Plugins::HelpfulSocketErrors)
- add_plugin(Aws::Plugins::RetryErrors)
- add_plugin(Aws::Plugins::GlobalConfiguration)
- add_plugin(Aws::Plugins::RegionalEndpoint)
- add_plugin(Aws::Plugins::EndpointDiscovery)
- add_plugin(Aws::Plugins::EndpointPattern)
- add_plugin(Aws::Plugins::ResponsePaging)
- add_plugin(Aws::Plugins::StubResponses)
- add_plugin(Aws::Plugins::IdempotencyToken)
- add_plugin(Aws::Plugins::InvocationId)
- add_plugin(Aws::Plugins::JsonvalueConverter)
- add_plugin(Aws::Plugins::ClientMetricsPlugin)
- add_plugin(Aws::Plugins::ClientMetricsSendPlugin)
- add_plugin(Aws::Plugins::TransferEncoding)
- add_plugin(Aws::Plugins::HttpChecksum)
- add_plugin(Aws::Plugins::ChecksumAlgorithm)
- add_plugin(Aws::Plugins::RequestCompression)
- add_plugin(Aws::Plugins::DefaultsMode)
- add_plugin(Aws::Plugins::RecursionDetection)
- add_plugin(Aws::Plugins::Telemetry)
- add_plugin(Aws::Plugins::Sign)
- add_plugin(Aws::Plugins::Protocols::RestJson)
- add_plugin(Aws::CloudWatchEvidently::Plugins::Endpoints)
-
- # @overload initialize(options)
- # @param [Hash] options
- #
- # @option options [Array] :plugins ([]])
- # A list of plugins to apply to the client. Each plugin is either a
- # class name or an instance of a plugin class.
- #
- # @option options [required, Aws::CredentialProvider] :credentials
- # Your AWS credentials used for authentication. This can be any class that includes and implements
- # `Aws::CredentialProvider`, or instance of any one of the following classes:
- #
- # * `Aws::Credentials` - Used for configuring static, non-refreshing
- # credentials.
- #
- # * `Aws::SharedCredentials` - Used for loading static credentials from a
- # shared file, such as `~/.aws/config`.
- #
- # * `Aws::AssumeRoleCredentials` - Used when you need to assume a role.
- #
- # * `Aws::AssumeRoleWebIdentityCredentials` - Used when you need to
- # assume a role after providing credentials via the web.
- #
- # * `Aws::SSOCredentials` - Used for loading credentials from AWS SSO using an
- # access token generated from `aws login`.
- #
- # * `Aws::ProcessCredentials` - Used for loading credentials from a
- # process that outputs to stdout.
- #
- # * `Aws::InstanceProfileCredentials` - Used for loading credentials
- # from an EC2 IMDS on an EC2 instance.
- #
- # * `Aws::ECSCredentials` - Used for loading credentials from
- # instances running in ECS.
- #
- # * `Aws::CognitoIdentityCredentials` - Used for loading credentials
- # from the Cognito Identity service.
- #
- # When `:credentials` are not configured directly, the following locations will be searched for credentials:
- #
- # * `Aws.config[:credentials]`
- #
- # * The `:access_key_id`, `:secret_access_key`, `:session_token`, and
- # `:account_id` options.
- #
- # * `ENV['AWS_ACCESS_KEY_ID']`, `ENV['AWS_SECRET_ACCESS_KEY']`,
- # `ENV['AWS_SESSION_TOKEN']`, and `ENV['AWS_ACCOUNT_ID']`.
- #
- # * `~/.aws/credentials`
- #
- # * `~/.aws/config`
- #
- # * EC2/ECS IMDS instance profile - When used by default, the timeouts are very aggressive.
- # Construct and pass an instance of `Aws::InstanceProfileCredentials` or `Aws::ECSCredentials` to
- # enable retries and extended timeouts. Instance profile credential fetching can be disabled by
- # setting `ENV['AWS_EC2_METADATA_DISABLED']` to `true`.
- #
- # @option options [required, String] :region
- # The AWS region to connect to. The configured `:region` is
- # used to determine the service `:endpoint`. When not passed,
- # a default `:region` is searched for in the following locations:
- #
- # * `Aws.config[:region]`
- # * `ENV['AWS_REGION']`
- # * `ENV['AMAZON_REGION']`
- # * `ENV['AWS_DEFAULT_REGION']`
- # * `~/.aws/credentials`
- # * `~/.aws/config`
- #
- # @option options [String] :access_key_id
- #
- # @option options [String] :account_id
- #
- # @option options [Boolean] :active_endpoint_cache (false)
- # When set to `true`, a thread polling for endpoints will be running in
- # the background every 60 secs (default). Defaults to `false`.
- #
- # @option options [Boolean] :adaptive_retry_wait_to_fill (true)
- # Used only in `adaptive` retry mode. When true, the request will sleep
- # until there is sufficent client side capacity to retry the request.
- # When false, the request will raise a `RetryCapacityNotAvailableError` and will
- # not retry instead of sleeping.
- #
- # @option options [Array] :auth_scheme_preference
- # A list of preferred authentication schemes to use when making a request. Supported values are:
- # `sigv4`, `sigv4a`, `httpBearerAuth`, and `noAuth`. When set using `ENV['AWS_AUTH_SCHEME_PREFERENCE']` or in
- # shared config as `auth_scheme_preference`, the value should be a comma-separated list.
- #
- # @option options [Boolean] :client_side_monitoring (false)
- # When `true`, client-side metrics will be collected for all API requests from
- # this client.
- #
- # @option options [String] :client_side_monitoring_client_id ("")
- # Allows you to provide an identifier for this client which will be attached to
- # all generated client side metrics. Defaults to an empty string.
- #
- # @option options [String] :client_side_monitoring_host ("127.0.0.1")
- # Allows you to specify the DNS hostname or IPv4 or IPv6 address that the client
- # side monitoring agent is running on, where client metrics will be published via UDP.
- #
- # @option options [Integer] :client_side_monitoring_port (31000)
- # Required for publishing client metrics. The port that the client side monitoring
- # agent is running on, where client metrics will be published via UDP.
- #
- # @option options [Aws::ClientSideMonitoring::Publisher] :client_side_monitoring_publisher (Aws::ClientSideMonitoring::Publisher)
- # Allows you to provide a custom client-side monitoring publisher class. By default,
- # will use the Client Side Monitoring Agent Publisher.
- #
- # @option options [Boolean] :convert_params (true)
- # When `true`, an attempt is made to coerce request parameters into
- # the required types.
- #
- # @option options [Boolean] :correct_clock_skew (true)
- # Used only in `standard` and adaptive retry modes. Specifies whether to apply
- # a clock skew correction and retry requests with skewed client clocks.
- #
- # @option options [String] :defaults_mode ("legacy")
- # See {Aws::DefaultsModeConfiguration} for a list of the
- # accepted modes and the configuration defaults that are included.
- #
- # @option options [Boolean] :disable_host_prefix_injection (false)
- # When `true`, the SDK will not prepend the modeled host prefix to the endpoint.
- #
- # @option options [Boolean] :disable_request_compression (false)
- # When set to 'true' the request body will not be compressed
- # for supported operations.
- #
- # @option options [String, URI::HTTPS, URI::HTTP] :endpoint
- # Normally you should not configure the `:endpoint` option
- # directly. This is normally constructed from the `:region`
- # option. Configuring `:endpoint` is normally reserved for
- # connecting to test or custom endpoints. The endpoint should
- # be a URI formatted like:
- #
- # 'http://example.com'
- # 'https://example.com'
- # 'http://example.com:123'
- #
- # @option options [Integer] :endpoint_cache_max_entries (1000)
- # Used for the maximum size limit of the LRU cache storing endpoints data
- # for endpoint discovery enabled operations. Defaults to 1000.
- #
- # @option options [Integer] :endpoint_cache_max_threads (10)
- # Used for the maximum threads in use for polling endpoints to be cached, defaults to 10.
- #
- # @option options [Integer] :endpoint_cache_poll_interval (60)
- # When :endpoint_discovery and :active_endpoint_cache is enabled,
- # Use this option to config the time interval in seconds for making
- # requests fetching endpoints information. Defaults to 60 sec.
- #
- # @option options [Boolean] :endpoint_discovery (false)
- # When set to `true`, endpoint discovery will be enabled for operations when available.
- #
- # @option options [Boolean] :ignore_configured_endpoint_urls
- # Setting to true disables use of endpoint URLs provided via environment
- # variables and the shared configuration file.
- #
- # @option options [Aws::Log::Formatter] :log_formatter (Aws::Log::Formatter.default)
- # The log formatter.
- #
- # @option options [Symbol] :log_level (:info)
- # The log level to send messages to the `:logger` at.
- #
- # @option options [Logger] :logger
- # The Logger instance to send log messages to. If this option
- # is not set, logging will be disabled.
- #
- # @option options [Integer] :max_attempts (3)
- # An integer representing the maximum number attempts that will be made for
- # a single request, including the initial attempt. For example,
- # setting this value to 5 will result in a request being retried up to
- # 4 times. Used in `standard` and `adaptive` retry modes.
- #
- # @option options [String] :profile ("default")
- # Used when loading credentials from the shared credentials file at `HOME/.aws/credentials`.
- # When not specified, 'default' is used.
- #
- # @option options [String] :request_checksum_calculation ("when_supported")
- # Determines when a checksum will be calculated for request payloads. Values are:
- #
- # * `when_supported` - (default) When set, a checksum will be
- # calculated for all request payloads of operations modeled with the
- # `httpChecksum` trait where `requestChecksumRequired` is `true` and/or a
- # `requestAlgorithmMember` is modeled.
- # * `when_required` - When set, a checksum will only be calculated for
- # request payloads of operations modeled with the `httpChecksum` trait where
- # `requestChecksumRequired` is `true` or where a `requestAlgorithmMember`
- # is modeled and supplied.
- #
- # @option options [Integer] :request_min_compression_size_bytes (10240)
- # The minimum size in bytes that triggers compression for request
- # bodies. The value must be non-negative integer value between 0
- # and 10485780 bytes inclusive.
- #
- # @option options [String] :response_checksum_validation ("when_supported")
- # Determines when checksum validation will be performed on response payloads. Values are:
- #
- # * `when_supported` - (default) When set, checksum validation is performed on all
- # response payloads of operations modeled with the `httpChecksum` trait where
- # `responseAlgorithms` is modeled, except when no modeled checksum algorithms
- # are supported.
- # * `when_required` - When set, checksum validation is not performed on
- # response payloads of operations unless the checksum algorithm is supported and
- # the `requestValidationModeMember` member is set to `ENABLED`.
- #
- # @option options [Proc] :retry_backoff
- # A proc or lambda used for backoff. Defaults to 2**retries * retry_base_delay.
- # This option is only used in the `legacy` retry mode.
- #
- # @option options [Float] :retry_base_delay (0.3)
- # The base delay in seconds used by the default backoff function. This option
- # is only used in the `legacy` retry mode.
- #
- # @option options [Symbol] :retry_jitter (:none)
- # A delay randomiser function used by the default backoff function.
- # Some predefined functions can be referenced by name - :none, :equal, :full,
- # otherwise a Proc that takes and returns a number. This option is only used
- # in the `legacy` retry mode.
- #
- # @see https://www.awsarchitectureblog.com/2015/03/backoff.html
- #
- # @option options [Integer] :retry_limit (3)
- # The maximum number of times to retry failed requests. Only
- # ~ 500 level server errors and certain ~ 400 level client errors
- # are retried. Generally, these are throttling errors, data
- # checksum errors, networking errors, timeout errors, auth errors,
- # endpoint discovery, and errors from expired credentials.
- # This option is only used in the `legacy` retry mode.
- #
- # @option options [Integer] :retry_max_delay (0)
- # The maximum number of seconds to delay between retries (0 for no limit)
- # used by the default backoff function. This option is only used in the
- # `legacy` retry mode.
- #
- # @option options [String] :retry_mode ("legacy")
- # Specifies which retry algorithm to use. Values are:
- #
- # * `legacy` - The pre-existing retry behavior. This is default value if
- # no retry mode is provided.
- #
- # * `standard` - A standardized set of retry rules across the AWS SDKs.
- # This includes support for retry quotas, which limit the number of
- # unsuccessful retries a client can make.
- #
- # * `adaptive` - An experimental retry mode that includes all the
- # functionality of `standard` mode along with automatic client side
- # throttling. This is a provisional mode that may change behavior
- # in the future.
- #
- # @option options [String] :sdk_ua_app_id
- # A unique and opaque application ID that is appended to the
- # User-Agent header as app/sdk_ua_app_id. It should have a
- # maximum length of 50. This variable is sourced from environment
- # variable AWS_SDK_UA_APP_ID or the shared config profile attribute sdk_ua_app_id.
- #
- # @option options [String] :secret_access_key
- #
- # @option options [String] :session_token
- #
- # @option options [Array] :sigv4a_signing_region_set
- # A list of regions that should be signed with SigV4a signing. When
- # not passed, a default `:sigv4a_signing_region_set` is searched for
- # in the following locations:
- #
- # * `Aws.config[:sigv4a_signing_region_set]`
- # * `ENV['AWS_SIGV4A_SIGNING_REGION_SET']`
- # * `~/.aws/config`
- #
- # @option options [Boolean] :stub_responses (false)
- # Causes the client to return stubbed responses. By default
- # fake responses are generated and returned. You can specify
- # the response data to return or errors to raise by calling
- # {ClientStubs#stub_responses}. See {ClientStubs} for more information.
- #
- # ** Please note ** When response stubbing is enabled, no HTTP
- # requests are made, and retries are disabled.
- #
- # @option options [Aws::Telemetry::TelemetryProviderBase] :telemetry_provider (Aws::Telemetry::NoOpTelemetryProvider)
- # Allows you to provide a telemetry provider, which is used to
- # emit telemetry data. By default, uses `NoOpTelemetryProvider` which
- # will not record or emit any telemetry data. The SDK supports the
- # following telemetry providers:
- #
- # * OpenTelemetry (OTel) - To use the OTel provider, install and require the
- # `opentelemetry-sdk` gem and then, pass in an instance of a
- # `Aws::Telemetry::OTelProvider` for telemetry provider.
- #
- # @option options [Aws::TokenProvider] :token_provider
- # Your Bearer token used for authentication. This can be any class that includes and implements
- # `Aws::TokenProvider`, or instance of any one of the following classes:
- #
- # * `Aws::StaticTokenProvider` - Used for configuring static, non-refreshing
- # tokens.
- #
- # * `Aws::SSOTokenProvider` - Used for loading tokens from AWS SSO using an
- # access token generated from `aws login`.
- #
- # When `:token_provider` is not configured directly, the `Aws::TokenProviderChain`
- # will be used to search for tokens configured for your profile in shared configuration files.
- #
- # @option options [Boolean] :use_dualstack_endpoint
- # When set to `true`, dualstack enabled endpoints (with `.aws` TLD)
- # will be used if available.
- #
- # @option options [Boolean] :use_fips_endpoint
- # When set to `true`, fips compatible endpoints will be used if available.
- # When a `fips` region is used, the region is normalized and this config
- # is set to `true`.
- #
- # @option options [Boolean] :validate_params (true)
- # When `true`, request parameters are validated before
- # sending the request.
- #
- # @option options [Aws::CloudWatchEvidently::EndpointProvider] :endpoint_provider
- # The endpoint provider used to resolve endpoints. Any object that responds to
- # `#resolve_endpoint(parameters)` where `parameters` is a Struct similar to
- # `Aws::CloudWatchEvidently::EndpointParameters`.
- #
- # @option options [Float] :http_continue_timeout (1)
- # The number of seconds to wait for a 100-continue response before sending the
- # request body. This option has no effect unless the request has "Expect"
- # header set to "100-continue". Defaults to `nil` which disables this
- # behaviour. This value can safely be set per request on the session.
- #
- # @option options [Float] :http_idle_timeout (5)
- # The number of seconds a connection is allowed to sit idle before it
- # is considered stale. Stale connections are closed and removed from the
- # pool before making a request.
- #
- # @option options [Float] :http_open_timeout (15)
- # The default number of seconds to wait for response data.
- # This value can safely be set per-request on the session.
- #
- # @option options [URI::HTTP,String] :http_proxy
- # A proxy to send requests through. Formatted like 'http://proxy.com:123'.
- #
- # @option options [Float] :http_read_timeout (60)
- # The default number of seconds to wait for response data.
- # This value can safely be set per-request on the session.
- #
- # @option options [Boolean] :http_wire_trace (false)
- # When `true`, HTTP debug output will be sent to the `:logger`.
- #
- # @option options [Proc] :on_chunk_received
- # When a Proc object is provided, it will be used as callback when each chunk
- # of the response body is received. It provides three arguments: the chunk,
- # the number of bytes received, and the total number of
- # bytes in the response (or nil if the server did not send a `content-length`).
- #
- # @option options [Proc] :on_chunk_sent
- # When a Proc object is provided, it will be used as callback when each chunk
- # of the request body is sent. It provides three arguments: the chunk,
- # the number of bytes read from the body, and the total number of
- # bytes in the body.
- #
- # @option options [Boolean] :raise_response_errors (true)
- # When `true`, response errors are raised.
- #
- # @option options [String] :ssl_ca_bundle
- # Full path to the SSL certificate authority bundle file that should be used when
- # verifying peer certificates. If you do not pass `:ssl_ca_bundle` or
- # `:ssl_ca_directory` the the system default will be used if available.
- #
- # @option options [String] :ssl_ca_directory
- # Full path of the directory that contains the unbundled SSL certificate
- # authority files for verifying peer certificates. If you do
- # not pass `:ssl_ca_bundle` or `:ssl_ca_directory` the the system
- # default will be used if available.
- #
- # @option options [String] :ssl_ca_store
- # Sets the X509::Store to verify peer certificate.
- #
- # @option options [OpenSSL::X509::Certificate] :ssl_cert
- # Sets a client certificate when creating http connections.
- #
- # @option options [OpenSSL::PKey] :ssl_key
- # Sets a client key when creating http connections.
- #
- # @option options [Float] :ssl_timeout
- # Sets the SSL timeout in seconds
- #
- # @option options [Boolean] :ssl_verify_peer (true)
- # When `true`, SSL peer certificates are verified when establishing a connection.
- #
- def initialize(*args)
- super
- end
-
- # @!group API Operations
-
- # This operation assigns feature variation to user sessions. For each
- # user session, you pass in an `entityID` that represents the user.
- # Evidently then checks the evaluation rules and assigns the variation.
- #
- # The first rules that are evaluated are the override rules. If the
- # user's `entityID` matches an override rule, the user is served the
- # variation specified by that rule.
- #
- # Next, if there is a launch of the feature, the user might be assigned
- # to a variation in the launch. The chance of this depends on the
- # percentage of users that are allocated to that launch. If the user is
- # enrolled in the launch, the variation they are served depends on the
- # allocation of the various feature variations used for the launch.
- #
- # If the user is not assigned to a launch, and there is an ongoing
- # experiment for this feature, the user might be assigned to a variation
- # in the experiment. The chance of this depends on the percentage of
- # users that are allocated to that experiment. If the user is enrolled
- # in the experiment, the variation they are served depends on the
- # allocation of the various feature variations used for the experiment.
- #
- # If the user is not assigned to a launch or experiment, they are served
- # the default variation.
- #
- # @option params [required, String] :project
- # The name or ARN of the project that contains the feature being
- # evaluated.
- #
- # @option params [required, Array] :requests
- # An array of structures, where each structure assigns a feature
- # variation to one user session.
- #
- # @return [Types::BatchEvaluateFeatureResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::BatchEvaluateFeatureResponse#results #results} => Array<Types::EvaluationResult>
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.batch_evaluate_feature({
- # project: "ProjectRef", # required
- # requests: [ # required
- # {
- # entity_id: "EntityId", # required
- # evaluation_context: "JsonValue",
- # feature: "FeatureName", # required
- # },
- # ],
- # })
- #
- # @example Response structure
- #
- # resp.results #=> Array
- # resp.results[0].details #=> String
- # resp.results[0].entity_id #=> String
- # resp.results[0].feature #=> String
- # resp.results[0].project #=> String
- # resp.results[0].reason #=> String
- # resp.results[0].value.bool_value #=> Boolean
- # resp.results[0].value.double_value #=> Float
- # resp.results[0].value.long_value #=> Integer
- # resp.results[0].value.string_value #=> String
- # resp.results[0].variation #=> String
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/BatchEvaluateFeature AWS API Documentation
- #
- # @overload batch_evaluate_feature(params = {})
- # @param [Hash] params ({})
- def batch_evaluate_feature(params = {}, options = {})
- req = build_request(:batch_evaluate_feature, params)
- req.send_request(options)
- end
-
- # Creates an Evidently *experiment*. Before you create an experiment,
- # you must create the feature to use for the experiment.
- #
- # An experiment helps you make feature design decisions based on
- # evidence and data. An experiment can test as many as five variations
- # at once. Evidently collects experiment data and analyzes it by
- # statistical methods, and provides clear recommendations about which
- # variations perform better.
- #
- # You can optionally specify a `segment` to have the experiment consider
- # only certain audience types in the experiment, such as using only user
- # sessions from a certain location or who use a certain internet
- # browser.
- #
- # Don't use this operation to update an existing experiment. Instead,
- # use [UpdateExperiment][1].
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_UpdateExperiment.html
- #
- # @option params [String] :description
- # An optional description of the experiment.
- #
- # @option params [required, Array] :metric_goals
- # An array of structures that defines the metrics used for the
- # experiment, and whether a higher or lower value for each metric is the
- # goal.
- #
- # @option params [required, String] :name
- # A name for the new experiment.
- #
- # @option params [Types::OnlineAbConfig] :online_ab_config
- # A structure that contains the configuration of which variation to use
- # as the "control" version. tThe "control" version is used for
- # comparison with other variations. This structure also specifies how
- # much experiment traffic is allocated to each variation.
- #
- # @option params [required, String] :project
- # The name or ARN of the project that you want to create the new
- # experiment in.
- #
- # @option params [String] :randomization_salt
- # When Evidently assigns a particular user session to an experiment, it
- # must use a randomization ID to determine which variation the user
- # session is served. This randomization ID is a combination of the
- # entity ID and `randomizationSalt`. If you omit `randomizationSalt`,
- # Evidently uses the experiment name as the `randomizationSalt`.
- #
- # @option params [Integer] :sampling_rate
- # The portion of the available audience that you want to allocate to
- # this experiment, in thousandths of a percent. The available audience
- # is the total audience minus the audience that you have allocated to
- # overrides or current launches of this feature.
- #
- # This is represented in thousandths of a percent. For example, specify
- # 10,000 to allocate 10% of the available audience.
- #
- # @option params [String] :segment
- # Specifies an audience *segment* to use in the experiment. When a
- # segment is used in an experiment, only user sessions that match the
- # segment pattern are used in the experiment.
- #
- # @option params [Hash] :tags
- # Assigns one or more tags (key-value pairs) to the experiment.
- #
- # Tags can help you organize and categorize your resources. You can also
- # use them to scope user permissions by granting a user permission to
- # access or change only resources with certain tag values.
- #
- # Tags don't have any semantic meaning to Amazon Web Services and are
- # interpreted strictly as strings of characters.
- #
- # You can associate as many as 50 tags with an experiment.
- #
- # For more information, see [Tagging Amazon Web Services resources][1].
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html
- #
- # @option params [required, Array] :treatments
- # An array of structures that describe the configuration of each feature
- # variation used in the experiment.
- #
- # @return [Types::CreateExperimentResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::CreateExperimentResponse#experiment #experiment} => Types::Experiment
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.create_experiment({
- # description: "Description",
- # metric_goals: [ # required
- # {
- # desired_change: "INCREASE", # accepts INCREASE, DECREASE
- # metric_definition: { # required
- # entity_id_key: "JsonPath", # required
- # event_pattern: "MetricDefinitionConfigEventPatternString",
- # name: "CwDimensionSafeName", # required
- # unit_label: "MetricUnitLabel",
- # value_key: "JsonPath", # required
- # },
- # },
- # ],
- # name: "ExperimentName", # required
- # online_ab_config: {
- # control_treatment_name: "TreatmentName",
- # treatment_weights: {
- # "TreatmentName" => 1,
- # },
- # },
- # project: "ProjectRef", # required
- # randomization_salt: "RandomizationSalt",
- # sampling_rate: 1,
- # segment: "SegmentRef",
- # tags: {
- # "TagKey" => "TagValue",
- # },
- # treatments: [ # required
- # {
- # description: "Description",
- # feature: "FeatureName", # required
- # name: "TreatmentName", # required
- # variation: "VariationName", # required
- # },
- # ],
- # })
- #
- # @example Response structure
- #
- # resp.experiment.arn #=> String
- # resp.experiment.created_time #=> Time
- # resp.experiment.description #=> String
- # resp.experiment.execution.ended_time #=> Time
- # resp.experiment.execution.started_time #=> Time
- # resp.experiment.last_updated_time #=> Time
- # resp.experiment.metric_goals #=> Array
- # resp.experiment.metric_goals[0].desired_change #=> String, one of "INCREASE", "DECREASE"
- # resp.experiment.metric_goals[0].metric_definition.entity_id_key #=> String
- # resp.experiment.metric_goals[0].metric_definition.event_pattern #=> String
- # resp.experiment.metric_goals[0].metric_definition.name #=> String
- # resp.experiment.metric_goals[0].metric_definition.unit_label #=> String
- # resp.experiment.metric_goals[0].metric_definition.value_key #=> String
- # resp.experiment.name #=> String
- # resp.experiment.online_ab_definition.control_treatment_name #=> String
- # resp.experiment.online_ab_definition.treatment_weights #=> Hash
- # resp.experiment.online_ab_definition.treatment_weights["TreatmentName"] #=> Integer
- # resp.experiment.project #=> String
- # resp.experiment.randomization_salt #=> String
- # resp.experiment.sampling_rate #=> Integer
- # resp.experiment.schedule.analysis_complete_time #=> Time
- # resp.experiment.segment #=> String
- # resp.experiment.status #=> String, one of "CREATED", "UPDATING", "RUNNING", "COMPLETED", "CANCELLED"
- # resp.experiment.status_reason #=> String
- # resp.experiment.tags #=> Hash
- # resp.experiment.tags["TagKey"] #=> String
- # resp.experiment.treatments #=> Array
- # resp.experiment.treatments[0].description #=> String
- # resp.experiment.treatments[0].feature_variations #=> Hash
- # resp.experiment.treatments[0].feature_variations["FeatureName"] #=> String
- # resp.experiment.treatments[0].name #=> String
- # resp.experiment.type #=> String, one of "aws.evidently.onlineab"
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/CreateExperiment AWS API Documentation
- #
- # @overload create_experiment(params = {})
- # @param [Hash] params ({})
- def create_experiment(params = {}, options = {})
- req = build_request(:create_experiment, params)
- req.send_request(options)
- end
-
- # Creates an Evidently *feature* that you want to launch or test. You
- # can define up to five variations of a feature, and use these
- # variations in your launches and experiments. A feature must be created
- # in a project. For information about creating a project, see
- # [CreateProject][1].
- #
- # Don't use this operation to update an existing feature. Instead, use
- # [UpdateFeature][2].
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_CreateProject.html
- # [2]: https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_UpdateFeature.html
- #
- # @option params [String] :default_variation
- # The name of the variation to use as the default variation. The default
- # variation is served to users who are not allocated to any ongoing
- # launches or experiments of this feature.
- #
- # This variation must also be listed in the `variations` structure.
- #
- # If you omit `defaultVariation`, the first variation listed in the
- # `variations` structure is used as the default variation.
- #
- # @option params [String] :description
- # An optional description of the feature.
- #
- # @option params [Hash] :entity_overrides
- # Specify users that should always be served a specific variation of a
- # feature. Each user is specified by a key-value pair . For each key,
- # specify a user by entering their user ID, account ID, or some other
- # identifier. For the value, specify the name of the variation that they
- # are to be served.
- #
- # This parameter is limited to 2500 overrides or a total of 40KB. The
- # 40KB limit includes an overhead of 6 bytes per override.
- #
- # @option params [String] :evaluation_strategy
- # Specify `ALL_RULES` to activate the traffic allocation specified by
- # any ongoing launches or experiments. Specify `DEFAULT_VARIATION` to
- # serve the default variation to all users instead.
- #
- # @option params [required, String] :name
- # The name for the new feature.
- #
- # @option params [required, String] :project
- # The name or ARN of the project that is to contain the new feature.
- #
- # @option params [Hash] :tags
- # Assigns one or more tags (key-value pairs) to the feature.
- #
- # Tags can help you organize and categorize your resources. You can also
- # use them to scope user permissions by granting a user permission to
- # access or change only resources with certain tag values.
- #
- # Tags don't have any semantic meaning to Amazon Web Services and are
- # interpreted strictly as strings of characters.
- #
- # You can associate as many as 50 tags with a feature.
- #
- # For more information, see [Tagging Amazon Web Services resources][1].
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html
- #
- # @option params [required, Array] :variations
- # An array of structures that contain the configuration of the
- # feature's different variations.
- #
- # @return [Types::CreateFeatureResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::CreateFeatureResponse#feature #feature} => Types::Feature
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.create_feature({
- # default_variation: "VariationName",
- # description: "Description",
- # entity_overrides: {
- # "EntityId" => "VariationName",
- # },
- # evaluation_strategy: "ALL_RULES", # accepts ALL_RULES, DEFAULT_VARIATION
- # name: "FeatureName", # required
- # project: "ProjectRef", # required
- # tags: {
- # "TagKey" => "TagValue",
- # },
- # variations: [ # required
- # {
- # name: "VariationName", # required
- # value: { # required
- # bool_value: false,
- # double_value: 1.0,
- # long_value: 1,
- # string_value: "VariableValueStringValueString",
- # },
- # },
- # ],
- # })
- #
- # @example Response structure
- #
- # resp.feature.arn #=> String
- # resp.feature.created_time #=> Time
- # resp.feature.default_variation #=> String
- # resp.feature.description #=> String
- # resp.feature.entity_overrides #=> Hash
- # resp.feature.entity_overrides["EntityId"] #=> String
- # resp.feature.evaluation_rules #=> Array
- # resp.feature.evaluation_rules[0].name #=> String
- # resp.feature.evaluation_rules[0].type #=> String
- # resp.feature.evaluation_strategy #=> String, one of "ALL_RULES", "DEFAULT_VARIATION"
- # resp.feature.last_updated_time #=> Time
- # resp.feature.name #=> String
- # resp.feature.project #=> String
- # resp.feature.status #=> String, one of "AVAILABLE", "UPDATING"
- # resp.feature.tags #=> Hash
- # resp.feature.tags["TagKey"] #=> String
- # resp.feature.value_type #=> String, one of "STRING", "LONG", "DOUBLE", "BOOLEAN"
- # resp.feature.variations #=> Array
- # resp.feature.variations[0].name #=> String
- # resp.feature.variations[0].value.bool_value #=> Boolean
- # resp.feature.variations[0].value.double_value #=> Float
- # resp.feature.variations[0].value.long_value #=> Integer
- # resp.feature.variations[0].value.string_value #=> String
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/CreateFeature AWS API Documentation
- #
- # @overload create_feature(params = {})
- # @param [Hash] params ({})
- def create_feature(params = {}, options = {})
- req = build_request(:create_feature, params)
- req.send_request(options)
- end
-
- # Creates a *launch* of a given feature. Before you create a launch, you
- # must create the feature to use for the launch.
- #
- # You can use a launch to safely validate new features by serving them
- # to a specified percentage of your users while you roll out the
- # feature. You can monitor the performance of the new feature to help
- # you decide when to ramp up traffic to more users. This helps you
- # reduce risk and identify unintended consequences before you fully
- # launch the feature.
- #
- # Don't use this operation to update an existing launch. Instead, use
- # [UpdateLaunch][1].
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_UpdateLaunch.html
- #
- # @option params [String] :description
- # An optional description for the launch.
- #
- # @option params [required, Array] :groups
- # An array of structures that contains the feature and variations that
- # are to be used for the launch.
- #
- # @option params [Array] :metric_monitors
- # An array of structures that define the metrics that will be used to
- # monitor the launch performance.
- #
- # @option params [required, String] :name
- # The name for the new launch.
- #
- # @option params [required, String] :project
- # The name or ARN of the project that you want to create the launch in.
- #
- # @option params [String] :randomization_salt
- # When Evidently assigns a particular user session to a launch, it must
- # use a randomization ID to determine which variation the user session
- # is served. This randomization ID is a combination of the entity ID and
- # `randomizationSalt`. If you omit `randomizationSalt`, Evidently uses
- # the launch name as the `randomizationSalt`.
- #
- # @option params [Types::ScheduledSplitsLaunchConfig] :scheduled_splits_config
- # An array of structures that define the traffic allocation percentages
- # among the feature variations during each step of the launch.
- #
- # @option params [Hash] :tags
- # Assigns one or more tags (key-value pairs) to the launch.
- #
- # Tags can help you organize and categorize your resources. You can also
- # use them to scope user permissions by granting a user permission to
- # access or change only resources with certain tag values.
- #
- # Tags don't have any semantic meaning to Amazon Web Services and are
- # interpreted strictly as strings of characters.
- #
- # You can associate as many as 50 tags with a launch.
- #
- # For more information, see [Tagging Amazon Web Services resources][1].
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html
- #
- # @return [Types::CreateLaunchResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::CreateLaunchResponse#launch #launch} => Types::Launch
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.create_launch({
- # description: "Description",
- # groups: [ # required
- # {
- # description: "Description",
- # feature: "FeatureName", # required
- # name: "GroupName", # required
- # variation: "VariationName", # required
- # },
- # ],
- # metric_monitors: [
- # {
- # metric_definition: { # required
- # entity_id_key: "JsonPath", # required
- # event_pattern: "MetricDefinitionConfigEventPatternString",
- # name: "CwDimensionSafeName", # required
- # unit_label: "MetricUnitLabel",
- # value_key: "JsonPath", # required
- # },
- # },
- # ],
- # name: "LaunchName", # required
- # project: "ProjectRef", # required
- # randomization_salt: "RandomizationSalt",
- # scheduled_splits_config: {
- # steps: [ # required
- # {
- # group_weights: { # required
- # "GroupName" => 1,
- # },
- # segment_overrides: [
- # {
- # evaluation_order: 1, # required
- # segment: "SegmentRef", # required
- # weights: { # required
- # "GroupName" => 1,
- # },
- # },
- # ],
- # start_time: Time.now, # required
- # },
- # ],
- # },
- # tags: {
- # "TagKey" => "TagValue",
- # },
- # })
- #
- # @example Response structure
- #
- # resp.launch.arn #=> String
- # resp.launch.created_time #=> Time
- # resp.launch.description #=> String
- # resp.launch.execution.ended_time #=> Time
- # resp.launch.execution.started_time #=> Time
- # resp.launch.groups #=> Array
- # resp.launch.groups[0].description #=> String
- # resp.launch.groups[0].feature_variations #=> Hash
- # resp.launch.groups[0].feature_variations["FeatureName"] #=> String
- # resp.launch.groups[0].name #=> String
- # resp.launch.last_updated_time #=> Time
- # resp.launch.metric_monitors #=> Array
- # resp.launch.metric_monitors[0].metric_definition.entity_id_key #=> String
- # resp.launch.metric_monitors[0].metric_definition.event_pattern #=> String
- # resp.launch.metric_monitors[0].metric_definition.name #=> String
- # resp.launch.metric_monitors[0].metric_definition.unit_label #=> String
- # resp.launch.metric_monitors[0].metric_definition.value_key #=> String
- # resp.launch.name #=> String
- # resp.launch.project #=> String
- # resp.launch.randomization_salt #=> String
- # resp.launch.scheduled_splits_definition.steps #=> Array
- # resp.launch.scheduled_splits_definition.steps[0].group_weights #=> Hash
- # resp.launch.scheduled_splits_definition.steps[0].group_weights["GroupName"] #=> Integer
- # resp.launch.scheduled_splits_definition.steps[0].segment_overrides #=> Array
- # resp.launch.scheduled_splits_definition.steps[0].segment_overrides[0].evaluation_order #=> Integer
- # resp.launch.scheduled_splits_definition.steps[0].segment_overrides[0].segment #=> String
- # resp.launch.scheduled_splits_definition.steps[0].segment_overrides[0].weights #=> Hash
- # resp.launch.scheduled_splits_definition.steps[0].segment_overrides[0].weights["GroupName"] #=> Integer
- # resp.launch.scheduled_splits_definition.steps[0].start_time #=> Time
- # resp.launch.status #=> String, one of "CREATED", "UPDATING", "RUNNING", "COMPLETED", "CANCELLED"
- # resp.launch.status_reason #=> String
- # resp.launch.tags #=> Hash
- # resp.launch.tags["TagKey"] #=> String
- # resp.launch.type #=> String, one of "aws.evidently.splits"
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/CreateLaunch AWS API Documentation
- #
- # @overload create_launch(params = {})
- # @param [Hash] params ({})
- def create_launch(params = {}, options = {})
- req = build_request(:create_launch, params)
- req.send_request(options)
- end
-
- # Creates a project, which is the logical object in Evidently that can
- # contain features, launches, and experiments. Use projects to group
- # similar features together.
- #
- # To update an existing project, use [UpdateProject][1].
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_UpdateProject.html
- #
- # @option params [Types::ProjectAppConfigResourceConfig] :app_config_resource
- # Use this parameter if the project will use *client-side evaluation
- # powered by AppConfig*. Client-side evaluation allows your application
- # to assign variations to user sessions locally instead of by calling
- # the [EvaluateFeature][1] operation. This mitigates the latency and
- # availability risks that come with an API call. For more information,
- # see [ Client-side evaluation - powered by AppConfig.][2]
- #
- # This parameter is a structure that contains information about the
- # AppConfig application and environment that will be used as for
- # client-side evaluation.
- #
- # To create a project that uses client-side evaluation, you must have
- # the `evidently:ExportProjectAsConfiguration` permission.
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_EvaluateFeature.html
- # [2]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Evidently-client-side-evaluation.html
- #
- # @option params [Types::ProjectDataDeliveryConfig] :data_delivery
- # A structure that contains information about where Evidently is to
- # store evaluation events for longer term storage, if you choose to do
- # so. If you choose not to store these events, Evidently deletes them
- # after using them to produce metrics and other experiment results that
- # you can view.
- #
- # @option params [String] :description
- # An optional description of the project.
- #
- # @option params [required, String] :name
- # The name for the project.
- #
- # @option params [Hash] :tags
- # Assigns one or more tags (key-value pairs) to the project.
- #
- # Tags can help you organize and categorize your resources. You can also
- # use them to scope user permissions by granting a user permission to
- # access or change only resources with certain tag values.
- #
- # Tags don't have any semantic meaning to Amazon Web Services and are
- # interpreted strictly as strings of characters.
- #
- # You can associate as many as 50 tags with a project.
- #
- # For more information, see [Tagging Amazon Web Services resources][1].
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html
- #
- # @return [Types::CreateProjectResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::CreateProjectResponse#project #project} => Types::Project
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.create_project({
- # app_config_resource: {
- # application_id: "AppConfigResourceId",
- # environment_id: "AppConfigResourceId",
- # },
- # data_delivery: {
- # cloud_watch_logs: {
- # log_group: "CwLogGroupSafeName",
- # },
- # s3_destination: {
- # bucket: "S3BucketSafeName",
- # prefix: "S3PrefixSafeName",
- # },
- # },
- # description: "Description",
- # name: "ProjectName", # required
- # tags: {
- # "TagKey" => "TagValue",
- # },
- # })
- #
- # @example Response structure
- #
- # resp.project.active_experiment_count #=> Integer
- # resp.project.active_launch_count #=> Integer
- # resp.project.app_config_resource.application_id #=> String
- # resp.project.app_config_resource.configuration_profile_id #=> String
- # resp.project.app_config_resource.environment_id #=> String
- # resp.project.arn #=> String
- # resp.project.created_time #=> Time
- # resp.project.data_delivery.cloud_watch_logs.log_group #=> String
- # resp.project.data_delivery.s3_destination.bucket #=> String
- # resp.project.data_delivery.s3_destination.prefix #=> String
- # resp.project.description #=> String
- # resp.project.experiment_count #=> Integer
- # resp.project.feature_count #=> Integer
- # resp.project.last_updated_time #=> Time
- # resp.project.launch_count #=> Integer
- # resp.project.name #=> String
- # resp.project.status #=> String, one of "AVAILABLE", "UPDATING"
- # resp.project.tags #=> Hash
- # resp.project.tags["TagKey"] #=> String
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/CreateProject AWS API Documentation
- #
- # @overload create_project(params = {})
- # @param [Hash] params ({})
- def create_project(params = {}, options = {})
- req = build_request(:create_project, params)
- req.send_request(options)
- end
-
- # Use this operation to define a *segment* of your audience. A segment
- # is a portion of your audience that share one or more characteristics.
- # Examples could be Chrome browser users, users in Europe, or Firefox
- # browser users in Europe who also fit other criteria that your
- # application collects, such as age.
- #
- # Using a segment in an experiment limits that experiment to evaluate
- # only the users who match the segment criteria. Using one or more
- # segments in a launch allows you to define different traffic splits for
- # the different audience segments.
- #
- # For more information about segment pattern syntax, see [ Segment rule
- # pattern syntax][1].
- #
- # The pattern that you define for a segment is matched against the value
- # of `evaluationContext`, which is passed into Evidently in the
- # [EvaluateFeature][2] operation, when Evidently assigns a feature
- # variation to a user.
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Evidently-segments.html#CloudWatch-Evidently-segments-syntax.html
- # [2]: https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_EvaluateFeature.html
- #
- # @option params [String] :description
- # An optional description for this segment.
- #
- # @option params [required, String] :name
- # A name for the segment.
- #
- # @option params [required, String] :pattern
- # The pattern to use for the segment. For more information about pattern
- # syntax, see [ Segment rule pattern syntax][1].
- #
- # **SDK automatically handles json encoding and base64 encoding for you
- # when the required value (Hash, Array, etc.) is provided according to
- # the description.**
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Evidently-segments.html#CloudWatch-Evidently-segments-syntax.html
- #
- # @option params [Hash] :tags
- # Assigns one or more tags (key-value pairs) to the segment.
- #
- # Tags can help you organize and categorize your resources. You can also
- # use them to scope user permissions by granting a user permission to
- # access or change only resources with certain tag values.
- #
- # Tags don't have any semantic meaning to Amazon Web Services and are
- # interpreted strictly as strings of characters.
- #
- # You can associate as many as 50 tags with a segment.
- #
- # For more information, see [Tagging Amazon Web Services resources][1].
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html
- #
- # @return [Types::CreateSegmentResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::CreateSegmentResponse#segment #segment} => Types::Segment
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.create_segment({
- # description: "Description",
- # name: "SegmentName", # required
- # pattern: "SegmentPattern", # required
- # tags: {
- # "TagKey" => "TagValue",
- # },
- # })
- #
- # @example Response structure
- #
- # resp.segment.arn #=> String
- # resp.segment.created_time #=> Time
- # resp.segment.description #=> String
- # resp.segment.experiment_count #=> Integer
- # resp.segment.last_updated_time #=> Time
- # resp.segment.launch_count #=> Integer
- # resp.segment.name #=> String
- # resp.segment.pattern #=> String
- # resp.segment.tags #=> Hash
- # resp.segment.tags["TagKey"] #=> String
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/CreateSegment AWS API Documentation
- #
- # @overload create_segment(params = {})
- # @param [Hash] params ({})
- def create_segment(params = {}, options = {})
- req = build_request(:create_segment, params)
- req.send_request(options)
- end
-
- # Deletes an Evidently experiment. The feature used for the experiment
- # is not deleted.
- #
- # To stop an experiment without deleting it, use [StopExperiment][1].
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_StopExperiment.html
- #
- # @option params [required, String] :experiment
- # The name of the experiment to delete.
- #
- # @option params [required, String] :project
- # The name or ARN of the project that contains the experiment to delete.
- #
- # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.delete_experiment({
- # experiment: "ExperimentName", # required
- # project: "ProjectRef", # required
- # })
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/DeleteExperiment AWS API Documentation
- #
- # @overload delete_experiment(params = {})
- # @param [Hash] params ({})
- def delete_experiment(params = {}, options = {})
- req = build_request(:delete_experiment, params)
- req.send_request(options)
- end
-
- # Deletes an Evidently feature.
- #
- # @option params [required, String] :feature
- # The name of the feature to delete.
- #
- # @option params [required, String] :project
- # The name or ARN of the project that contains the feature to delete.
- #
- # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.delete_feature({
- # feature: "FeatureName", # required
- # project: "ProjectRef", # required
- # })
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/DeleteFeature AWS API Documentation
- #
- # @overload delete_feature(params = {})
- # @param [Hash] params ({})
- def delete_feature(params = {}, options = {})
- req = build_request(:delete_feature, params)
- req.send_request(options)
- end
-
- # Deletes an Evidently launch. The feature used for the launch is not
- # deleted.
- #
- # To stop a launch without deleting it, use [StopLaunch][1].
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_StopLaunch.html
- #
- # @option params [required, String] :launch
- # The name of the launch to delete.
- #
- # @option params [required, String] :project
- # The name or ARN of the project that contains the launch to delete.
- #
- # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.delete_launch({
- # launch: "LaunchName", # required
- # project: "ProjectRef", # required
- # })
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/DeleteLaunch AWS API Documentation
- #
- # @overload delete_launch(params = {})
- # @param [Hash] params ({})
- def delete_launch(params = {}, options = {})
- req = build_request(:delete_launch, params)
- req.send_request(options)
- end
-
- # Deletes an Evidently project. Before you can delete a project, you
- # must delete all the features that the project contains. To delete a
- # feature, use [DeleteFeature][1].
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_DeleteFeature.html
- #
- # @option params [required, String] :project
- # The name or ARN of the project to delete.
- #
- # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.delete_project({
- # project: "ProjectRef", # required
- # })
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/DeleteProject AWS API Documentation
- #
- # @overload delete_project(params = {})
- # @param [Hash] params ({})
- def delete_project(params = {}, options = {})
- req = build_request(:delete_project, params)
- req.send_request(options)
- end
-
- # Deletes a segment. You can't delete a segment that is being used in a
- # launch or experiment, even if that launch or experiment is not
- # currently running.
- #
- # @option params [required, String] :segment
- # Specifies the segment to delete.
- #
- # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.delete_segment({
- # segment: "SegmentRef", # required
- # })
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/DeleteSegment AWS API Documentation
- #
- # @overload delete_segment(params = {})
- # @param [Hash] params ({})
- def delete_segment(params = {}, options = {})
- req = build_request(:delete_segment, params)
- req.send_request(options)
- end
-
- # This operation assigns a feature variation to one given user session.
- # You pass in an `entityID` that represents the user. Evidently then
- # checks the evaluation rules and assigns the variation.
- #
- # The first rules that are evaluated are the override rules. If the
- # user's `entityID` matches an override rule, the user is served the
- # variation specified by that rule.
- #
- # If there is a current launch with this feature that uses segment
- # overrides, and if the user session's `evaluationContext` matches a
- # segment rule defined in a segment override, the configuration in the
- # segment overrides is used. For more information about segments, see
- # [CreateSegment][1] and [Use segments to focus your audience][2].
- #
- # If there is a launch with no segment overrides, the user might be
- # assigned to a variation in the launch. The chance of this depends on
- # the percentage of users that are allocated to that launch. If the user
- # is enrolled in the launch, the variation they are served depends on
- # the allocation of the various feature variations used for the launch.
- #
- # If the user is not assigned to a launch, and there is an ongoing
- # experiment for this feature, the user might be assigned to a variation
- # in the experiment. The chance of this depends on the percentage of
- # users that are allocated to that experiment.
- #
- # If the experiment uses a segment, then only user sessions with
- # `evaluationContext` values that match the segment rule are used in the
- # experiment.
- #
- # If the user is enrolled in the experiment, the variation they are
- # served depends on the allocation of the various feature variations
- # used for the experiment.
- #
- # If the user is not assigned to a launch or experiment, they are served
- # the default variation.
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_CreateSegment.html
- # [2]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Evidently-segments.html
- #
- # @option params [required, String] :entity_id
- # An internal ID that represents a unique user of the application. This
- # `entityID` is checked against any override rules assigned for this
- # feature.
- #
- # @option params [String] :evaluation_context
- # A JSON object of attributes that you can optionally pass in as part of
- # the evaluation event sent to Evidently from the user session.
- # Evidently can use this value to match user sessions with defined
- # audience segments. For more information, see [Use segments to focus
- # your audience][1].
- #
- # If you include this parameter, the value must be a JSON object. A JSON
- # array is not supported.
- #
- # **SDK automatically handles json encoding and base64 encoding for you
- # when the required value (Hash, Array, etc.) is provided according to
- # the description.**
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Evidently-segments.html
- #
- # @option params [required, String] :feature
- # The name of the feature being evaluated.
- #
- # @option params [required, String] :project
- # The name or ARN of the project that contains this feature.
- #
- # @return [Types::EvaluateFeatureResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::EvaluateFeatureResponse#details #details} => String
- # * {Types::EvaluateFeatureResponse#reason #reason} => String
- # * {Types::EvaluateFeatureResponse#value #value} => Types::VariableValue
- # * {Types::EvaluateFeatureResponse#variation #variation} => String
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.evaluate_feature({
- # entity_id: "EntityId", # required
- # evaluation_context: "JsonValue",
- # feature: "FeatureName", # required
- # project: "ProjectRef", # required
- # })
- #
- # @example Response structure
- #
- # resp.details #=> String
- # resp.reason #=> String
- # resp.value.bool_value #=> Boolean
- # resp.value.double_value #=> Float
- # resp.value.long_value #=> Integer
- # resp.value.string_value #=> String
- # resp.variation #=> String
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/EvaluateFeature AWS API Documentation
- #
- # @overload evaluate_feature(params = {})
- # @param [Hash] params ({})
- def evaluate_feature(params = {}, options = {})
- req = build_request(:evaluate_feature, params)
- req.send_request(options)
- end
-
- # Returns the details about one experiment. You must already know the
- # experiment name. To retrieve a list of experiments in your account,
- # use [ListExperiments][1].
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_ListExperiments.html
- #
- # @option params [required, String] :experiment
- # The name of the experiment that you want to see the details of.
- #
- # @option params [required, String] :project
- # The name or ARN of the project that contains the experiment.
- #
- # @return [Types::GetExperimentResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::GetExperimentResponse#experiment #experiment} => Types::Experiment
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.get_experiment({
- # experiment: "ExperimentName", # required
- # project: "ProjectRef", # required
- # })
- #
- # @example Response structure
- #
- # resp.experiment.arn #=> String
- # resp.experiment.created_time #=> Time
- # resp.experiment.description #=> String
- # resp.experiment.execution.ended_time #=> Time
- # resp.experiment.execution.started_time #=> Time
- # resp.experiment.last_updated_time #=> Time
- # resp.experiment.metric_goals #=> Array
- # resp.experiment.metric_goals[0].desired_change #=> String, one of "INCREASE", "DECREASE"
- # resp.experiment.metric_goals[0].metric_definition.entity_id_key #=> String
- # resp.experiment.metric_goals[0].metric_definition.event_pattern #=> String
- # resp.experiment.metric_goals[0].metric_definition.name #=> String
- # resp.experiment.metric_goals[0].metric_definition.unit_label #=> String
- # resp.experiment.metric_goals[0].metric_definition.value_key #=> String
- # resp.experiment.name #=> String
- # resp.experiment.online_ab_definition.control_treatment_name #=> String
- # resp.experiment.online_ab_definition.treatment_weights #=> Hash
- # resp.experiment.online_ab_definition.treatment_weights["TreatmentName"] #=> Integer
- # resp.experiment.project #=> String
- # resp.experiment.randomization_salt #=> String
- # resp.experiment.sampling_rate #=> Integer
- # resp.experiment.schedule.analysis_complete_time #=> Time
- # resp.experiment.segment #=> String
- # resp.experiment.status #=> String, one of "CREATED", "UPDATING", "RUNNING", "COMPLETED", "CANCELLED"
- # resp.experiment.status_reason #=> String
- # resp.experiment.tags #=> Hash
- # resp.experiment.tags["TagKey"] #=> String
- # resp.experiment.treatments #=> Array
- # resp.experiment.treatments[0].description #=> String
- # resp.experiment.treatments[0].feature_variations #=> Hash
- # resp.experiment.treatments[0].feature_variations["FeatureName"] #=> String
- # resp.experiment.treatments[0].name #=> String
- # resp.experiment.type #=> String, one of "aws.evidently.onlineab"
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/GetExperiment AWS API Documentation
- #
- # @overload get_experiment(params = {})
- # @param [Hash] params ({})
- def get_experiment(params = {}, options = {})
- req = build_request(:get_experiment, params)
- req.send_request(options)
- end
-
- # Retrieves the results of a running or completed experiment. No results
- # are available until there have been 100 events for each variation and
- # at least 10 minutes have passed since the start of the experiment. To
- # increase the statistical power, Evidently performs an additional
- # offline p-value analysis at the end of the experiment. Offline p-value
- # analysis can detect statistical significance in some cases where the
- # anytime p-values used during the experiment do not find statistical
- # significance.
- #
- # Experiment results are available up to 63 days after the start of the
- # experiment. They are not available after that because of CloudWatch
- # data retention policies.
- #
- # @option params [String] :base_stat
- # The statistic used to calculate experiment results. Currently the only
- # valid value is `mean`, which uses the mean of the collected values as
- # the statistic.
- #
- # @option params [Time,DateTime,Date,Integer,String] :end_time
- # The date and time that the experiment ended, if it is completed. This
- # must be no longer than 30 days after the experiment start time.
- #
- # @option params [required, String] :experiment
- # The name of the experiment to retrieve the results of.
- #
- # @option params [required, Array] :metric_names
- # The names of the experiment metrics that you want to see the results
- # of.
- #
- # @option params [Integer] :period
- # In seconds, the amount of time to aggregate results together.
- #
- # @option params [required, String] :project
- # The name or ARN of the project that contains the experiment that you
- # want to see the results of.
- #
- # @option params [Array] :report_names
- # The names of the report types that you want to see. Currently,
- # `BayesianInference` is the only valid value.
- #
- # @option params [Array] :result_stats
- # The statistics that you want to see in the returned results.
- #
- # * `PValue` specifies to use p-values for the results. A p-value is
- # used in hypothesis testing to measure how often you are willing to
- # make a mistake in rejecting the null hypothesis. A general practice
- # is to reject the null hypothesis and declare that the results are
- # statistically significant when the p-value is less than 0.05.
- #
- # * `ConfidenceInterval` specifies a confidence interval for the
- # results. The confidence interval represents the range of values for
- # the chosen metric that is likely to contain the true difference
- # between the `baseStat` of a variation and the baseline. Evidently
- # returns the 95% confidence interval.
- #
- # * `TreatmentEffect` is the difference in the statistic specified by
- # the `baseStat` parameter between each variation and the default
- # variation.
- #
- # * `BaseStat` returns the statistical values collected for the metric
- # for each variation. The statistic uses the same statistic specified
- # in the `baseStat` parameter. Therefore, if `baseStat` is `mean`,
- # this returns the mean of the values collected for each variation.
- #
- # @option params [Time,DateTime,Date,Integer,String] :start_time
- # The date and time that the experiment started.
- #
- # @option params [required, Array] :treatment_names
- # The names of the experiment treatments that you want to see the
- # results for.
- #
- # @return [Types::GetExperimentResultsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::GetExperimentResultsResponse#details #details} => String
- # * {Types::GetExperimentResultsResponse#reports #reports} => Array<Types::ExperimentReport>
- # * {Types::GetExperimentResultsResponse#results_data #results_data} => Array<Types::ExperimentResultsData>
- # * {Types::GetExperimentResultsResponse#timestamps #timestamps} => Array<Time>
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.get_experiment_results({
- # base_stat: "Mean", # accepts Mean
- # end_time: Time.now,
- # experiment: "ExperimentName", # required
- # metric_names: ["CwDimensionSafeName"], # required
- # period: 1,
- # project: "ProjectRef", # required
- # report_names: ["BayesianInference"], # accepts BayesianInference
- # result_stats: ["BaseStat"], # accepts BaseStat, TreatmentEffect, ConfidenceInterval, PValue
- # start_time: Time.now,
- # treatment_names: ["TreatmentName"], # required
- # })
- #
- # @example Response structure
- #
- # resp.details #=> String
- # resp.reports #=> Array
- # resp.reports[0].content #=> String
- # resp.reports[0].metric_name #=> String
- # resp.reports[0].report_name #=> String, one of "BayesianInference"
- # resp.reports[0].treatment_name #=> String
- # resp.results_data #=> Array
- # resp.results_data[0].metric_name #=> String
- # resp.results_data[0].result_stat #=> String, one of "Mean", "TreatmentEffect", "ConfidenceIntervalUpperBound", "ConfidenceIntervalLowerBound", "PValue"
- # resp.results_data[0].treatment_name #=> String
- # resp.results_data[0].values #=> Array
- # resp.results_data[0].values[0] #=> Float
- # resp.timestamps #=> Array
- # resp.timestamps[0] #=> Time
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/GetExperimentResults AWS API Documentation
- #
- # @overload get_experiment_results(params = {})
- # @param [Hash] params ({})
- def get_experiment_results(params = {}, options = {})
- req = build_request(:get_experiment_results, params)
- req.send_request(options)
- end
-
- # Returns the details about one feature. You must already know the
- # feature name. To retrieve a list of features in your account, use
- # [ListFeatures][1].
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_ListFeatures.html
- #
- # @option params [required, String] :feature
- # The name of the feature that you want to retrieve information for.
- #
- # @option params [required, String] :project
- # The name or ARN of the project that contains the feature.
- #
- # @return [Types::GetFeatureResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::GetFeatureResponse#feature #feature} => Types::Feature
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.get_feature({
- # feature: "FeatureName", # required
- # project: "ProjectRef", # required
- # })
- #
- # @example Response structure
- #
- # resp.feature.arn #=> String
- # resp.feature.created_time #=> Time
- # resp.feature.default_variation #=> String
- # resp.feature.description #=> String
- # resp.feature.entity_overrides #=> Hash
- # resp.feature.entity_overrides["EntityId"] #=> String
- # resp.feature.evaluation_rules #=> Array
- # resp.feature.evaluation_rules[0].name #=> String
- # resp.feature.evaluation_rules[0].type #=> String
- # resp.feature.evaluation_strategy #=> String, one of "ALL_RULES", "DEFAULT_VARIATION"
- # resp.feature.last_updated_time #=> Time
- # resp.feature.name #=> String
- # resp.feature.project #=> String
- # resp.feature.status #=> String, one of "AVAILABLE", "UPDATING"
- # resp.feature.tags #=> Hash
- # resp.feature.tags["TagKey"] #=> String
- # resp.feature.value_type #=> String, one of "STRING", "LONG", "DOUBLE", "BOOLEAN"
- # resp.feature.variations #=> Array
- # resp.feature.variations[0].name #=> String
- # resp.feature.variations[0].value.bool_value #=> Boolean
- # resp.feature.variations[0].value.double_value #=> Float
- # resp.feature.variations[0].value.long_value #=> Integer
- # resp.feature.variations[0].value.string_value #=> String
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/GetFeature AWS API Documentation
- #
- # @overload get_feature(params = {})
- # @param [Hash] params ({})
- def get_feature(params = {}, options = {})
- req = build_request(:get_feature, params)
- req.send_request(options)
- end
-
- # Returns the details about one launch. You must already know the launch
- # name. To retrieve a list of launches in your account, use
- # [ListLaunches][1].
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_ListLaunches.html
- #
- # @option params [required, String] :launch
- # The name of the launch that you want to see the details of.
- #
- # @option params [required, String] :project
- # The name or ARN of the project that contains the launch.
- #
- # @return [Types::GetLaunchResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::GetLaunchResponse#launch #launch} => Types::Launch
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.get_launch({
- # launch: "LaunchName", # required
- # project: "ProjectRef", # required
- # })
- #
- # @example Response structure
- #
- # resp.launch.arn #=> String
- # resp.launch.created_time #=> Time
- # resp.launch.description #=> String
- # resp.launch.execution.ended_time #=> Time
- # resp.launch.execution.started_time #=> Time
- # resp.launch.groups #=> Array
- # resp.launch.groups[0].description #=> String
- # resp.launch.groups[0].feature_variations #=> Hash
- # resp.launch.groups[0].feature_variations["FeatureName"] #=> String
- # resp.launch.groups[0].name #=> String
- # resp.launch.last_updated_time #=> Time
- # resp.launch.metric_monitors #=> Array
- # resp.launch.metric_monitors[0].metric_definition.entity_id_key #=> String
- # resp.launch.metric_monitors[0].metric_definition.event_pattern #=> String
- # resp.launch.metric_monitors[0].metric_definition.name #=> String
- # resp.launch.metric_monitors[0].metric_definition.unit_label #=> String
- # resp.launch.metric_monitors[0].metric_definition.value_key #=> String
- # resp.launch.name #=> String
- # resp.launch.project #=> String
- # resp.launch.randomization_salt #=> String
- # resp.launch.scheduled_splits_definition.steps #=> Array
- # resp.launch.scheduled_splits_definition.steps[0].group_weights #=> Hash
- # resp.launch.scheduled_splits_definition.steps[0].group_weights["GroupName"] #=> Integer
- # resp.launch.scheduled_splits_definition.steps[0].segment_overrides #=> Array
- # resp.launch.scheduled_splits_definition.steps[0].segment_overrides[0].evaluation_order #=> Integer
- # resp.launch.scheduled_splits_definition.steps[0].segment_overrides[0].segment #=> String
- # resp.launch.scheduled_splits_definition.steps[0].segment_overrides[0].weights #=> Hash
- # resp.launch.scheduled_splits_definition.steps[0].segment_overrides[0].weights["GroupName"] #=> Integer
- # resp.launch.scheduled_splits_definition.steps[0].start_time #=> Time
- # resp.launch.status #=> String, one of "CREATED", "UPDATING", "RUNNING", "COMPLETED", "CANCELLED"
- # resp.launch.status_reason #=> String
- # resp.launch.tags #=> Hash
- # resp.launch.tags["TagKey"] #=> String
- # resp.launch.type #=> String, one of "aws.evidently.splits"
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/GetLaunch AWS API Documentation
- #
- # @overload get_launch(params = {})
- # @param [Hash] params ({})
- def get_launch(params = {}, options = {})
- req = build_request(:get_launch, params)
- req.send_request(options)
- end
-
- # Returns the details about one launch. You must already know the
- # project name. To retrieve a list of projects in your account, use
- # [ListProjects][1].
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_ListProjects.html
- #
- # @option params [required, String] :project
- # The name or ARN of the project that you want to see the details of.
- #
- # @return [Types::GetProjectResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::GetProjectResponse#project #project} => Types::Project
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.get_project({
- # project: "ProjectRef", # required
- # })
- #
- # @example Response structure
- #
- # resp.project.active_experiment_count #=> Integer
- # resp.project.active_launch_count #=> Integer
- # resp.project.app_config_resource.application_id #=> String
- # resp.project.app_config_resource.configuration_profile_id #=> String
- # resp.project.app_config_resource.environment_id #=> String
- # resp.project.arn #=> String
- # resp.project.created_time #=> Time
- # resp.project.data_delivery.cloud_watch_logs.log_group #=> String
- # resp.project.data_delivery.s3_destination.bucket #=> String
- # resp.project.data_delivery.s3_destination.prefix #=> String
- # resp.project.description #=> String
- # resp.project.experiment_count #=> Integer
- # resp.project.feature_count #=> Integer
- # resp.project.last_updated_time #=> Time
- # resp.project.launch_count #=> Integer
- # resp.project.name #=> String
- # resp.project.status #=> String, one of "AVAILABLE", "UPDATING"
- # resp.project.tags #=> Hash
- # resp.project.tags["TagKey"] #=> String
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/GetProject AWS API Documentation
- #
- # @overload get_project(params = {})
- # @param [Hash] params ({})
- def get_project(params = {}, options = {})
- req = build_request(:get_project, params)
- req.send_request(options)
- end
-
- # Returns information about the specified segment. Specify the segment
- # you want to view by specifying its ARN.
- #
- # @option params [required, String] :segment
- # The ARN of the segment to return information for.
- #
- # @return [Types::GetSegmentResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::GetSegmentResponse#segment #segment} => Types::Segment
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.get_segment({
- # segment: "SegmentRef", # required
- # })
- #
- # @example Response structure
- #
- # resp.segment.arn #=> String
- # resp.segment.created_time #=> Time
- # resp.segment.description #=> String
- # resp.segment.experiment_count #=> Integer
- # resp.segment.last_updated_time #=> Time
- # resp.segment.launch_count #=> Integer
- # resp.segment.name #=> String
- # resp.segment.pattern #=> String
- # resp.segment.tags #=> Hash
- # resp.segment.tags["TagKey"] #=> String
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/GetSegment AWS API Documentation
- #
- # @overload get_segment(params = {})
- # @param [Hash] params ({})
- def get_segment(params = {}, options = {})
- req = build_request(:get_segment, params)
- req.send_request(options)
- end
-
- # Returns configuration details about all the experiments in the
- # specified project.
- #
- # @option params [Integer] :max_results
- # The maximum number of results to include in the response.
- #
- # @option params [String] :next_token
- # The token to use when requesting the next set of results. You received
- # this token from a previous `ListExperiments` operation.
- #
- # @option params [required, String] :project
- # The name or ARN of the project to return the experiment list from.
- #
- # @option params [String] :status
- # Use this optional parameter to limit the returned results to only the
- # experiments with the status that you specify here.
- #
- # @return [Types::ListExperimentsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::ListExperimentsResponse#experiments #experiments} => Array<Types::Experiment>
- # * {Types::ListExperimentsResponse#next_token #next_token} => String
- #
- # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.list_experiments({
- # max_results: 1,
- # next_token: "NextToken",
- # project: "ProjectRef", # required
- # status: "CREATED", # accepts CREATED, UPDATING, RUNNING, COMPLETED, CANCELLED
- # })
- #
- # @example Response structure
- #
- # resp.experiments #=> Array
- # resp.experiments[0].arn #=> String
- # resp.experiments[0].created_time #=> Time
- # resp.experiments[0].description #=> String
- # resp.experiments[0].execution.ended_time #=> Time
- # resp.experiments[0].execution.started_time #=> Time
- # resp.experiments[0].last_updated_time #=> Time
- # resp.experiments[0].metric_goals #=> Array
- # resp.experiments[0].metric_goals[0].desired_change #=> String, one of "INCREASE", "DECREASE"
- # resp.experiments[0].metric_goals[0].metric_definition.entity_id_key #=> String
- # resp.experiments[0].metric_goals[0].metric_definition.event_pattern #=> String
- # resp.experiments[0].metric_goals[0].metric_definition.name #=> String
- # resp.experiments[0].metric_goals[0].metric_definition.unit_label #=> String
- # resp.experiments[0].metric_goals[0].metric_definition.value_key #=> String
- # resp.experiments[0].name #=> String
- # resp.experiments[0].online_ab_definition.control_treatment_name #=> String
- # resp.experiments[0].online_ab_definition.treatment_weights #=> Hash
- # resp.experiments[0].online_ab_definition.treatment_weights["TreatmentName"] #=> Integer
- # resp.experiments[0].project #=> String
- # resp.experiments[0].randomization_salt #=> String
- # resp.experiments[0].sampling_rate #=> Integer
- # resp.experiments[0].schedule.analysis_complete_time #=> Time
- # resp.experiments[0].segment #=> String
- # resp.experiments[0].status #=> String, one of "CREATED", "UPDATING", "RUNNING", "COMPLETED", "CANCELLED"
- # resp.experiments[0].status_reason #=> String
- # resp.experiments[0].tags #=> Hash
- # resp.experiments[0].tags["TagKey"] #=> String
- # resp.experiments[0].treatments #=> Array
- # resp.experiments[0].treatments[0].description #=> String
- # resp.experiments[0].treatments[0].feature_variations #=> Hash
- # resp.experiments[0].treatments[0].feature_variations["FeatureName"] #=> String
- # resp.experiments[0].treatments[0].name #=> String
- # resp.experiments[0].type #=> String, one of "aws.evidently.onlineab"
- # resp.next_token #=> String
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ListExperiments AWS API Documentation
- #
- # @overload list_experiments(params = {})
- # @param [Hash] params ({})
- def list_experiments(params = {}, options = {})
- req = build_request(:list_experiments, params)
- req.send_request(options)
- end
-
- # Returns configuration details about all the features in the specified
- # project.
- #
- # @option params [Integer] :max_results
- # The maximum number of results to include in the response.
- #
- # @option params [String] :next_token
- # The token to use when requesting the next set of results. You received
- # this token from a previous `ListFeatures` operation.
- #
- # @option params [required, String] :project
- # The name or ARN of the project to return the feature list from.
- #
- # @return [Types::ListFeaturesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::ListFeaturesResponse#features #features} => Array<Types::FeatureSummary>
- # * {Types::ListFeaturesResponse#next_token #next_token} => String
- #
- # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.list_features({
- # max_results: 1,
- # next_token: "NextToken",
- # project: "ProjectRef", # required
- # })
- #
- # @example Response structure
- #
- # resp.features #=> Array
- # resp.features[0].arn #=> String
- # resp.features[0].created_time #=> Time
- # resp.features[0].default_variation #=> String
- # resp.features[0].evaluation_rules #=> Array
- # resp.features[0].evaluation_rules[0].name #=> String
- # resp.features[0].evaluation_rules[0].type #=> String
- # resp.features[0].evaluation_strategy #=> String, one of "ALL_RULES", "DEFAULT_VARIATION"
- # resp.features[0].last_updated_time #=> Time
- # resp.features[0].name #=> String
- # resp.features[0].project #=> String
- # resp.features[0].status #=> String, one of "AVAILABLE", "UPDATING"
- # resp.features[0].tags #=> Hash
- # resp.features[0].tags["TagKey"] #=> String
- # resp.next_token #=> String
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ListFeatures AWS API Documentation
- #
- # @overload list_features(params = {})
- # @param [Hash] params ({})
- def list_features(params = {}, options = {})
- req = build_request(:list_features, params)
- req.send_request(options)
- end
-
- # Returns configuration details about all the launches in the specified
- # project.
- #
- # @option params [Integer] :max_results
- # The maximum number of results to include in the response.
- #
- # @option params [String] :next_token
- # The token to use when requesting the next set of results. You received
- # this token from a previous `ListLaunches` operation.
- #
- # @option params [required, String] :project
- # The name or ARN of the project to return the launch list from.
- #
- # @option params [String] :status
- # Use this optional parameter to limit the returned results to only the
- # launches with the status that you specify here.
- #
- # @return [Types::ListLaunchesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::ListLaunchesResponse#launches #launches} => Array<Types::Launch>
- # * {Types::ListLaunchesResponse#next_token #next_token} => String
- #
- # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.list_launches({
- # max_results: 1,
- # next_token: "NextToken",
- # project: "ProjectRef", # required
- # status: "CREATED", # accepts CREATED, UPDATING, RUNNING, COMPLETED, CANCELLED
- # })
- #
- # @example Response structure
- #
- # resp.launches #=> Array
- # resp.launches[0].arn #=> String
- # resp.launches[0].created_time #=> Time
- # resp.launches[0].description #=> String
- # resp.launches[0].execution.ended_time #=> Time
- # resp.launches[0].execution.started_time #=> Time
- # resp.launches[0].groups #=> Array
- # resp.launches[0].groups[0].description #=> String
- # resp.launches[0].groups[0].feature_variations #=> Hash
- # resp.launches[0].groups[0].feature_variations["FeatureName"] #=> String
- # resp.launches[0].groups[0].name #=> String
- # resp.launches[0].last_updated_time #=> Time
- # resp.launches[0].metric_monitors #=> Array
- # resp.launches[0].metric_monitors[0].metric_definition.entity_id_key #=> String
- # resp.launches[0].metric_monitors[0].metric_definition.event_pattern #=> String
- # resp.launches[0].metric_monitors[0].metric_definition.name #=> String
- # resp.launches[0].metric_monitors[0].metric_definition.unit_label #=> String
- # resp.launches[0].metric_monitors[0].metric_definition.value_key #=> String
- # resp.launches[0].name #=> String
- # resp.launches[0].project #=> String
- # resp.launches[0].randomization_salt #=> String
- # resp.launches[0].scheduled_splits_definition.steps #=> Array
- # resp.launches[0].scheduled_splits_definition.steps[0].group_weights #=> Hash
- # resp.launches[0].scheduled_splits_definition.steps[0].group_weights["GroupName"] #=> Integer
- # resp.launches[0].scheduled_splits_definition.steps[0].segment_overrides #=> Array
- # resp.launches[0].scheduled_splits_definition.steps[0].segment_overrides[0].evaluation_order #=> Integer
- # resp.launches[0].scheduled_splits_definition.steps[0].segment_overrides[0].segment #=> String
- # resp.launches[0].scheduled_splits_definition.steps[0].segment_overrides[0].weights #=> Hash
- # resp.launches[0].scheduled_splits_definition.steps[0].segment_overrides[0].weights["GroupName"] #=> Integer
- # resp.launches[0].scheduled_splits_definition.steps[0].start_time #=> Time
- # resp.launches[0].status #=> String, one of "CREATED", "UPDATING", "RUNNING", "COMPLETED", "CANCELLED"
- # resp.launches[0].status_reason #=> String
- # resp.launches[0].tags #=> Hash
- # resp.launches[0].tags["TagKey"] #=> String
- # resp.launches[0].type #=> String, one of "aws.evidently.splits"
- # resp.next_token #=> String
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ListLaunches AWS API Documentation
- #
- # @overload list_launches(params = {})
- # @param [Hash] params ({})
- def list_launches(params = {}, options = {})
- req = build_request(:list_launches, params)
- req.send_request(options)
- end
-
- # Returns configuration details about all the projects in the current
- # Region in your account.
- #
- # @option params [Integer] :max_results
- # The maximum number of results to include in the response.
- #
- # @option params [String] :next_token
- # The token to use when requesting the next set of results. You received
- # this token from a previous `ListProjects` operation.
- #
- # @return [Types::ListProjectsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::ListProjectsResponse#next_token #next_token} => String
- # * {Types::ListProjectsResponse#projects #projects} => Array<Types::ProjectSummary>
- #
- # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.list_projects({
- # max_results: 1,
- # next_token: "NextToken",
- # })
- #
- # @example Response structure
- #
- # resp.next_token #=> String
- # resp.projects #=> Array
- # resp.projects[0].active_experiment_count #=> Integer
- # resp.projects[0].active_launch_count #=> Integer
- # resp.projects[0].arn #=> String
- # resp.projects[0].created_time #=> Time
- # resp.projects[0].description #=> String
- # resp.projects[0].experiment_count #=> Integer
- # resp.projects[0].feature_count #=> Integer
- # resp.projects[0].last_updated_time #=> Time
- # resp.projects[0].launch_count #=> Integer
- # resp.projects[0].name #=> String
- # resp.projects[0].status #=> String, one of "AVAILABLE", "UPDATING"
- # resp.projects[0].tags #=> Hash
- # resp.projects[0].tags["TagKey"] #=> String
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ListProjects AWS API Documentation
- #
- # @overload list_projects(params = {})
- # @param [Hash] params ({})
- def list_projects(params = {}, options = {})
- req = build_request(:list_projects, params)
- req.send_request(options)
- end
-
- # Use this operation to find which experiments or launches are using a
- # specified segment.
- #
- # @option params [Integer] :max_results
- # The maximum number of results to include in the response. If you omit
- # this, the default of 50 is used.
- #
- # @option params [String] :next_token
- # The token to use when requesting the next set of results. You received
- # this token from a previous `ListSegmentReferences` operation.
- #
- # @option params [required, String] :segment
- # The ARN of the segment that you want to view information for.
- #
- # @option params [required, String] :type
- # Specifies whether to return information about launches or experiments
- # that use this segment.
- #
- # @return [Types::ListSegmentReferencesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::ListSegmentReferencesResponse#next_token #next_token} => String
- # * {Types::ListSegmentReferencesResponse#referenced_by #referenced_by} => Array<Types::RefResource>
- #
- # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.list_segment_references({
- # max_results: 1,
- # next_token: "NextToken",
- # segment: "SegmentRef", # required
- # type: "EXPERIMENT", # required, accepts EXPERIMENT, LAUNCH
- # })
- #
- # @example Response structure
- #
- # resp.next_token #=> String
- # resp.referenced_by #=> Array
- # resp.referenced_by[0].arn #=> String
- # resp.referenced_by[0].end_time #=> String
- # resp.referenced_by[0].last_updated_on #=> String
- # resp.referenced_by[0].name #=> String
- # resp.referenced_by[0].start_time #=> String
- # resp.referenced_by[0].status #=> String
- # resp.referenced_by[0].type #=> String
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ListSegmentReferences AWS API Documentation
- #
- # @overload list_segment_references(params = {})
- # @param [Hash] params ({})
- def list_segment_references(params = {}, options = {})
- req = build_request(:list_segment_references, params)
- req.send_request(options)
- end
-
- # Returns a list of audience segments that you have created in your
- # account in this Region.
- #
- # @option params [Integer] :max_results
- # The maximum number of results to include in the response. If you omit
- # this, the default of 50 is used.
- #
- # @option params [String] :next_token
- # The token to use when requesting the next set of results. You received
- # this token from a previous `ListSegments` operation.
- #
- # @return [Types::ListSegmentsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::ListSegmentsResponse#next_token #next_token} => String
- # * {Types::ListSegmentsResponse#segments #segments} => Array<Types::Segment>
- #
- # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.list_segments({
- # max_results: 1,
- # next_token: "NextToken",
- # })
- #
- # @example Response structure
- #
- # resp.next_token #=> String
- # resp.segments #=> Array
- # resp.segments[0].arn #=> String
- # resp.segments[0].created_time #=> Time
- # resp.segments[0].description #=> String
- # resp.segments[0].experiment_count #=> Integer
- # resp.segments[0].last_updated_time #=> Time
- # resp.segments[0].launch_count #=> Integer
- # resp.segments[0].name #=> String
- # resp.segments[0].pattern #=> String
- # resp.segments[0].tags #=> Hash
- # resp.segments[0].tags["TagKey"] #=> String
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ListSegments AWS API Documentation
- #
- # @overload list_segments(params = {})
- # @param [Hash] params ({})
- def list_segments(params = {}, options = {})
- req = build_request(:list_segments, params)
- req.send_request(options)
- end
-
- # Displays the tags associated with an Evidently resource.
- #
- # @option params [required, String] :resource_arn
- # The ARN of the resource that you want to see the tags of.
- #
- # @return [Types::ListTagsForResourceResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::ListTagsForResourceResponse#tags #tags} => Hash<String,String>
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.list_tags_for_resource({
- # resource_arn: "Arn", # required
- # })
- #
- # @example Response structure
- #
- # resp.tags #=> Hash
- # resp.tags["TagKey"] #=> String
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ListTagsForResource AWS API Documentation
- #
- # @overload list_tags_for_resource(params = {})
- # @param [Hash] params ({})
- def list_tags_for_resource(params = {}, options = {})
- req = build_request(:list_tags_for_resource, params)
- req.send_request(options)
- end
-
- # Sends performance events to Evidently. These events can be used to
- # evaluate a launch or an experiment.
- #
- # @option params [required, Array] :events
- # An array of event structures that contain the performance data that is
- # being sent to Evidently.
- #
- # @option params [required, String] :project
- # The name or ARN of the project to write the events to.
- #
- # @return [Types::PutProjectEventsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::PutProjectEventsResponse#event_results #event_results} => Array<Types::PutProjectEventsResultEntry>
- # * {Types::PutProjectEventsResponse#failed_event_count #failed_event_count} => Integer
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.put_project_events({
- # events: [ # required
- # {
- # data: "JsonValue", # required
- # timestamp: Time.now, # required
- # type: "aws.evidently.evaluation", # required, accepts aws.evidently.evaluation, aws.evidently.custom
- # },
- # ],
- # project: "ProjectRef", # required
- # })
- #
- # @example Response structure
- #
- # resp.event_results #=> Array
- # resp.event_results[0].error_code #=> String
- # resp.event_results[0].error_message #=> String
- # resp.event_results[0].event_id #=> String
- # resp.failed_event_count #=> Integer
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/PutProjectEvents AWS API Documentation
- #
- # @overload put_project_events(params = {})
- # @param [Hash] params ({})
- def put_project_events(params = {}, options = {})
- req = build_request(:put_project_events, params)
- req.send_request(options)
- end
-
- # Starts an existing experiment. To create an experiment, use
- # [CreateExperiment][1].
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_CreateExperiment.html
- #
- # @option params [required, Time,DateTime,Date,Integer,String] :analysis_complete_time
- # The date and time to end the experiment. This must be no more than 30
- # days after the experiment starts.
- #
- # @option params [required, String] :experiment
- # The name of the experiment to start.
- #
- # @option params [required, String] :project
- # The name or ARN of the project that contains the experiment to start.
- #
- # @return [Types::StartExperimentResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::StartExperimentResponse#started_time #started_time} => Time
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.start_experiment({
- # analysis_complete_time: Time.now, # required
- # experiment: "ExperimentName", # required
- # project: "ProjectRef", # required
- # })
- #
- # @example Response structure
- #
- # resp.started_time #=> Time
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/StartExperiment AWS API Documentation
- #
- # @overload start_experiment(params = {})
- # @param [Hash] params ({})
- def start_experiment(params = {}, options = {})
- req = build_request(:start_experiment, params)
- req.send_request(options)
- end
-
- # Starts an existing launch. To create a launch, use [CreateLaunch][1].
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_CreateLaunch.html
- #
- # @option params [required, String] :launch
- # The name of the launch to start.
- #
- # @option params [required, String] :project
- # The name or ARN of the project that contains the launch to start.
- #
- # @return [Types::StartLaunchResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::StartLaunchResponse#launch #launch} => Types::Launch
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.start_launch({
- # launch: "LaunchName", # required
- # project: "ProjectRef", # required
- # })
- #
- # @example Response structure
- #
- # resp.launch.arn #=> String
- # resp.launch.created_time #=> Time
- # resp.launch.description #=> String
- # resp.launch.execution.ended_time #=> Time
- # resp.launch.execution.started_time #=> Time
- # resp.launch.groups #=> Array
- # resp.launch.groups[0].description #=> String
- # resp.launch.groups[0].feature_variations #=> Hash
- # resp.launch.groups[0].feature_variations["FeatureName"] #=> String
- # resp.launch.groups[0].name #=> String
- # resp.launch.last_updated_time #=> Time
- # resp.launch.metric_monitors #=> Array
- # resp.launch.metric_monitors[0].metric_definition.entity_id_key #=> String
- # resp.launch.metric_monitors[0].metric_definition.event_pattern #=> String
- # resp.launch.metric_monitors[0].metric_definition.name #=> String
- # resp.launch.metric_monitors[0].metric_definition.unit_label #=> String
- # resp.launch.metric_monitors[0].metric_definition.value_key #=> String
- # resp.launch.name #=> String
- # resp.launch.project #=> String
- # resp.launch.randomization_salt #=> String
- # resp.launch.scheduled_splits_definition.steps #=> Array
- # resp.launch.scheduled_splits_definition.steps[0].group_weights #=> Hash
- # resp.launch.scheduled_splits_definition.steps[0].group_weights["GroupName"] #=> Integer
- # resp.launch.scheduled_splits_definition.steps[0].segment_overrides #=> Array
- # resp.launch.scheduled_splits_definition.steps[0].segment_overrides[0].evaluation_order #=> Integer
- # resp.launch.scheduled_splits_definition.steps[0].segment_overrides[0].segment #=> String
- # resp.launch.scheduled_splits_definition.steps[0].segment_overrides[0].weights #=> Hash
- # resp.launch.scheduled_splits_definition.steps[0].segment_overrides[0].weights["GroupName"] #=> Integer
- # resp.launch.scheduled_splits_definition.steps[0].start_time #=> Time
- # resp.launch.status #=> String, one of "CREATED", "UPDATING", "RUNNING", "COMPLETED", "CANCELLED"
- # resp.launch.status_reason #=> String
- # resp.launch.tags #=> Hash
- # resp.launch.tags["TagKey"] #=> String
- # resp.launch.type #=> String, one of "aws.evidently.splits"
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/StartLaunch AWS API Documentation
- #
- # @overload start_launch(params = {})
- # @param [Hash] params ({})
- def start_launch(params = {}, options = {})
- req = build_request(:start_launch, params)
- req.send_request(options)
- end
-
- # Stops an experiment that is currently running. If you stop an
- # experiment, you can't resume it or restart it.
- #
- # @option params [String] :desired_state
- # Specify whether the experiment is to be considered `COMPLETED` or
- # `CANCELLED` after it stops.
- #
- # @option params [required, String] :experiment
- # The name of the experiment to stop.
- #
- # @option params [required, String] :project
- # The name or ARN of the project that contains the experiment to stop.
- #
- # @option params [String] :reason
- # A string that describes why you are stopping the experiment.
- #
- # @return [Types::StopExperimentResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::StopExperimentResponse#ended_time #ended_time} => Time
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.stop_experiment({
- # desired_state: "COMPLETED", # accepts COMPLETED, CANCELLED
- # experiment: "ExperimentName", # required
- # project: "ProjectRef", # required
- # reason: "Description",
- # })
- #
- # @example Response structure
- #
- # resp.ended_time #=> Time
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/StopExperiment AWS API Documentation
- #
- # @overload stop_experiment(params = {})
- # @param [Hash] params ({})
- def stop_experiment(params = {}, options = {})
- req = build_request(:stop_experiment, params)
- req.send_request(options)
- end
-
- # Stops a launch that is currently running. After you stop a launch, you
- # will not be able to resume it or restart it. Also, it will not be
- # evaluated as a rule for traffic allocation, and the traffic that was
- # allocated to the launch will instead be available to the feature's
- # experiment, if there is one. Otherwise, all traffic will be served the
- # default variation after the launch is stopped.
- #
- # @option params [String] :desired_state
- # Specify whether to consider the launch as `COMPLETED` or `CANCELLED`
- # after it stops.
- #
- # @option params [required, String] :launch
- # The name of the launch to stop.
- #
- # @option params [required, String] :project
- # The name or ARN of the project that contains the launch that you want
- # to stop.
- #
- # @option params [String] :reason
- # A string that describes why you are stopping the launch.
- #
- # @return [Types::StopLaunchResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::StopLaunchResponse#ended_time #ended_time} => Time
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.stop_launch({
- # desired_state: "COMPLETED", # accepts COMPLETED, CANCELLED
- # launch: "LaunchName", # required
- # project: "ProjectRef", # required
- # reason: "Description",
- # })
- #
- # @example Response structure
- #
- # resp.ended_time #=> Time
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/StopLaunch AWS API Documentation
- #
- # @overload stop_launch(params = {})
- # @param [Hash] params ({})
- def stop_launch(params = {}, options = {})
- req = build_request(:stop_launch, params)
- req.send_request(options)
- end
-
- # Assigns one or more tags (key-value pairs) to the specified CloudWatch
- # Evidently resource. Projects, features, launches, and experiments can
- # be tagged.
- #
- # Tags can help you organize and categorize your resources. You can also
- # use them to scope user permissions by granting a user permission to
- # access or change only resources with certain tag values.
- #
- # Tags don't have any semantic meaning to Amazon Web Services and are
- # interpreted strictly as strings of characters.
- #
- # You can use the `TagResource` action with a resource that already has
- # tags. If you specify a new tag key for the resource, this tag is
- # appended to the list of tags associated with the alarm. If you specify
- # a tag key that is already associated with the resource, the new tag
- # value that you specify replaces the previous value for that tag.
- #
- # You can associate as many as 50 tags with a resource.
- #
- # For more information, see [Tagging Amazon Web Services resources][1].
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html
- #
- # @option params [required, String] :resource_arn
- # The ARN of the CloudWatch Evidently resource that you're adding tags
- # to.
- #
- # @option params [required, Hash] :tags
- # The list of key-value pairs to associate with the resource.
- #
- # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.tag_resource({
- # resource_arn: "Arn", # required
- # tags: { # required
- # "TagKey" => "TagValue",
- # },
- # })
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/TagResource AWS API Documentation
- #
- # @overload tag_resource(params = {})
- # @param [Hash] params ({})
- def tag_resource(params = {}, options = {})
- req = build_request(:tag_resource, params)
- req.send_request(options)
- end
-
- # Use this operation to test a rules pattern that you plan to use to
- # create an audience segment. For more information about segments, see
- # [CreateSegment][1].
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_CreateSegment.html
- #
- # @option params [required, String] :pattern
- # The pattern to test.
- #
- # **SDK automatically handles json encoding and base64 encoding for you
- # when the required value (Hash, Array, etc.) is provided according to
- # the description.**
- #
- # @option params [required, String] :payload
- # A sample `evaluationContext` JSON block to test against the specified
- # pattern.
- #
- # **SDK automatically handles json encoding and base64 encoding for you
- # when the required value (Hash, Array, etc.) is provided according to
- # the description.**
- #
- # @return [Types::TestSegmentPatternResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::TestSegmentPatternResponse#match #match} => Boolean
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.test_segment_pattern({
- # pattern: "SegmentPattern", # required
- # payload: "JsonValue", # required
- # })
- #
- # @example Response structure
- #
- # resp.match #=> Boolean
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/TestSegmentPattern AWS API Documentation
- #
- # @overload test_segment_pattern(params = {})
- # @param [Hash] params ({})
- def test_segment_pattern(params = {}, options = {})
- req = build_request(:test_segment_pattern, params)
- req.send_request(options)
- end
-
- # Removes one or more tags from the specified resource.
- #
- # @option params [required, String] :resource_arn
- # The ARN of the CloudWatch Evidently resource that you're removing
- # tags from.
- #
- # @option params [required, Array] :tag_keys
- # The list of tag keys to remove from the resource.
- #
- # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.untag_resource({
- # resource_arn: "Arn", # required
- # tag_keys: ["TagKey"], # required
- # })
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/UntagResource AWS API Documentation
- #
- # @overload untag_resource(params = {})
- # @param [Hash] params ({})
- def untag_resource(params = {}, options = {})
- req = build_request(:untag_resource, params)
- req.send_request(options)
- end
-
- # Updates an Evidently experiment.
- #
- # Don't use this operation to update an experiment's tag. Instead, use
- # [TagResource][1].
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_TagResource.html
- #
- # @option params [String] :description
- # An optional description of the experiment.
- #
- # @option params [required, String] :experiment
- # The name of the experiment to update.
- #
- # @option params [Array] :metric_goals
- # An array of structures that defines the metrics used for the
- # experiment, and whether a higher or lower value for each metric is the
- # goal.
- #
- # @option params [Types::OnlineAbConfig] :online_ab_config
- # A structure that contains the configuration of which variation o use
- # as the "control" version. The "control" version is used for
- # comparison with other variations. This structure also specifies how
- # much experiment traffic is allocated to each variation.
- #
- # @option params [required, String] :project
- # The name or ARN of the project that contains the experiment that you
- # want to update.
- #
- # @option params [String] :randomization_salt
- # When Evidently assigns a particular user session to an experiment, it
- # must use a randomization ID to determine which variation the user
- # session is served. This randomization ID is a combination of the
- # entity ID and `randomizationSalt`. If you omit `randomizationSalt`,
- # Evidently uses the experiment name as the `randomizationSalt`.
- #
- # @option params [Boolean] :remove_segment
- # Removes a segment from being used in an experiment. You can't use
- # this parameter if the experiment is currently running.
- #
- # @option params [Integer] :sampling_rate
- # The portion of the available audience that you want to allocate to
- # this experiment, in thousandths of a percent. The available audience
- # is the total audience minus the audience that you have allocated to
- # overrides or current launches of this feature.
- #
- # This is represented in thousandths of a percent. For example, specify
- # 20,000 to allocate 20% of the available audience.
- #
- # @option params [String] :segment
- # Adds an audience *segment* to an experiment. When a segment is used in
- # an experiment, only user sessions that match the segment pattern are
- # used in the experiment. You can't use this parameter if the
- # experiment is currently running.
- #
- # @option params [Array] :treatments
- # An array of structures that define the variations being tested in the
- # experiment.
- #
- # @return [Types::UpdateExperimentResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::UpdateExperimentResponse#experiment #experiment} => Types::Experiment
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.update_experiment({
- # description: "Description",
- # experiment: "ExperimentName", # required
- # metric_goals: [
- # {
- # desired_change: "INCREASE", # accepts INCREASE, DECREASE
- # metric_definition: { # required
- # entity_id_key: "JsonPath", # required
- # event_pattern: "MetricDefinitionConfigEventPatternString",
- # name: "CwDimensionSafeName", # required
- # unit_label: "MetricUnitLabel",
- # value_key: "JsonPath", # required
- # },
- # },
- # ],
- # online_ab_config: {
- # control_treatment_name: "TreatmentName",
- # treatment_weights: {
- # "TreatmentName" => 1,
- # },
- # },
- # project: "ProjectRef", # required
- # randomization_salt: "RandomizationSalt",
- # remove_segment: false,
- # sampling_rate: 1,
- # segment: "SegmentRef",
- # treatments: [
- # {
- # description: "Description",
- # feature: "FeatureName", # required
- # name: "TreatmentName", # required
- # variation: "VariationName", # required
- # },
- # ],
- # })
- #
- # @example Response structure
- #
- # resp.experiment.arn #=> String
- # resp.experiment.created_time #=> Time
- # resp.experiment.description #=> String
- # resp.experiment.execution.ended_time #=> Time
- # resp.experiment.execution.started_time #=> Time
- # resp.experiment.last_updated_time #=> Time
- # resp.experiment.metric_goals #=> Array
- # resp.experiment.metric_goals[0].desired_change #=> String, one of "INCREASE", "DECREASE"
- # resp.experiment.metric_goals[0].metric_definition.entity_id_key #=> String
- # resp.experiment.metric_goals[0].metric_definition.event_pattern #=> String
- # resp.experiment.metric_goals[0].metric_definition.name #=> String
- # resp.experiment.metric_goals[0].metric_definition.unit_label #=> String
- # resp.experiment.metric_goals[0].metric_definition.value_key #=> String
- # resp.experiment.name #=> String
- # resp.experiment.online_ab_definition.control_treatment_name #=> String
- # resp.experiment.online_ab_definition.treatment_weights #=> Hash
- # resp.experiment.online_ab_definition.treatment_weights["TreatmentName"] #=> Integer
- # resp.experiment.project #=> String
- # resp.experiment.randomization_salt #=> String
- # resp.experiment.sampling_rate #=> Integer
- # resp.experiment.schedule.analysis_complete_time #=> Time
- # resp.experiment.segment #=> String
- # resp.experiment.status #=> String, one of "CREATED", "UPDATING", "RUNNING", "COMPLETED", "CANCELLED"
- # resp.experiment.status_reason #=> String
- # resp.experiment.tags #=> Hash
- # resp.experiment.tags["TagKey"] #=> String
- # resp.experiment.treatments #=> Array
- # resp.experiment.treatments[0].description #=> String
- # resp.experiment.treatments[0].feature_variations #=> Hash
- # resp.experiment.treatments[0].feature_variations["FeatureName"] #=> String
- # resp.experiment.treatments[0].name #=> String
- # resp.experiment.type #=> String, one of "aws.evidently.onlineab"
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/UpdateExperiment AWS API Documentation
- #
- # @overload update_experiment(params = {})
- # @param [Hash] params ({})
- def update_experiment(params = {}, options = {})
- req = build_request(:update_experiment, params)
- req.send_request(options)
- end
-
- # Updates an existing feature.
- #
- # You can't use this operation to update the tags of an existing
- # feature. Instead, use [TagResource][1].
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_TagResource.html
- #
- # @option params [Array] :add_or_update_variations
- # To update variation configurations for this feature, or add new ones,
- # specify this structure. In this array, include any variations that you
- # want to add or update. If the array includes a variation name that
- # already exists for this feature, it is updated. If it includes a new
- # variation name, it is added as a new variation.
- #
- # @option params [String] :default_variation
- # The name of the variation to use as the default variation. The default
- # variation is served to users who are not allocated to any ongoing
- # launches or experiments of this feature.
- #
- # @option params [String] :description
- # An optional description of the feature.
- #
- # @option params [Hash] :entity_overrides
- # Specified users that should always be served a specific variation of a
- # feature. Each user is specified by a key-value pair . For each key,
- # specify a user by entering their user ID, account ID, or some other
- # identifier. For the value, specify the name of the variation that they
- # are to be served.
- #
- # This parameter is limited to 2500 overrides or a total of 40KB. The
- # 40KB limit includes an overhead of 6 bytes per override.
- #
- # @option params [String] :evaluation_strategy
- # Specify `ALL_RULES` to activate the traffic allocation specified by
- # any ongoing launches or experiments. Specify `DEFAULT_VARIATION` to
- # serve the default variation to all users instead.
- #
- # @option params [required, String] :feature
- # The name of the feature to be updated.
- #
- # @option params [required, String] :project
- # The name or ARN of the project that contains the feature to be
- # updated.
- #
- # @option params [Array] :remove_variations
- # Removes a variation from the feature. If the variation you specify
- # doesn't exist, then this makes no change and does not report an
- # error.
- #
- # This operation fails if you try to remove a variation that is part of
- # an ongoing launch or experiment.
- #
- # @return [Types::UpdateFeatureResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::UpdateFeatureResponse#feature #feature} => Types::Feature
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.update_feature({
- # add_or_update_variations: [
- # {
- # name: "VariationName", # required
- # value: { # required
- # bool_value: false,
- # double_value: 1.0,
- # long_value: 1,
- # string_value: "VariableValueStringValueString",
- # },
- # },
- # ],
- # default_variation: "VariationName",
- # description: "Description",
- # entity_overrides: {
- # "EntityId" => "VariationName",
- # },
- # evaluation_strategy: "ALL_RULES", # accepts ALL_RULES, DEFAULT_VARIATION
- # feature: "FeatureName", # required
- # project: "ProjectRef", # required
- # remove_variations: ["VariationName"],
- # })
- #
- # @example Response structure
- #
- # resp.feature.arn #=> String
- # resp.feature.created_time #=> Time
- # resp.feature.default_variation #=> String
- # resp.feature.description #=> String
- # resp.feature.entity_overrides #=> Hash
- # resp.feature.entity_overrides["EntityId"] #=> String
- # resp.feature.evaluation_rules #=> Array
- # resp.feature.evaluation_rules[0].name #=> String
- # resp.feature.evaluation_rules[0].type #=> String
- # resp.feature.evaluation_strategy #=> String, one of "ALL_RULES", "DEFAULT_VARIATION"
- # resp.feature.last_updated_time #=> Time
- # resp.feature.name #=> String
- # resp.feature.project #=> String
- # resp.feature.status #=> String, one of "AVAILABLE", "UPDATING"
- # resp.feature.tags #=> Hash
- # resp.feature.tags["TagKey"] #=> String
- # resp.feature.value_type #=> String, one of "STRING", "LONG", "DOUBLE", "BOOLEAN"
- # resp.feature.variations #=> Array
- # resp.feature.variations[0].name #=> String
- # resp.feature.variations[0].value.bool_value #=> Boolean
- # resp.feature.variations[0].value.double_value #=> Float
- # resp.feature.variations[0].value.long_value #=> Integer
- # resp.feature.variations[0].value.string_value #=> String
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/UpdateFeature AWS API Documentation
- #
- # @overload update_feature(params = {})
- # @param [Hash] params ({})
- def update_feature(params = {}, options = {})
- req = build_request(:update_feature, params)
- req.send_request(options)
- end
-
- # Updates a launch of a given feature.
- #
- # Don't use this operation to update the tags of an existing launch.
- # Instead, use [TagResource][1].
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_TagResource.html
- #
- # @option params [String] :description
- # An optional description for the launch.
- #
- # @option params [Array] :groups
- # An array of structures that contains the feature and variations that
- # are to be used for the launch.
- #
- # @option params [required, String] :launch
- # The name of the launch that is to be updated.
- #
- # @option params [Array] :metric_monitors
- # An array of structures that define the metrics that will be used to
- # monitor the launch performance.
- #
- # @option params [required, String] :project
- # The name or ARN of the project that contains the launch that you want
- # to update.
- #
- # @option params [String] :randomization_salt
- # When Evidently assigns a particular user session to a launch, it must
- # use a randomization ID to determine which variation the user session
- # is served. This randomization ID is a combination of the entity ID and
- # `randomizationSalt`. If you omit `randomizationSalt`, Evidently uses
- # the launch name as the `randomizationSalt`.
- #
- # @option params [Types::ScheduledSplitsLaunchConfig] :scheduled_splits_config
- # An array of structures that define the traffic allocation percentages
- # among the feature variations during each step of the launch.
- #
- # @return [Types::UpdateLaunchResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::UpdateLaunchResponse#launch #launch} => Types::Launch
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.update_launch({
- # description: "Description",
- # groups: [
- # {
- # description: "Description",
- # feature: "FeatureName", # required
- # name: "GroupName", # required
- # variation: "VariationName", # required
- # },
- # ],
- # launch: "LaunchName", # required
- # metric_monitors: [
- # {
- # metric_definition: { # required
- # entity_id_key: "JsonPath", # required
- # event_pattern: "MetricDefinitionConfigEventPatternString",
- # name: "CwDimensionSafeName", # required
- # unit_label: "MetricUnitLabel",
- # value_key: "JsonPath", # required
- # },
- # },
- # ],
- # project: "ProjectRef", # required
- # randomization_salt: "RandomizationSalt",
- # scheduled_splits_config: {
- # steps: [ # required
- # {
- # group_weights: { # required
- # "GroupName" => 1,
- # },
- # segment_overrides: [
- # {
- # evaluation_order: 1, # required
- # segment: "SegmentRef", # required
- # weights: { # required
- # "GroupName" => 1,
- # },
- # },
- # ],
- # start_time: Time.now, # required
- # },
- # ],
- # },
- # })
- #
- # @example Response structure
- #
- # resp.launch.arn #=> String
- # resp.launch.created_time #=> Time
- # resp.launch.description #=> String
- # resp.launch.execution.ended_time #=> Time
- # resp.launch.execution.started_time #=> Time
- # resp.launch.groups #=> Array
- # resp.launch.groups[0].description #=> String
- # resp.launch.groups[0].feature_variations #=> Hash
- # resp.launch.groups[0].feature_variations["FeatureName"] #=> String
- # resp.launch.groups[0].name #=> String
- # resp.launch.last_updated_time #=> Time
- # resp.launch.metric_monitors #=> Array
- # resp.launch.metric_monitors[0].metric_definition.entity_id_key #=> String
- # resp.launch.metric_monitors[0].metric_definition.event_pattern #=> String
- # resp.launch.metric_monitors[0].metric_definition.name #=> String
- # resp.launch.metric_monitors[0].metric_definition.unit_label #=> String
- # resp.launch.metric_monitors[0].metric_definition.value_key #=> String
- # resp.launch.name #=> String
- # resp.launch.project #=> String
- # resp.launch.randomization_salt #=> String
- # resp.launch.scheduled_splits_definition.steps #=> Array
- # resp.launch.scheduled_splits_definition.steps[0].group_weights #=> Hash
- # resp.launch.scheduled_splits_definition.steps[0].group_weights["GroupName"] #=> Integer
- # resp.launch.scheduled_splits_definition.steps[0].segment_overrides #=> Array
- # resp.launch.scheduled_splits_definition.steps[0].segment_overrides[0].evaluation_order #=> Integer
- # resp.launch.scheduled_splits_definition.steps[0].segment_overrides[0].segment #=> String
- # resp.launch.scheduled_splits_definition.steps[0].segment_overrides[0].weights #=> Hash
- # resp.launch.scheduled_splits_definition.steps[0].segment_overrides[0].weights["GroupName"] #=> Integer
- # resp.launch.scheduled_splits_definition.steps[0].start_time #=> Time
- # resp.launch.status #=> String, one of "CREATED", "UPDATING", "RUNNING", "COMPLETED", "CANCELLED"
- # resp.launch.status_reason #=> String
- # resp.launch.tags #=> Hash
- # resp.launch.tags["TagKey"] #=> String
- # resp.launch.type #=> String, one of "aws.evidently.splits"
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/UpdateLaunch AWS API Documentation
- #
- # @overload update_launch(params = {})
- # @param [Hash] params ({})
- def update_launch(params = {}, options = {})
- req = build_request(:update_launch, params)
- req.send_request(options)
- end
-
- # Updates the description of an existing project.
- #
- # To create a new project, use [CreateProject][1].
- #
- # Don't use this operation to update the data storage options of a
- # project. Instead, use [UpdateProjectDataDelivery][2].
- #
- # Don't use this operation to update the tags of a project. Instead,
- # use [TagResource][3].
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_CreateProject.html
- # [2]: https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_UpdateProjectDataDelivery.html
- # [3]: https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_TagResource.html
- #
- # @option params [Types::ProjectAppConfigResourceConfig] :app_config_resource
- # Use this parameter if the project will use client-side evaluation
- # powered by AppConfig. Client-side evaluation allows your application
- # to assign variations to user sessions locally instead of by calling
- # the [EvaluateFeature][1] operation. This mitigates the latency and
- # availability risks that come with an API call. allows you to
- #
- # This parameter is a structure that contains information about the
- # AppConfig application that will be used for client-side evaluation.
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_EvaluateFeature.html
- #
- # @option params [String] :description
- # An optional description of the project.
- #
- # @option params [required, String] :project
- # The name or ARN of the project to update.
- #
- # @return [Types::UpdateProjectResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::UpdateProjectResponse#project #project} => Types::Project
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.update_project({
- # app_config_resource: {
- # application_id: "AppConfigResourceId",
- # environment_id: "AppConfigResourceId",
- # },
- # description: "Description",
- # project: "ProjectRef", # required
- # })
- #
- # @example Response structure
- #
- # resp.project.active_experiment_count #=> Integer
- # resp.project.active_launch_count #=> Integer
- # resp.project.app_config_resource.application_id #=> String
- # resp.project.app_config_resource.configuration_profile_id #=> String
- # resp.project.app_config_resource.environment_id #=> String
- # resp.project.arn #=> String
- # resp.project.created_time #=> Time
- # resp.project.data_delivery.cloud_watch_logs.log_group #=> String
- # resp.project.data_delivery.s3_destination.bucket #=> String
- # resp.project.data_delivery.s3_destination.prefix #=> String
- # resp.project.description #=> String
- # resp.project.experiment_count #=> Integer
- # resp.project.feature_count #=> Integer
- # resp.project.last_updated_time #=> Time
- # resp.project.launch_count #=> Integer
- # resp.project.name #=> String
- # resp.project.status #=> String, one of "AVAILABLE", "UPDATING"
- # resp.project.tags #=> Hash
- # resp.project.tags["TagKey"] #=> String
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/UpdateProject AWS API Documentation
- #
- # @overload update_project(params = {})
- # @param [Hash] params ({})
- def update_project(params = {}, options = {})
- req = build_request(:update_project, params)
- req.send_request(options)
- end
-
- # Updates the data storage options for this project. If you store
- # evaluation events, you an keep them and analyze them on your own. If
- # you choose not to store evaluation events, Evidently deletes them
- # after using them to produce metrics and other experiment results that
- # you can view.
- #
- # You can't specify both `cloudWatchLogs` and `s3Destination` in the
- # same operation.
- #
- # @option params [Types::CloudWatchLogsDestinationConfig] :cloud_watch_logs
- # A structure containing the CloudWatch Logs log group where you want to
- # store evaluation events.
- #
- # @option params [required, String] :project
- # The name or ARN of the project that you want to modify the data
- # storage options for.
- #
- # @option params [Types::S3DestinationConfig] :s3_destination
- # A structure containing the S3 bucket name and bucket prefix where you
- # want to store evaluation events.
- #
- # @return [Types::UpdateProjectDataDeliveryResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::UpdateProjectDataDeliveryResponse#project #project} => Types::Project
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.update_project_data_delivery({
- # cloud_watch_logs: {
- # log_group: "CwLogGroupSafeName",
- # },
- # project: "ProjectRef", # required
- # s3_destination: {
- # bucket: "S3BucketSafeName",
- # prefix: "S3PrefixSafeName",
- # },
- # })
- #
- # @example Response structure
- #
- # resp.project.active_experiment_count #=> Integer
- # resp.project.active_launch_count #=> Integer
- # resp.project.app_config_resource.application_id #=> String
- # resp.project.app_config_resource.configuration_profile_id #=> String
- # resp.project.app_config_resource.environment_id #=> String
- # resp.project.arn #=> String
- # resp.project.created_time #=> Time
- # resp.project.data_delivery.cloud_watch_logs.log_group #=> String
- # resp.project.data_delivery.s3_destination.bucket #=> String
- # resp.project.data_delivery.s3_destination.prefix #=> String
- # resp.project.description #=> String
- # resp.project.experiment_count #=> Integer
- # resp.project.feature_count #=> Integer
- # resp.project.last_updated_time #=> Time
- # resp.project.launch_count #=> Integer
- # resp.project.name #=> String
- # resp.project.status #=> String, one of "AVAILABLE", "UPDATING"
- # resp.project.tags #=> Hash
- # resp.project.tags["TagKey"] #=> String
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/UpdateProjectDataDelivery AWS API Documentation
- #
- # @overload update_project_data_delivery(params = {})
- # @param [Hash] params ({})
- def update_project_data_delivery(params = {}, options = {})
- req = build_request(:update_project_data_delivery, params)
- req.send_request(options)
- end
-
- # @!endgroup
-
- # @param params ({})
- # @api private
- def build_request(operation_name, params = {})
- handlers = @handlers.for(operation_name)
- tracer = config.telemetry_provider.tracer_provider.tracer(
- Aws::Telemetry.module_to_tracer_name('Aws::CloudWatchEvidently')
- )
- context = Seahorse::Client::RequestContext.new(
- operation_name: operation_name,
- operation: config.api.operation(operation_name),
- client: self,
- params: params,
- config: config,
- tracer: tracer
- )
- context[:gem_name] = 'aws-sdk-cloudwatchevidently'
- context[:gem_version] = '1.53.0'
- Seahorse::Client::Request.new(handlers, context)
- end
-
- # @api private
- # @deprecated
- def waiter_names
- []
- end
-
- class << self
-
- # @api private
- attr_reader :identifier
-
- # @api private
- def errors_module
- Errors
- end
-
- end
- end
-end
diff --git a/gems/aws-sdk-cloudwatchevidently/lib/aws-sdk-cloudwatchevidently/client_api.rb b/gems/aws-sdk-cloudwatchevidently/lib/aws-sdk-cloudwatchevidently/client_api.rb
deleted file mode 100644
index 9700ded0027..00000000000
--- a/gems/aws-sdk-cloudwatchevidently/lib/aws-sdk-cloudwatchevidently/client_api.rb
+++ /dev/null
@@ -1,1589 +0,0 @@
-# frozen_string_literal: true
-
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-
-module Aws::CloudWatchEvidently
- # @api private
- module ClientApi
-
- include Seahorse::Model
-
- AccessDeniedException = Shapes::StructureShape.new(name: 'AccessDeniedException')
- AppConfigResourceId = Shapes::StringShape.new(name: 'AppConfigResourceId')
- Arn = Shapes::StringShape.new(name: 'Arn')
- BatchEvaluateFeatureRequest = Shapes::StructureShape.new(name: 'BatchEvaluateFeatureRequest')
- BatchEvaluateFeatureResponse = Shapes::StructureShape.new(name: 'BatchEvaluateFeatureResponse')
- Boolean = Shapes::BooleanShape.new(name: 'Boolean')
- ChangeDirectionEnum = Shapes::StringShape.new(name: 'ChangeDirectionEnum')
- CloudWatchLogsDestination = Shapes::StructureShape.new(name: 'CloudWatchLogsDestination')
- CloudWatchLogsDestinationConfig = Shapes::StructureShape.new(name: 'CloudWatchLogsDestinationConfig')
- ConflictException = Shapes::StructureShape.new(name: 'ConflictException')
- CreateExperimentRequest = Shapes::StructureShape.new(name: 'CreateExperimentRequest')
- CreateExperimentResponse = Shapes::StructureShape.new(name: 'CreateExperimentResponse')
- CreateFeatureRequest = Shapes::StructureShape.new(name: 'CreateFeatureRequest')
- CreateFeatureResponse = Shapes::StructureShape.new(name: 'CreateFeatureResponse')
- CreateLaunchRequest = Shapes::StructureShape.new(name: 'CreateLaunchRequest')
- CreateLaunchResponse = Shapes::StructureShape.new(name: 'CreateLaunchResponse')
- CreateProjectRequest = Shapes::StructureShape.new(name: 'CreateProjectRequest')
- CreateProjectResponse = Shapes::StructureShape.new(name: 'CreateProjectResponse')
- CreateSegmentRequest = Shapes::StructureShape.new(name: 'CreateSegmentRequest')
- CreateSegmentResponse = Shapes::StructureShape.new(name: 'CreateSegmentResponse')
- CwDimensionSafeName = Shapes::StringShape.new(name: 'CwDimensionSafeName')
- CwLogGroupSafeName = Shapes::StringShape.new(name: 'CwLogGroupSafeName')
- DeleteExperimentRequest = Shapes::StructureShape.new(name: 'DeleteExperimentRequest')
- DeleteExperimentResponse = Shapes::StructureShape.new(name: 'DeleteExperimentResponse')
- DeleteFeatureRequest = Shapes::StructureShape.new(name: 'DeleteFeatureRequest')
- DeleteFeatureResponse = Shapes::StructureShape.new(name: 'DeleteFeatureResponse')
- DeleteLaunchRequest = Shapes::StructureShape.new(name: 'DeleteLaunchRequest')
- DeleteLaunchResponse = Shapes::StructureShape.new(name: 'DeleteLaunchResponse')
- DeleteProjectRequest = Shapes::StructureShape.new(name: 'DeleteProjectRequest')
- DeleteProjectResponse = Shapes::StructureShape.new(name: 'DeleteProjectResponse')
- DeleteSegmentRequest = Shapes::StructureShape.new(name: 'DeleteSegmentRequest')
- DeleteSegmentResponse = Shapes::StructureShape.new(name: 'DeleteSegmentResponse')
- Description = Shapes::StringShape.new(name: 'Description')
- Double = Shapes::FloatShape.new(name: 'Double')
- DoubleValueList = Shapes::ListShape.new(name: 'DoubleValueList')
- EntityId = Shapes::StringShape.new(name: 'EntityId')
- EntityOverrideMap = Shapes::MapShape.new(name: 'EntityOverrideMap')
- ErrorCodeEnum = Shapes::StringShape.new(name: 'ErrorCodeEnum')
- ErrorMessage = Shapes::StringShape.new(name: 'ErrorMessage')
- EvaluateFeatureRequest = Shapes::StructureShape.new(name: 'EvaluateFeatureRequest')
- EvaluateFeatureResponse = Shapes::StructureShape.new(name: 'EvaluateFeatureResponse')
- EvaluationRequest = Shapes::StructureShape.new(name: 'EvaluationRequest')
- EvaluationRequestsList = Shapes::ListShape.new(name: 'EvaluationRequestsList')
- EvaluationResult = Shapes::StructureShape.new(name: 'EvaluationResult')
- EvaluationResultsList = Shapes::ListShape.new(name: 'EvaluationResultsList')
- EvaluationRule = Shapes::StructureShape.new(name: 'EvaluationRule')
- EvaluationRulesList = Shapes::ListShape.new(name: 'EvaluationRulesList')
- Event = Shapes::StructureShape.new(name: 'Event')
- EventList = Shapes::ListShape.new(name: 'EventList')
- EventType = Shapes::StringShape.new(name: 'EventType')
- Experiment = Shapes::StructureShape.new(name: 'Experiment')
- ExperimentArn = Shapes::StringShape.new(name: 'ExperimentArn')
- ExperimentBaseStat = Shapes::StringShape.new(name: 'ExperimentBaseStat')
- ExperimentExecution = Shapes::StructureShape.new(name: 'ExperimentExecution')
- ExperimentList = Shapes::ListShape.new(name: 'ExperimentList')
- ExperimentName = Shapes::StringShape.new(name: 'ExperimentName')
- ExperimentReport = Shapes::StructureShape.new(name: 'ExperimentReport')
- ExperimentReportList = Shapes::ListShape.new(name: 'ExperimentReportList')
- ExperimentReportName = Shapes::StringShape.new(name: 'ExperimentReportName')
- ExperimentReportNameList = Shapes::ListShape.new(name: 'ExperimentReportNameList')
- ExperimentResultRequestType = Shapes::StringShape.new(name: 'ExperimentResultRequestType')
- ExperimentResultRequestTypeList = Shapes::ListShape.new(name: 'ExperimentResultRequestTypeList')
- ExperimentResultResponseType = Shapes::StringShape.new(name: 'ExperimentResultResponseType')
- ExperimentResultsData = Shapes::StructureShape.new(name: 'ExperimentResultsData')
- ExperimentResultsDataList = Shapes::ListShape.new(name: 'ExperimentResultsDataList')
- ExperimentSchedule = Shapes::StructureShape.new(name: 'ExperimentSchedule')
- ExperimentStatus = Shapes::StringShape.new(name: 'ExperimentStatus')
- ExperimentStopDesiredState = Shapes::StringShape.new(name: 'ExperimentStopDesiredState')
- ExperimentType = Shapes::StringShape.new(name: 'ExperimentType')
- Feature = Shapes::StructureShape.new(name: 'Feature')
- FeatureArn = Shapes::StringShape.new(name: 'FeatureArn')
- FeatureEvaluationStrategy = Shapes::StringShape.new(name: 'FeatureEvaluationStrategy')
- FeatureName = Shapes::StringShape.new(name: 'FeatureName')
- FeatureStatus = Shapes::StringShape.new(name: 'FeatureStatus')
- FeatureSummariesList = Shapes::ListShape.new(name: 'FeatureSummariesList')
- FeatureSummary = Shapes::StructureShape.new(name: 'FeatureSummary')
- FeatureToVariationMap = Shapes::MapShape.new(name: 'FeatureToVariationMap')
- GetExperimentRequest = Shapes::StructureShape.new(name: 'GetExperimentRequest')
- GetExperimentResponse = Shapes::StructureShape.new(name: 'GetExperimentResponse')
- GetExperimentResultsRequest = Shapes::StructureShape.new(name: 'GetExperimentResultsRequest')
- GetExperimentResultsResponse = Shapes::StructureShape.new(name: 'GetExperimentResultsResponse')
- GetFeatureRequest = Shapes::StructureShape.new(name: 'GetFeatureRequest')
- GetFeatureResponse = Shapes::StructureShape.new(name: 'GetFeatureResponse')
- GetLaunchRequest = Shapes::StructureShape.new(name: 'GetLaunchRequest')
- GetLaunchResponse = Shapes::StructureShape.new(name: 'GetLaunchResponse')
- GetProjectRequest = Shapes::StructureShape.new(name: 'GetProjectRequest')
- GetProjectResponse = Shapes::StructureShape.new(name: 'GetProjectResponse')
- GetSegmentRequest = Shapes::StructureShape.new(name: 'GetSegmentRequest')
- GetSegmentResponse = Shapes::StructureShape.new(name: 'GetSegmentResponse')
- GroupName = Shapes::StringShape.new(name: 'GroupName')
- GroupToWeightMap = Shapes::MapShape.new(name: 'GroupToWeightMap')
- Integer = Shapes::IntegerShape.new(name: 'Integer')
- InternalServerException = Shapes::StructureShape.new(name: 'InternalServerException')
- JsonPath = Shapes::StringShape.new(name: 'JsonPath')
- JsonValue = Shapes::StringShape.new(name: 'JsonValue')
- Launch = Shapes::StructureShape.new(name: 'Launch')
- LaunchArn = Shapes::StringShape.new(name: 'LaunchArn')
- LaunchExecution = Shapes::StructureShape.new(name: 'LaunchExecution')
- LaunchGroup = Shapes::StructureShape.new(name: 'LaunchGroup')
- LaunchGroupConfig = Shapes::StructureShape.new(name: 'LaunchGroupConfig')
- LaunchGroupConfigList = Shapes::ListShape.new(name: 'LaunchGroupConfigList')
- LaunchGroupList = Shapes::ListShape.new(name: 'LaunchGroupList')
- LaunchName = Shapes::StringShape.new(name: 'LaunchName')
- LaunchStatus = Shapes::StringShape.new(name: 'LaunchStatus')
- LaunchStopDesiredState = Shapes::StringShape.new(name: 'LaunchStopDesiredState')
- LaunchType = Shapes::StringShape.new(name: 'LaunchType')
- LaunchesList = Shapes::ListShape.new(name: 'LaunchesList')
- ListExperimentsRequest = Shapes::StructureShape.new(name: 'ListExperimentsRequest')
- ListExperimentsResponse = Shapes::StructureShape.new(name: 'ListExperimentsResponse')
- ListFeaturesRequest = Shapes::StructureShape.new(name: 'ListFeaturesRequest')
- ListFeaturesResponse = Shapes::StructureShape.new(name: 'ListFeaturesResponse')
- ListLaunchesRequest = Shapes::StructureShape.new(name: 'ListLaunchesRequest')
- ListLaunchesResponse = Shapes::StructureShape.new(name: 'ListLaunchesResponse')
- ListProjectsRequest = Shapes::StructureShape.new(name: 'ListProjectsRequest')
- ListProjectsResponse = Shapes::StructureShape.new(name: 'ListProjectsResponse')
- ListSegmentReferencesRequest = Shapes::StructureShape.new(name: 'ListSegmentReferencesRequest')
- ListSegmentReferencesResponse = Shapes::StructureShape.new(name: 'ListSegmentReferencesResponse')
- ListSegmentsRequest = Shapes::StructureShape.new(name: 'ListSegmentsRequest')
- ListSegmentsResponse = Shapes::StructureShape.new(name: 'ListSegmentsResponse')
- ListTagsForResourceRequest = Shapes::StructureShape.new(name: 'ListTagsForResourceRequest')
- ListTagsForResourceResponse = Shapes::StructureShape.new(name: 'ListTagsForResourceResponse')
- Long = Shapes::IntegerShape.new(name: 'Long')
- MaxExperiments = Shapes::IntegerShape.new(name: 'MaxExperiments')
- MaxFeatures = Shapes::IntegerShape.new(name: 'MaxFeatures')
- MaxLaunches = Shapes::IntegerShape.new(name: 'MaxLaunches')
- MaxProjects = Shapes::IntegerShape.new(name: 'MaxProjects')
- MaxReferences = Shapes::IntegerShape.new(name: 'MaxReferences')
- MaxSegments = Shapes::IntegerShape.new(name: 'MaxSegments')
- MetricDefinition = Shapes::StructureShape.new(name: 'MetricDefinition')
- MetricDefinitionConfig = Shapes::StructureShape.new(name: 'MetricDefinitionConfig')
- MetricDefinitionConfigEventPatternString = Shapes::StringShape.new(name: 'MetricDefinitionConfigEventPatternString')
- MetricGoal = Shapes::StructureShape.new(name: 'MetricGoal')
- MetricGoalConfig = Shapes::StructureShape.new(name: 'MetricGoalConfig')
- MetricGoalConfigList = Shapes::ListShape.new(name: 'MetricGoalConfigList')
- MetricGoalsList = Shapes::ListShape.new(name: 'MetricGoalsList')
- MetricMonitor = Shapes::StructureShape.new(name: 'MetricMonitor')
- MetricMonitorConfig = Shapes::StructureShape.new(name: 'MetricMonitorConfig')
- MetricMonitorConfigList = Shapes::ListShape.new(name: 'MetricMonitorConfigList')
- MetricMonitorList = Shapes::ListShape.new(name: 'MetricMonitorList')
- MetricNameList = Shapes::ListShape.new(name: 'MetricNameList')
- MetricUnitLabel = Shapes::StringShape.new(name: 'MetricUnitLabel')
- NextToken = Shapes::StringShape.new(name: 'NextToken')
- OnlineAbConfig = Shapes::StructureShape.new(name: 'OnlineAbConfig')
- OnlineAbDefinition = Shapes::StructureShape.new(name: 'OnlineAbDefinition')
- PrimitiveBoolean = Shapes::BooleanShape.new(name: 'PrimitiveBoolean')
- Project = Shapes::StructureShape.new(name: 'Project')
- ProjectAppConfigResource = Shapes::StructureShape.new(name: 'ProjectAppConfigResource')
- ProjectAppConfigResourceConfig = Shapes::StructureShape.new(name: 'ProjectAppConfigResourceConfig')
- ProjectArn = Shapes::StringShape.new(name: 'ProjectArn')
- ProjectDataDelivery = Shapes::StructureShape.new(name: 'ProjectDataDelivery')
- ProjectDataDeliveryConfig = Shapes::StructureShape.new(name: 'ProjectDataDeliveryConfig')
- ProjectName = Shapes::StringShape.new(name: 'ProjectName')
- ProjectRef = Shapes::StringShape.new(name: 'ProjectRef')
- ProjectStatus = Shapes::StringShape.new(name: 'ProjectStatus')
- ProjectSummariesList = Shapes::ListShape.new(name: 'ProjectSummariesList')
- ProjectSummary = Shapes::StructureShape.new(name: 'ProjectSummary')
- PutProjectEventsRequest = Shapes::StructureShape.new(name: 'PutProjectEventsRequest')
- PutProjectEventsResponse = Shapes::StructureShape.new(name: 'PutProjectEventsResponse')
- PutProjectEventsResultEntry = Shapes::StructureShape.new(name: 'PutProjectEventsResultEntry')
- PutProjectEventsResultEntryList = Shapes::ListShape.new(name: 'PutProjectEventsResultEntryList')
- RandomizationSalt = Shapes::StringShape.new(name: 'RandomizationSalt')
- RefResource = Shapes::StructureShape.new(name: 'RefResource')
- RefResourceList = Shapes::ListShape.new(name: 'RefResourceList')
- ResourceNotFoundException = Shapes::StructureShape.new(name: 'ResourceNotFoundException')
- ResultsPeriod = Shapes::IntegerShape.new(name: 'ResultsPeriod')
- RuleName = Shapes::StringShape.new(name: 'RuleName')
- RuleType = Shapes::StringShape.new(name: 'RuleType')
- S3BucketSafeName = Shapes::StringShape.new(name: 'S3BucketSafeName')
- S3Destination = Shapes::StructureShape.new(name: 'S3Destination')
- S3DestinationConfig = Shapes::StructureShape.new(name: 'S3DestinationConfig')
- S3PrefixSafeName = Shapes::StringShape.new(name: 'S3PrefixSafeName')
- ScheduledSplit = Shapes::StructureShape.new(name: 'ScheduledSplit')
- ScheduledSplitConfig = Shapes::StructureShape.new(name: 'ScheduledSplitConfig')
- ScheduledSplitConfigList = Shapes::ListShape.new(name: 'ScheduledSplitConfigList')
- ScheduledSplitsLaunchConfig = Shapes::StructureShape.new(name: 'ScheduledSplitsLaunchConfig')
- ScheduledSplitsLaunchDefinition = Shapes::StructureShape.new(name: 'ScheduledSplitsLaunchDefinition')
- ScheduledStepList = Shapes::ListShape.new(name: 'ScheduledStepList')
- Segment = Shapes::StructureShape.new(name: 'Segment')
- SegmentArn = Shapes::StringShape.new(name: 'SegmentArn')
- SegmentList = Shapes::ListShape.new(name: 'SegmentList')
- SegmentName = Shapes::StringShape.new(name: 'SegmentName')
- SegmentOverride = Shapes::StructureShape.new(name: 'SegmentOverride')
- SegmentOverridesList = Shapes::ListShape.new(name: 'SegmentOverridesList')
- SegmentPattern = Shapes::StringShape.new(name: 'SegmentPattern')
- SegmentRef = Shapes::StringShape.new(name: 'SegmentRef')
- SegmentReferenceResourceType = Shapes::StringShape.new(name: 'SegmentReferenceResourceType')
- ServiceQuotaExceededException = Shapes::StructureShape.new(name: 'ServiceQuotaExceededException')
- ServiceUnavailableException = Shapes::StructureShape.new(name: 'ServiceUnavailableException')
- SplitWeight = Shapes::IntegerShape.new(name: 'SplitWeight')
- StartExperimentRequest = Shapes::StructureShape.new(name: 'StartExperimentRequest')
- StartExperimentResponse = Shapes::StructureShape.new(name: 'StartExperimentResponse')
- StartLaunchRequest = Shapes::StructureShape.new(name: 'StartLaunchRequest')
- StartLaunchResponse = Shapes::StructureShape.new(name: 'StartLaunchResponse')
- StopExperimentRequest = Shapes::StructureShape.new(name: 'StopExperimentRequest')
- StopExperimentResponse = Shapes::StructureShape.new(name: 'StopExperimentResponse')
- StopLaunchRequest = Shapes::StructureShape.new(name: 'StopLaunchRequest')
- StopLaunchResponse = Shapes::StructureShape.new(name: 'StopLaunchResponse')
- String = Shapes::StringShape.new(name: 'String')
- TagKey = Shapes::StringShape.new(name: 'TagKey')
- TagKeyList = Shapes::ListShape.new(name: 'TagKeyList')
- TagMap = Shapes::MapShape.new(name: 'TagMap')
- TagResourceRequest = Shapes::StructureShape.new(name: 'TagResourceRequest')
- TagResourceResponse = Shapes::StructureShape.new(name: 'TagResourceResponse')
- TagValue = Shapes::StringShape.new(name: 'TagValue')
- TestSegmentPatternRequest = Shapes::StructureShape.new(name: 'TestSegmentPatternRequest')
- TestSegmentPatternResponse = Shapes::StructureShape.new(name: 'TestSegmentPatternResponse')
- ThrottlingException = Shapes::StructureShape.new(name: 'ThrottlingException')
- Timestamp = Shapes::TimestampShape.new(name: 'Timestamp')
- TimestampList = Shapes::ListShape.new(name: 'TimestampList')
- Treatment = Shapes::StructureShape.new(name: 'Treatment')
- TreatmentConfig = Shapes::StructureShape.new(name: 'TreatmentConfig')
- TreatmentConfigList = Shapes::ListShape.new(name: 'TreatmentConfigList')
- TreatmentList = Shapes::ListShape.new(name: 'TreatmentList')
- TreatmentName = Shapes::StringShape.new(name: 'TreatmentName')
- TreatmentNameList = Shapes::ListShape.new(name: 'TreatmentNameList')
- TreatmentToWeightMap = Shapes::MapShape.new(name: 'TreatmentToWeightMap')
- UntagResourceRequest = Shapes::StructureShape.new(name: 'UntagResourceRequest')
- UntagResourceResponse = Shapes::StructureShape.new(name: 'UntagResourceResponse')
- UpdateExperimentRequest = Shapes::StructureShape.new(name: 'UpdateExperimentRequest')
- UpdateExperimentResponse = Shapes::StructureShape.new(name: 'UpdateExperimentResponse')
- UpdateFeatureRequest = Shapes::StructureShape.new(name: 'UpdateFeatureRequest')
- UpdateFeatureResponse = Shapes::StructureShape.new(name: 'UpdateFeatureResponse')
- UpdateLaunchRequest = Shapes::StructureShape.new(name: 'UpdateLaunchRequest')
- UpdateLaunchResponse = Shapes::StructureShape.new(name: 'UpdateLaunchResponse')
- UpdateProjectDataDeliveryRequest = Shapes::StructureShape.new(name: 'UpdateProjectDataDeliveryRequest')
- UpdateProjectDataDeliveryResponse = Shapes::StructureShape.new(name: 'UpdateProjectDataDeliveryResponse')
- UpdateProjectRequest = Shapes::StructureShape.new(name: 'UpdateProjectRequest')
- UpdateProjectResponse = Shapes::StructureShape.new(name: 'UpdateProjectResponse')
- Uuid = Shapes::StringShape.new(name: 'Uuid')
- ValidationException = Shapes::StructureShape.new(name: 'ValidationException')
- ValidationExceptionField = Shapes::StructureShape.new(name: 'ValidationExceptionField')
- ValidationExceptionFieldList = Shapes::ListShape.new(name: 'ValidationExceptionFieldList')
- ValidationExceptionReason = Shapes::StringShape.new(name: 'ValidationExceptionReason')
- VariableValue = Shapes::UnionShape.new(name: 'VariableValue')
- VariableValueLongValueLong = Shapes::IntegerShape.new(name: 'VariableValueLongValueLong')
- VariableValueStringValueString = Shapes::StringShape.new(name: 'VariableValueStringValueString')
- Variation = Shapes::StructureShape.new(name: 'Variation')
- VariationConfig = Shapes::StructureShape.new(name: 'VariationConfig')
- VariationConfigsList = Shapes::ListShape.new(name: 'VariationConfigsList')
- VariationName = Shapes::StringShape.new(name: 'VariationName')
- VariationNameList = Shapes::ListShape.new(name: 'VariationNameList')
- VariationValueType = Shapes::StringShape.new(name: 'VariationValueType')
- VariationsList = Shapes::ListShape.new(name: 'VariationsList')
-
- AccessDeniedException.add_member(:message, Shapes::ShapeRef.new(shape: String, location_name: "message"))
- AccessDeniedException.struct_class = Types::AccessDeniedException
-
- BatchEvaluateFeatureRequest.add_member(:project, Shapes::ShapeRef.new(shape: ProjectRef, required: true, location: "uri", location_name: "project"))
- BatchEvaluateFeatureRequest.add_member(:requests, Shapes::ShapeRef.new(shape: EvaluationRequestsList, required: true, location_name: "requests"))
- BatchEvaluateFeatureRequest.struct_class = Types::BatchEvaluateFeatureRequest
-
- BatchEvaluateFeatureResponse.add_member(:results, Shapes::ShapeRef.new(shape: EvaluationResultsList, location_name: "results"))
- BatchEvaluateFeatureResponse.struct_class = Types::BatchEvaluateFeatureResponse
-
- CloudWatchLogsDestination.add_member(:log_group, Shapes::ShapeRef.new(shape: CwLogGroupSafeName, location_name: "logGroup"))
- CloudWatchLogsDestination.struct_class = Types::CloudWatchLogsDestination
-
- CloudWatchLogsDestinationConfig.add_member(:log_group, Shapes::ShapeRef.new(shape: CwLogGroupSafeName, location_name: "logGroup"))
- CloudWatchLogsDestinationConfig.struct_class = Types::CloudWatchLogsDestinationConfig
-
- ConflictException.add_member(:message, Shapes::ShapeRef.new(shape: String, location_name: "message"))
- ConflictException.add_member(:resource_id, Shapes::ShapeRef.new(shape: String, location_name: "resourceId"))
- ConflictException.add_member(:resource_type, Shapes::ShapeRef.new(shape: String, location_name: "resourceType"))
- ConflictException.struct_class = Types::ConflictException
-
- CreateExperimentRequest.add_member(:description, Shapes::ShapeRef.new(shape: Description, location_name: "description"))
- CreateExperimentRequest.add_member(:metric_goals, Shapes::ShapeRef.new(shape: MetricGoalConfigList, required: true, location_name: "metricGoals"))
- CreateExperimentRequest.add_member(:name, Shapes::ShapeRef.new(shape: ExperimentName, required: true, location_name: "name"))
- CreateExperimentRequest.add_member(:online_ab_config, Shapes::ShapeRef.new(shape: OnlineAbConfig, location_name: "onlineAbConfig"))
- CreateExperimentRequest.add_member(:project, Shapes::ShapeRef.new(shape: ProjectRef, required: true, location: "uri", location_name: "project"))
- CreateExperimentRequest.add_member(:randomization_salt, Shapes::ShapeRef.new(shape: RandomizationSalt, location_name: "randomizationSalt"))
- CreateExperimentRequest.add_member(:sampling_rate, Shapes::ShapeRef.new(shape: SplitWeight, location_name: "samplingRate", metadata: {"box" => true}))
- CreateExperimentRequest.add_member(:segment, Shapes::ShapeRef.new(shape: SegmentRef, location_name: "segment"))
- CreateExperimentRequest.add_member(:tags, Shapes::ShapeRef.new(shape: TagMap, location_name: "tags"))
- CreateExperimentRequest.add_member(:treatments, Shapes::ShapeRef.new(shape: TreatmentConfigList, required: true, location_name: "treatments"))
- CreateExperimentRequest.struct_class = Types::CreateExperimentRequest
-
- CreateExperimentResponse.add_member(:experiment, Shapes::ShapeRef.new(shape: Experiment, required: true, location_name: "experiment"))
- CreateExperimentResponse.struct_class = Types::CreateExperimentResponse
-
- CreateFeatureRequest.add_member(:default_variation, Shapes::ShapeRef.new(shape: VariationName, location_name: "defaultVariation"))
- CreateFeatureRequest.add_member(:description, Shapes::ShapeRef.new(shape: Description, location_name: "description"))
- CreateFeatureRequest.add_member(:entity_overrides, Shapes::ShapeRef.new(shape: EntityOverrideMap, location_name: "entityOverrides"))
- CreateFeatureRequest.add_member(:evaluation_strategy, Shapes::ShapeRef.new(shape: FeatureEvaluationStrategy, location_name: "evaluationStrategy"))
- CreateFeatureRequest.add_member(:name, Shapes::ShapeRef.new(shape: FeatureName, required: true, location_name: "name"))
- CreateFeatureRequest.add_member(:project, Shapes::ShapeRef.new(shape: ProjectRef, required: true, location: "uri", location_name: "project"))
- CreateFeatureRequest.add_member(:tags, Shapes::ShapeRef.new(shape: TagMap, location_name: "tags"))
- CreateFeatureRequest.add_member(:variations, Shapes::ShapeRef.new(shape: VariationConfigsList, required: true, location_name: "variations"))
- CreateFeatureRequest.struct_class = Types::CreateFeatureRequest
-
- CreateFeatureResponse.add_member(:feature, Shapes::ShapeRef.new(shape: Feature, location_name: "feature"))
- CreateFeatureResponse.struct_class = Types::CreateFeatureResponse
-
- CreateLaunchRequest.add_member(:description, Shapes::ShapeRef.new(shape: Description, location_name: "description"))
- CreateLaunchRequest.add_member(:groups, Shapes::ShapeRef.new(shape: LaunchGroupConfigList, required: true, location_name: "groups"))
- CreateLaunchRequest.add_member(:metric_monitors, Shapes::ShapeRef.new(shape: MetricMonitorConfigList, location_name: "metricMonitors"))
- CreateLaunchRequest.add_member(:name, Shapes::ShapeRef.new(shape: LaunchName, required: true, location_name: "name"))
- CreateLaunchRequest.add_member(:project, Shapes::ShapeRef.new(shape: ProjectRef, required: true, location: "uri", location_name: "project"))
- CreateLaunchRequest.add_member(:randomization_salt, Shapes::ShapeRef.new(shape: RandomizationSalt, location_name: "randomizationSalt"))
- CreateLaunchRequest.add_member(:scheduled_splits_config, Shapes::ShapeRef.new(shape: ScheduledSplitsLaunchConfig, location_name: "scheduledSplitsConfig"))
- CreateLaunchRequest.add_member(:tags, Shapes::ShapeRef.new(shape: TagMap, location_name: "tags"))
- CreateLaunchRequest.struct_class = Types::CreateLaunchRequest
-
- CreateLaunchResponse.add_member(:launch, Shapes::ShapeRef.new(shape: Launch, required: true, location_name: "launch"))
- CreateLaunchResponse.struct_class = Types::CreateLaunchResponse
-
- CreateProjectRequest.add_member(:app_config_resource, Shapes::ShapeRef.new(shape: ProjectAppConfigResourceConfig, location_name: "appConfigResource"))
- CreateProjectRequest.add_member(:data_delivery, Shapes::ShapeRef.new(shape: ProjectDataDeliveryConfig, location_name: "dataDelivery"))
- CreateProjectRequest.add_member(:description, Shapes::ShapeRef.new(shape: Description, location_name: "description"))
- CreateProjectRequest.add_member(:name, Shapes::ShapeRef.new(shape: ProjectName, required: true, location_name: "name"))
- CreateProjectRequest.add_member(:tags, Shapes::ShapeRef.new(shape: TagMap, location_name: "tags"))
- CreateProjectRequest.struct_class = Types::CreateProjectRequest
-
- CreateProjectResponse.add_member(:project, Shapes::ShapeRef.new(shape: Project, required: true, location_name: "project"))
- CreateProjectResponse.struct_class = Types::CreateProjectResponse
-
- CreateSegmentRequest.add_member(:description, Shapes::ShapeRef.new(shape: Description, location_name: "description"))
- CreateSegmentRequest.add_member(:name, Shapes::ShapeRef.new(shape: SegmentName, required: true, location_name: "name"))
- CreateSegmentRequest.add_member(:pattern, Shapes::ShapeRef.new(shape: SegmentPattern, required: true, location_name: "pattern", metadata: {"jsonvalue" => true}))
- CreateSegmentRequest.add_member(:tags, Shapes::ShapeRef.new(shape: TagMap, location_name: "tags"))
- CreateSegmentRequest.struct_class = Types::CreateSegmentRequest
-
- CreateSegmentResponse.add_member(:segment, Shapes::ShapeRef.new(shape: Segment, required: true, location_name: "segment"))
- CreateSegmentResponse.struct_class = Types::CreateSegmentResponse
-
- DeleteExperimentRequest.add_member(:experiment, Shapes::ShapeRef.new(shape: ExperimentName, required: true, location: "uri", location_name: "experiment"))
- DeleteExperimentRequest.add_member(:project, Shapes::ShapeRef.new(shape: ProjectRef, required: true, location: "uri", location_name: "project"))
- DeleteExperimentRequest.struct_class = Types::DeleteExperimentRequest
-
- DeleteExperimentResponse.struct_class = Types::DeleteExperimentResponse
-
- DeleteFeatureRequest.add_member(:feature, Shapes::ShapeRef.new(shape: FeatureName, required: true, location: "uri", location_name: "feature"))
- DeleteFeatureRequest.add_member(:project, Shapes::ShapeRef.new(shape: ProjectRef, required: true, location: "uri", location_name: "project"))
- DeleteFeatureRequest.struct_class = Types::DeleteFeatureRequest
-
- DeleteFeatureResponse.struct_class = Types::DeleteFeatureResponse
-
- DeleteLaunchRequest.add_member(:launch, Shapes::ShapeRef.new(shape: LaunchName, required: true, location: "uri", location_name: "launch"))
- DeleteLaunchRequest.add_member(:project, Shapes::ShapeRef.new(shape: ProjectRef, required: true, location: "uri", location_name: "project"))
- DeleteLaunchRequest.struct_class = Types::DeleteLaunchRequest
-
- DeleteLaunchResponse.struct_class = Types::DeleteLaunchResponse
-
- DeleteProjectRequest.add_member(:project, Shapes::ShapeRef.new(shape: ProjectRef, required: true, location: "uri", location_name: "project"))
- DeleteProjectRequest.struct_class = Types::DeleteProjectRequest
-
- DeleteProjectResponse.struct_class = Types::DeleteProjectResponse
-
- DeleteSegmentRequest.add_member(:segment, Shapes::ShapeRef.new(shape: SegmentRef, required: true, location: "uri", location_name: "segment"))
- DeleteSegmentRequest.struct_class = Types::DeleteSegmentRequest
-
- DeleteSegmentResponse.struct_class = Types::DeleteSegmentResponse
-
- DoubleValueList.member = Shapes::ShapeRef.new(shape: Double)
-
- EntityOverrideMap.key = Shapes::ShapeRef.new(shape: EntityId)
- EntityOverrideMap.value = Shapes::ShapeRef.new(shape: VariationName)
-
- EvaluateFeatureRequest.add_member(:entity_id, Shapes::ShapeRef.new(shape: EntityId, required: true, location_name: "entityId"))
- EvaluateFeatureRequest.add_member(:evaluation_context, Shapes::ShapeRef.new(shape: JsonValue, location_name: "evaluationContext", metadata: {"jsonvalue" => true}))
- EvaluateFeatureRequest.add_member(:feature, Shapes::ShapeRef.new(shape: FeatureName, required: true, location: "uri", location_name: "feature"))
- EvaluateFeatureRequest.add_member(:project, Shapes::ShapeRef.new(shape: ProjectRef, required: true, location: "uri", location_name: "project"))
- EvaluateFeatureRequest.struct_class = Types::EvaluateFeatureRequest
-
- EvaluateFeatureResponse.add_member(:details, Shapes::ShapeRef.new(shape: JsonValue, location_name: "details", metadata: {"jsonvalue" => true}))
- EvaluateFeatureResponse.add_member(:reason, Shapes::ShapeRef.new(shape: String, location_name: "reason"))
- EvaluateFeatureResponse.add_member(:value, Shapes::ShapeRef.new(shape: VariableValue, location_name: "value"))
- EvaluateFeatureResponse.add_member(:variation, Shapes::ShapeRef.new(shape: String, location_name: "variation"))
- EvaluateFeatureResponse.struct_class = Types::EvaluateFeatureResponse
-
- EvaluationRequest.add_member(:entity_id, Shapes::ShapeRef.new(shape: EntityId, required: true, location_name: "entityId"))
- EvaluationRequest.add_member(:evaluation_context, Shapes::ShapeRef.new(shape: JsonValue, location_name: "evaluationContext", metadata: {"jsonvalue" => true}))
- EvaluationRequest.add_member(:feature, Shapes::ShapeRef.new(shape: FeatureName, required: true, location_name: "feature"))
- EvaluationRequest.struct_class = Types::EvaluationRequest
-
- EvaluationRequestsList.member = Shapes::ShapeRef.new(shape: EvaluationRequest)
-
- EvaluationResult.add_member(:details, Shapes::ShapeRef.new(shape: JsonValue, location_name: "details", metadata: {"jsonvalue" => true}))
- EvaluationResult.add_member(:entity_id, Shapes::ShapeRef.new(shape: EntityId, required: true, location_name: "entityId"))
- EvaluationResult.add_member(:feature, Shapes::ShapeRef.new(shape: FeatureName, required: true, location_name: "feature"))
- EvaluationResult.add_member(:project, Shapes::ShapeRef.new(shape: Arn, location_name: "project"))
- EvaluationResult.add_member(:reason, Shapes::ShapeRef.new(shape: String, location_name: "reason"))
- EvaluationResult.add_member(:value, Shapes::ShapeRef.new(shape: VariableValue, location_name: "value"))
- EvaluationResult.add_member(:variation, Shapes::ShapeRef.new(shape: String, location_name: "variation"))
- EvaluationResult.struct_class = Types::EvaluationResult
-
- EvaluationResultsList.member = Shapes::ShapeRef.new(shape: EvaluationResult)
-
- EvaluationRule.add_member(:name, Shapes::ShapeRef.new(shape: RuleName, location_name: "name"))
- EvaluationRule.add_member(:type, Shapes::ShapeRef.new(shape: RuleType, required: true, location_name: "type"))
- EvaluationRule.struct_class = Types::EvaluationRule
-
- EvaluationRulesList.member = Shapes::ShapeRef.new(shape: EvaluationRule)
-
- Event.add_member(:data, Shapes::ShapeRef.new(shape: JsonValue, required: true, location_name: "data", metadata: {"jsonvalue" => true}))
- Event.add_member(:timestamp, Shapes::ShapeRef.new(shape: Timestamp, required: true, location_name: "timestamp"))
- Event.add_member(:type, Shapes::ShapeRef.new(shape: EventType, required: true, location_name: "type"))
- Event.struct_class = Types::Event
-
- EventList.member = Shapes::ShapeRef.new(shape: Event)
-
- Experiment.add_member(:arn, Shapes::ShapeRef.new(shape: ExperimentArn, required: true, location_name: "arn"))
- Experiment.add_member(:created_time, Shapes::ShapeRef.new(shape: Timestamp, required: true, location_name: "createdTime"))
- Experiment.add_member(:description, Shapes::ShapeRef.new(shape: Description, location_name: "description"))
- Experiment.add_member(:execution, Shapes::ShapeRef.new(shape: ExperimentExecution, location_name: "execution"))
- Experiment.add_member(:last_updated_time, Shapes::ShapeRef.new(shape: Timestamp, required: true, location_name: "lastUpdatedTime"))
- Experiment.add_member(:metric_goals, Shapes::ShapeRef.new(shape: MetricGoalsList, location_name: "metricGoals"))
- Experiment.add_member(:name, Shapes::ShapeRef.new(shape: ExperimentName, required: true, location_name: "name"))
- Experiment.add_member(:online_ab_definition, Shapes::ShapeRef.new(shape: OnlineAbDefinition, location_name: "onlineAbDefinition"))
- Experiment.add_member(:project, Shapes::ShapeRef.new(shape: ProjectArn, location_name: "project"))
- Experiment.add_member(:randomization_salt, Shapes::ShapeRef.new(shape: RandomizationSalt, location_name: "randomizationSalt"))
- Experiment.add_member(:sampling_rate, Shapes::ShapeRef.new(shape: SplitWeight, location_name: "samplingRate"))
- Experiment.add_member(:schedule, Shapes::ShapeRef.new(shape: ExperimentSchedule, location_name: "schedule"))
- Experiment.add_member(:segment, Shapes::ShapeRef.new(shape: SegmentArn, location_name: "segment"))
- Experiment.add_member(:status, Shapes::ShapeRef.new(shape: ExperimentStatus, required: true, location_name: "status"))
- Experiment.add_member(:status_reason, Shapes::ShapeRef.new(shape: Description, location_name: "statusReason"))
- Experiment.add_member(:tags, Shapes::ShapeRef.new(shape: TagMap, location_name: "tags"))
- Experiment.add_member(:treatments, Shapes::ShapeRef.new(shape: TreatmentList, location_name: "treatments"))
- Experiment.add_member(:type, Shapes::ShapeRef.new(shape: ExperimentType, required: true, location_name: "type"))
- Experiment.struct_class = Types::Experiment
-
- ExperimentExecution.add_member(:ended_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "endedTime"))
- ExperimentExecution.add_member(:started_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "startedTime"))
- ExperimentExecution.struct_class = Types::ExperimentExecution
-
- ExperimentList.member = Shapes::ShapeRef.new(shape: Experiment)
-
- ExperimentReport.add_member(:content, Shapes::ShapeRef.new(shape: JsonValue, location_name: "content", metadata: {"jsonvalue" => true}))
- ExperimentReport.add_member(:metric_name, Shapes::ShapeRef.new(shape: CwDimensionSafeName, location_name: "metricName"))
- ExperimentReport.add_member(:report_name, Shapes::ShapeRef.new(shape: ExperimentReportName, location_name: "reportName"))
- ExperimentReport.add_member(:treatment_name, Shapes::ShapeRef.new(shape: TreatmentName, location_name: "treatmentName"))
- ExperimentReport.struct_class = Types::ExperimentReport
-
- ExperimentReportList.member = Shapes::ShapeRef.new(shape: ExperimentReport)
-
- ExperimentReportNameList.member = Shapes::ShapeRef.new(shape: ExperimentReportName)
-
- ExperimentResultRequestTypeList.member = Shapes::ShapeRef.new(shape: ExperimentResultRequestType)
-
- ExperimentResultsData.add_member(:metric_name, Shapes::ShapeRef.new(shape: CwDimensionSafeName, location_name: "metricName"))
- ExperimentResultsData.add_member(:result_stat, Shapes::ShapeRef.new(shape: ExperimentResultResponseType, location_name: "resultStat"))
- ExperimentResultsData.add_member(:treatment_name, Shapes::ShapeRef.new(shape: TreatmentName, location_name: "treatmentName"))
- ExperimentResultsData.add_member(:values, Shapes::ShapeRef.new(shape: DoubleValueList, location_name: "values"))
- ExperimentResultsData.struct_class = Types::ExperimentResultsData
-
- ExperimentResultsDataList.member = Shapes::ShapeRef.new(shape: ExperimentResultsData)
-
- ExperimentSchedule.add_member(:analysis_complete_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "analysisCompleteTime"))
- ExperimentSchedule.struct_class = Types::ExperimentSchedule
-
- Feature.add_member(:arn, Shapes::ShapeRef.new(shape: FeatureArn, required: true, location_name: "arn"))
- Feature.add_member(:created_time, Shapes::ShapeRef.new(shape: Timestamp, required: true, location_name: "createdTime"))
- Feature.add_member(:default_variation, Shapes::ShapeRef.new(shape: VariationName, location_name: "defaultVariation"))
- Feature.add_member(:description, Shapes::ShapeRef.new(shape: Description, location_name: "description"))
- Feature.add_member(:entity_overrides, Shapes::ShapeRef.new(shape: EntityOverrideMap, location_name: "entityOverrides"))
- Feature.add_member(:evaluation_rules, Shapes::ShapeRef.new(shape: EvaluationRulesList, location_name: "evaluationRules"))
- Feature.add_member(:evaluation_strategy, Shapes::ShapeRef.new(shape: FeatureEvaluationStrategy, required: true, location_name: "evaluationStrategy"))
- Feature.add_member(:last_updated_time, Shapes::ShapeRef.new(shape: Timestamp, required: true, location_name: "lastUpdatedTime"))
- Feature.add_member(:name, Shapes::ShapeRef.new(shape: FeatureName, required: true, location_name: "name"))
- Feature.add_member(:project, Shapes::ShapeRef.new(shape: ProjectArn, location_name: "project"))
- Feature.add_member(:status, Shapes::ShapeRef.new(shape: FeatureStatus, required: true, location_name: "status"))
- Feature.add_member(:tags, Shapes::ShapeRef.new(shape: TagMap, location_name: "tags"))
- Feature.add_member(:value_type, Shapes::ShapeRef.new(shape: VariationValueType, required: true, location_name: "valueType"))
- Feature.add_member(:variations, Shapes::ShapeRef.new(shape: VariationsList, required: true, location_name: "variations"))
- Feature.struct_class = Types::Feature
-
- FeatureSummariesList.member = Shapes::ShapeRef.new(shape: FeatureSummary)
-
- FeatureSummary.add_member(:arn, Shapes::ShapeRef.new(shape: Arn, required: true, location_name: "arn"))
- FeatureSummary.add_member(:created_time, Shapes::ShapeRef.new(shape: Timestamp, required: true, location_name: "createdTime"))
- FeatureSummary.add_member(:default_variation, Shapes::ShapeRef.new(shape: VariationName, location_name: "defaultVariation"))
- FeatureSummary.add_member(:evaluation_rules, Shapes::ShapeRef.new(shape: EvaluationRulesList, location_name: "evaluationRules"))
- FeatureSummary.add_member(:evaluation_strategy, Shapes::ShapeRef.new(shape: FeatureEvaluationStrategy, required: true, location_name: "evaluationStrategy"))
- FeatureSummary.add_member(:last_updated_time, Shapes::ShapeRef.new(shape: Timestamp, required: true, location_name: "lastUpdatedTime"))
- FeatureSummary.add_member(:name, Shapes::ShapeRef.new(shape: FeatureName, required: true, location_name: "name"))
- FeatureSummary.add_member(:project, Shapes::ShapeRef.new(shape: ProjectRef, location_name: "project"))
- FeatureSummary.add_member(:status, Shapes::ShapeRef.new(shape: FeatureStatus, required: true, location_name: "status"))
- FeatureSummary.add_member(:tags, Shapes::ShapeRef.new(shape: TagMap, location_name: "tags"))
- FeatureSummary.struct_class = Types::FeatureSummary
-
- FeatureToVariationMap.key = Shapes::ShapeRef.new(shape: FeatureName)
- FeatureToVariationMap.value = Shapes::ShapeRef.new(shape: VariationName)
-
- GetExperimentRequest.add_member(:experiment, Shapes::ShapeRef.new(shape: ExperimentName, required: true, location: "uri", location_name: "experiment"))
- GetExperimentRequest.add_member(:project, Shapes::ShapeRef.new(shape: ProjectRef, required: true, location: "uri", location_name: "project"))
- GetExperimentRequest.struct_class = Types::GetExperimentRequest
-
- GetExperimentResponse.add_member(:experiment, Shapes::ShapeRef.new(shape: Experiment, location_name: "experiment"))
- GetExperimentResponse.struct_class = Types::GetExperimentResponse
-
- GetExperimentResultsRequest.add_member(:base_stat, Shapes::ShapeRef.new(shape: ExperimentBaseStat, location_name: "baseStat"))
- GetExperimentResultsRequest.add_member(:end_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "endTime"))
- GetExperimentResultsRequest.add_member(:experiment, Shapes::ShapeRef.new(shape: ExperimentName, required: true, location: "uri", location_name: "experiment"))
- GetExperimentResultsRequest.add_member(:metric_names, Shapes::ShapeRef.new(shape: MetricNameList, required: true, location_name: "metricNames"))
- GetExperimentResultsRequest.add_member(:period, Shapes::ShapeRef.new(shape: ResultsPeriod, location_name: "period"))
- GetExperimentResultsRequest.add_member(:project, Shapes::ShapeRef.new(shape: ProjectRef, required: true, location: "uri", location_name: "project"))
- GetExperimentResultsRequest.add_member(:report_names, Shapes::ShapeRef.new(shape: ExperimentReportNameList, location_name: "reportNames"))
- GetExperimentResultsRequest.add_member(:result_stats, Shapes::ShapeRef.new(shape: ExperimentResultRequestTypeList, location_name: "resultStats"))
- GetExperimentResultsRequest.add_member(:start_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "startTime"))
- GetExperimentResultsRequest.add_member(:treatment_names, Shapes::ShapeRef.new(shape: TreatmentNameList, required: true, location_name: "treatmentNames"))
- GetExperimentResultsRequest.struct_class = Types::GetExperimentResultsRequest
-
- GetExperimentResultsResponse.add_member(:details, Shapes::ShapeRef.new(shape: String, location_name: "details"))
- GetExperimentResultsResponse.add_member(:reports, Shapes::ShapeRef.new(shape: ExperimentReportList, location_name: "reports"))
- GetExperimentResultsResponse.add_member(:results_data, Shapes::ShapeRef.new(shape: ExperimentResultsDataList, location_name: "resultsData"))
- GetExperimentResultsResponse.add_member(:timestamps, Shapes::ShapeRef.new(shape: TimestampList, location_name: "timestamps"))
- GetExperimentResultsResponse.struct_class = Types::GetExperimentResultsResponse
-
- GetFeatureRequest.add_member(:feature, Shapes::ShapeRef.new(shape: FeatureName, required: true, location: "uri", location_name: "feature"))
- GetFeatureRequest.add_member(:project, Shapes::ShapeRef.new(shape: ProjectRef, required: true, location: "uri", location_name: "project"))
- GetFeatureRequest.struct_class = Types::GetFeatureRequest
-
- GetFeatureResponse.add_member(:feature, Shapes::ShapeRef.new(shape: Feature, required: true, location_name: "feature"))
- GetFeatureResponse.struct_class = Types::GetFeatureResponse
-
- GetLaunchRequest.add_member(:launch, Shapes::ShapeRef.new(shape: LaunchName, required: true, location: "uri", location_name: "launch"))
- GetLaunchRequest.add_member(:project, Shapes::ShapeRef.new(shape: ProjectRef, required: true, location: "uri", location_name: "project"))
- GetLaunchRequest.struct_class = Types::GetLaunchRequest
-
- GetLaunchResponse.add_member(:launch, Shapes::ShapeRef.new(shape: Launch, location_name: "launch"))
- GetLaunchResponse.struct_class = Types::GetLaunchResponse
-
- GetProjectRequest.add_member(:project, Shapes::ShapeRef.new(shape: ProjectRef, required: true, location: "uri", location_name: "project"))
- GetProjectRequest.struct_class = Types::GetProjectRequest
-
- GetProjectResponse.add_member(:project, Shapes::ShapeRef.new(shape: Project, required: true, location_name: "project"))
- GetProjectResponse.struct_class = Types::GetProjectResponse
-
- GetSegmentRequest.add_member(:segment, Shapes::ShapeRef.new(shape: SegmentRef, required: true, location: "uri", location_name: "segment"))
- GetSegmentRequest.struct_class = Types::GetSegmentRequest
-
- GetSegmentResponse.add_member(:segment, Shapes::ShapeRef.new(shape: Segment, required: true, location_name: "segment"))
- GetSegmentResponse.struct_class = Types::GetSegmentResponse
-
- GroupToWeightMap.key = Shapes::ShapeRef.new(shape: GroupName)
- GroupToWeightMap.value = Shapes::ShapeRef.new(shape: SplitWeight)
-
- InternalServerException.add_member(:message, Shapes::ShapeRef.new(shape: String, location_name: "message"))
- InternalServerException.struct_class = Types::InternalServerException
-
- Launch.add_member(:arn, Shapes::ShapeRef.new(shape: LaunchArn, required: true, location_name: "arn"))
- Launch.add_member(:created_time, Shapes::ShapeRef.new(shape: Timestamp, required: true, location_name: "createdTime"))
- Launch.add_member(:description, Shapes::ShapeRef.new(shape: Description, location_name: "description"))
- Launch.add_member(:execution, Shapes::ShapeRef.new(shape: LaunchExecution, location_name: "execution"))
- Launch.add_member(:groups, Shapes::ShapeRef.new(shape: LaunchGroupList, location_name: "groups"))
- Launch.add_member(:last_updated_time, Shapes::ShapeRef.new(shape: Timestamp, required: true, location_name: "lastUpdatedTime"))
- Launch.add_member(:metric_monitors, Shapes::ShapeRef.new(shape: MetricMonitorList, location_name: "metricMonitors"))
- Launch.add_member(:name, Shapes::ShapeRef.new(shape: LaunchName, required: true, location_name: "name"))
- Launch.add_member(:project, Shapes::ShapeRef.new(shape: ProjectRef, location_name: "project"))
- Launch.add_member(:randomization_salt, Shapes::ShapeRef.new(shape: RandomizationSalt, location_name: "randomizationSalt"))
- Launch.add_member(:scheduled_splits_definition, Shapes::ShapeRef.new(shape: ScheduledSplitsLaunchDefinition, location_name: "scheduledSplitsDefinition"))
- Launch.add_member(:status, Shapes::ShapeRef.new(shape: LaunchStatus, required: true, location_name: "status"))
- Launch.add_member(:status_reason, Shapes::ShapeRef.new(shape: Description, location_name: "statusReason"))
- Launch.add_member(:tags, Shapes::ShapeRef.new(shape: TagMap, location_name: "tags"))
- Launch.add_member(:type, Shapes::ShapeRef.new(shape: LaunchType, required: true, location_name: "type"))
- Launch.struct_class = Types::Launch
-
- LaunchExecution.add_member(:ended_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "endedTime"))
- LaunchExecution.add_member(:started_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "startedTime"))
- LaunchExecution.struct_class = Types::LaunchExecution
-
- LaunchGroup.add_member(:description, Shapes::ShapeRef.new(shape: Description, location_name: "description"))
- LaunchGroup.add_member(:feature_variations, Shapes::ShapeRef.new(shape: FeatureToVariationMap, required: true, location_name: "featureVariations"))
- LaunchGroup.add_member(:name, Shapes::ShapeRef.new(shape: GroupName, required: true, location_name: "name"))
- LaunchGroup.struct_class = Types::LaunchGroup
-
- LaunchGroupConfig.add_member(:description, Shapes::ShapeRef.new(shape: Description, location_name: "description"))
- LaunchGroupConfig.add_member(:feature, Shapes::ShapeRef.new(shape: FeatureName, required: true, location_name: "feature"))
- LaunchGroupConfig.add_member(:name, Shapes::ShapeRef.new(shape: GroupName, required: true, location_name: "name"))
- LaunchGroupConfig.add_member(:variation, Shapes::ShapeRef.new(shape: VariationName, required: true, location_name: "variation"))
- LaunchGroupConfig.struct_class = Types::LaunchGroupConfig
-
- LaunchGroupConfigList.member = Shapes::ShapeRef.new(shape: LaunchGroupConfig)
-
- LaunchGroupList.member = Shapes::ShapeRef.new(shape: LaunchGroup)
-
- LaunchesList.member = Shapes::ShapeRef.new(shape: Launch)
-
- ListExperimentsRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxExperiments, location: "querystring", location_name: "maxResults"))
- ListExperimentsRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location: "querystring", location_name: "nextToken"))
- ListExperimentsRequest.add_member(:project, Shapes::ShapeRef.new(shape: ProjectRef, required: true, location: "uri", location_name: "project"))
- ListExperimentsRequest.add_member(:status, Shapes::ShapeRef.new(shape: ExperimentStatus, location: "querystring", location_name: "status"))
- ListExperimentsRequest.struct_class = Types::ListExperimentsRequest
-
- ListExperimentsResponse.add_member(:experiments, Shapes::ShapeRef.new(shape: ExperimentList, location_name: "experiments"))
- ListExperimentsResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "nextToken"))
- ListExperimentsResponse.struct_class = Types::ListExperimentsResponse
-
- ListFeaturesRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxFeatures, location: "querystring", location_name: "maxResults"))
- ListFeaturesRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location: "querystring", location_name: "nextToken"))
- ListFeaturesRequest.add_member(:project, Shapes::ShapeRef.new(shape: ProjectRef, required: true, location: "uri", location_name: "project"))
- ListFeaturesRequest.struct_class = Types::ListFeaturesRequest
-
- ListFeaturesResponse.add_member(:features, Shapes::ShapeRef.new(shape: FeatureSummariesList, location_name: "features"))
- ListFeaturesResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "nextToken"))
- ListFeaturesResponse.struct_class = Types::ListFeaturesResponse
-
- ListLaunchesRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxLaunches, location: "querystring", location_name: "maxResults"))
- ListLaunchesRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location: "querystring", location_name: "nextToken"))
- ListLaunchesRequest.add_member(:project, Shapes::ShapeRef.new(shape: ProjectRef, required: true, location: "uri", location_name: "project"))
- ListLaunchesRequest.add_member(:status, Shapes::ShapeRef.new(shape: LaunchStatus, location: "querystring", location_name: "status"))
- ListLaunchesRequest.struct_class = Types::ListLaunchesRequest
-
- ListLaunchesResponse.add_member(:launches, Shapes::ShapeRef.new(shape: LaunchesList, location_name: "launches"))
- ListLaunchesResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "nextToken"))
- ListLaunchesResponse.struct_class = Types::ListLaunchesResponse
-
- ListProjectsRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxProjects, location: "querystring", location_name: "maxResults"))
- ListProjectsRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location: "querystring", location_name: "nextToken"))
- ListProjectsRequest.struct_class = Types::ListProjectsRequest
-
- ListProjectsResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "nextToken"))
- ListProjectsResponse.add_member(:projects, Shapes::ShapeRef.new(shape: ProjectSummariesList, location_name: "projects"))
- ListProjectsResponse.struct_class = Types::ListProjectsResponse
-
- ListSegmentReferencesRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxReferences, location: "querystring", location_name: "maxResults"))
- ListSegmentReferencesRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location: "querystring", location_name: "nextToken"))
- ListSegmentReferencesRequest.add_member(:segment, Shapes::ShapeRef.new(shape: SegmentRef, required: true, location: "uri", location_name: "segment"))
- ListSegmentReferencesRequest.add_member(:type, Shapes::ShapeRef.new(shape: SegmentReferenceResourceType, required: true, location: "querystring", location_name: "type"))
- ListSegmentReferencesRequest.struct_class = Types::ListSegmentReferencesRequest
-
- ListSegmentReferencesResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "nextToken"))
- ListSegmentReferencesResponse.add_member(:referenced_by, Shapes::ShapeRef.new(shape: RefResourceList, location_name: "referencedBy"))
- ListSegmentReferencesResponse.struct_class = Types::ListSegmentReferencesResponse
-
- ListSegmentsRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxSegments, location: "querystring", location_name: "maxResults"))
- ListSegmentsRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location: "querystring", location_name: "nextToken"))
- ListSegmentsRequest.struct_class = Types::ListSegmentsRequest
-
- ListSegmentsResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "nextToken"))
- ListSegmentsResponse.add_member(:segments, Shapes::ShapeRef.new(shape: SegmentList, location_name: "segments"))
- ListSegmentsResponse.struct_class = Types::ListSegmentsResponse
-
- ListTagsForResourceRequest.add_member(:resource_arn, Shapes::ShapeRef.new(shape: Arn, required: true, location: "uri", location_name: "resourceArn"))
- ListTagsForResourceRequest.struct_class = Types::ListTagsForResourceRequest
-
- ListTagsForResourceResponse.add_member(:tags, Shapes::ShapeRef.new(shape: TagMap, location_name: "tags"))
- ListTagsForResourceResponse.struct_class = Types::ListTagsForResourceResponse
-
- MetricDefinition.add_member(:entity_id_key, Shapes::ShapeRef.new(shape: JsonPath, location_name: "entityIdKey"))
- MetricDefinition.add_member(:event_pattern, Shapes::ShapeRef.new(shape: JsonValue, location_name: "eventPattern", metadata: {"jsonvalue" => true}))
- MetricDefinition.add_member(:name, Shapes::ShapeRef.new(shape: CwDimensionSafeName, location_name: "name"))
- MetricDefinition.add_member(:unit_label, Shapes::ShapeRef.new(shape: MetricUnitLabel, location_name: "unitLabel"))
- MetricDefinition.add_member(:value_key, Shapes::ShapeRef.new(shape: JsonPath, location_name: "valueKey"))
- MetricDefinition.struct_class = Types::MetricDefinition
-
- MetricDefinitionConfig.add_member(:entity_id_key, Shapes::ShapeRef.new(shape: JsonPath, required: true, location_name: "entityIdKey"))
- MetricDefinitionConfig.add_member(:event_pattern, Shapes::ShapeRef.new(shape: MetricDefinitionConfigEventPatternString, location_name: "eventPattern", metadata: {"jsonvalue" => true}))
- MetricDefinitionConfig.add_member(:name, Shapes::ShapeRef.new(shape: CwDimensionSafeName, required: true, location_name: "name"))
- MetricDefinitionConfig.add_member(:unit_label, Shapes::ShapeRef.new(shape: MetricUnitLabel, location_name: "unitLabel"))
- MetricDefinitionConfig.add_member(:value_key, Shapes::ShapeRef.new(shape: JsonPath, required: true, location_name: "valueKey"))
- MetricDefinitionConfig.struct_class = Types::MetricDefinitionConfig
-
- MetricGoal.add_member(:desired_change, Shapes::ShapeRef.new(shape: ChangeDirectionEnum, location_name: "desiredChange"))
- MetricGoal.add_member(:metric_definition, Shapes::ShapeRef.new(shape: MetricDefinition, required: true, location_name: "metricDefinition"))
- MetricGoal.struct_class = Types::MetricGoal
-
- MetricGoalConfig.add_member(:desired_change, Shapes::ShapeRef.new(shape: ChangeDirectionEnum, location_name: "desiredChange"))
- MetricGoalConfig.add_member(:metric_definition, Shapes::ShapeRef.new(shape: MetricDefinitionConfig, required: true, location_name: "metricDefinition"))
- MetricGoalConfig.struct_class = Types::MetricGoalConfig
-
- MetricGoalConfigList.member = Shapes::ShapeRef.new(shape: MetricGoalConfig)
-
- MetricGoalsList.member = Shapes::ShapeRef.new(shape: MetricGoal)
-
- MetricMonitor.add_member(:metric_definition, Shapes::ShapeRef.new(shape: MetricDefinition, required: true, location_name: "metricDefinition"))
- MetricMonitor.struct_class = Types::MetricMonitor
-
- MetricMonitorConfig.add_member(:metric_definition, Shapes::ShapeRef.new(shape: MetricDefinitionConfig, required: true, location_name: "metricDefinition"))
- MetricMonitorConfig.struct_class = Types::MetricMonitorConfig
-
- MetricMonitorConfigList.member = Shapes::ShapeRef.new(shape: MetricMonitorConfig)
-
- MetricMonitorList.member = Shapes::ShapeRef.new(shape: MetricMonitor)
-
- MetricNameList.member = Shapes::ShapeRef.new(shape: CwDimensionSafeName)
-
- OnlineAbConfig.add_member(:control_treatment_name, Shapes::ShapeRef.new(shape: TreatmentName, location_name: "controlTreatmentName"))
- OnlineAbConfig.add_member(:treatment_weights, Shapes::ShapeRef.new(shape: TreatmentToWeightMap, location_name: "treatmentWeights"))
- OnlineAbConfig.struct_class = Types::OnlineAbConfig
-
- OnlineAbDefinition.add_member(:control_treatment_name, Shapes::ShapeRef.new(shape: TreatmentName, location_name: "controlTreatmentName"))
- OnlineAbDefinition.add_member(:treatment_weights, Shapes::ShapeRef.new(shape: TreatmentToWeightMap, location_name: "treatmentWeights"))
- OnlineAbDefinition.struct_class = Types::OnlineAbDefinition
-
- Project.add_member(:active_experiment_count, Shapes::ShapeRef.new(shape: Long, location_name: "activeExperimentCount"))
- Project.add_member(:active_launch_count, Shapes::ShapeRef.new(shape: Long, location_name: "activeLaunchCount"))
- Project.add_member(:app_config_resource, Shapes::ShapeRef.new(shape: ProjectAppConfigResource, location_name: "appConfigResource"))
- Project.add_member(:arn, Shapes::ShapeRef.new(shape: ProjectArn, required: true, location_name: "arn"))
- Project.add_member(:created_time, Shapes::ShapeRef.new(shape: Timestamp, required: true, location_name: "createdTime"))
- Project.add_member(:data_delivery, Shapes::ShapeRef.new(shape: ProjectDataDelivery, location_name: "dataDelivery"))
- Project.add_member(:description, Shapes::ShapeRef.new(shape: Description, location_name: "description"))
- Project.add_member(:experiment_count, Shapes::ShapeRef.new(shape: Long, location_name: "experimentCount"))
- Project.add_member(:feature_count, Shapes::ShapeRef.new(shape: Long, location_name: "featureCount"))
- Project.add_member(:last_updated_time, Shapes::ShapeRef.new(shape: Timestamp, required: true, location_name: "lastUpdatedTime"))
- Project.add_member(:launch_count, Shapes::ShapeRef.new(shape: Long, location_name: "launchCount"))
- Project.add_member(:name, Shapes::ShapeRef.new(shape: ProjectName, required: true, location_name: "name"))
- Project.add_member(:status, Shapes::ShapeRef.new(shape: ProjectStatus, required: true, location_name: "status"))
- Project.add_member(:tags, Shapes::ShapeRef.new(shape: TagMap, location_name: "tags"))
- Project.struct_class = Types::Project
-
- ProjectAppConfigResource.add_member(:application_id, Shapes::ShapeRef.new(shape: AppConfigResourceId, required: true, location_name: "applicationId"))
- ProjectAppConfigResource.add_member(:configuration_profile_id, Shapes::ShapeRef.new(shape: AppConfigResourceId, required: true, location_name: "configurationProfileId"))
- ProjectAppConfigResource.add_member(:environment_id, Shapes::ShapeRef.new(shape: AppConfigResourceId, required: true, location_name: "environmentId"))
- ProjectAppConfigResource.struct_class = Types::ProjectAppConfigResource
-
- ProjectAppConfigResourceConfig.add_member(:application_id, Shapes::ShapeRef.new(shape: AppConfigResourceId, location_name: "applicationId"))
- ProjectAppConfigResourceConfig.add_member(:environment_id, Shapes::ShapeRef.new(shape: AppConfigResourceId, location_name: "environmentId"))
- ProjectAppConfigResourceConfig.struct_class = Types::ProjectAppConfigResourceConfig
-
- ProjectDataDelivery.add_member(:cloud_watch_logs, Shapes::ShapeRef.new(shape: CloudWatchLogsDestination, location_name: "cloudWatchLogs"))
- ProjectDataDelivery.add_member(:s3_destination, Shapes::ShapeRef.new(shape: S3Destination, location_name: "s3Destination"))
- ProjectDataDelivery.struct_class = Types::ProjectDataDelivery
-
- ProjectDataDeliveryConfig.add_member(:cloud_watch_logs, Shapes::ShapeRef.new(shape: CloudWatchLogsDestinationConfig, location_name: "cloudWatchLogs"))
- ProjectDataDeliveryConfig.add_member(:s3_destination, Shapes::ShapeRef.new(shape: S3DestinationConfig, location_name: "s3Destination"))
- ProjectDataDeliveryConfig.struct_class = Types::ProjectDataDeliveryConfig
-
- ProjectSummariesList.member = Shapes::ShapeRef.new(shape: ProjectSummary)
-
- ProjectSummary.add_member(:active_experiment_count, Shapes::ShapeRef.new(shape: Long, location_name: "activeExperimentCount"))
- ProjectSummary.add_member(:active_launch_count, Shapes::ShapeRef.new(shape: Long, location_name: "activeLaunchCount"))
- ProjectSummary.add_member(:arn, Shapes::ShapeRef.new(shape: ProjectArn, required: true, location_name: "arn"))
- ProjectSummary.add_member(:created_time, Shapes::ShapeRef.new(shape: Timestamp, required: true, location_name: "createdTime"))
- ProjectSummary.add_member(:description, Shapes::ShapeRef.new(shape: Description, location_name: "description"))
- ProjectSummary.add_member(:experiment_count, Shapes::ShapeRef.new(shape: Long, location_name: "experimentCount"))
- ProjectSummary.add_member(:feature_count, Shapes::ShapeRef.new(shape: Long, location_name: "featureCount"))
- ProjectSummary.add_member(:last_updated_time, Shapes::ShapeRef.new(shape: Timestamp, required: true, location_name: "lastUpdatedTime"))
- ProjectSummary.add_member(:launch_count, Shapes::ShapeRef.new(shape: Long, location_name: "launchCount"))
- ProjectSummary.add_member(:name, Shapes::ShapeRef.new(shape: ProjectName, required: true, location_name: "name"))
- ProjectSummary.add_member(:status, Shapes::ShapeRef.new(shape: ProjectStatus, required: true, location_name: "status"))
- ProjectSummary.add_member(:tags, Shapes::ShapeRef.new(shape: TagMap, location_name: "tags"))
- ProjectSummary.struct_class = Types::ProjectSummary
-
- PutProjectEventsRequest.add_member(:events, Shapes::ShapeRef.new(shape: EventList, required: true, location_name: "events"))
- PutProjectEventsRequest.add_member(:project, Shapes::ShapeRef.new(shape: ProjectRef, required: true, location: "uri", location_name: "project"))
- PutProjectEventsRequest.struct_class = Types::PutProjectEventsRequest
-
- PutProjectEventsResponse.add_member(:event_results, Shapes::ShapeRef.new(shape: PutProjectEventsResultEntryList, location_name: "eventResults"))
- PutProjectEventsResponse.add_member(:failed_event_count, Shapes::ShapeRef.new(shape: Integer, location_name: "failedEventCount"))
- PutProjectEventsResponse.struct_class = Types::PutProjectEventsResponse
-
- PutProjectEventsResultEntry.add_member(:error_code, Shapes::ShapeRef.new(shape: ErrorCodeEnum, location_name: "errorCode"))
- PutProjectEventsResultEntry.add_member(:error_message, Shapes::ShapeRef.new(shape: ErrorMessage, location_name: "errorMessage"))
- PutProjectEventsResultEntry.add_member(:event_id, Shapes::ShapeRef.new(shape: Uuid, location_name: "eventId"))
- PutProjectEventsResultEntry.struct_class = Types::PutProjectEventsResultEntry
-
- PutProjectEventsResultEntryList.member = Shapes::ShapeRef.new(shape: PutProjectEventsResultEntry)
-
- RefResource.add_member(:arn, Shapes::ShapeRef.new(shape: String, location_name: "arn"))
- RefResource.add_member(:end_time, Shapes::ShapeRef.new(shape: String, location_name: "endTime"))
- RefResource.add_member(:last_updated_on, Shapes::ShapeRef.new(shape: String, location_name: "lastUpdatedOn"))
- RefResource.add_member(:name, Shapes::ShapeRef.new(shape: String, required: true, location_name: "name"))
- RefResource.add_member(:start_time, Shapes::ShapeRef.new(shape: String, location_name: "startTime"))
- RefResource.add_member(:status, Shapes::ShapeRef.new(shape: String, location_name: "status"))
- RefResource.add_member(:type, Shapes::ShapeRef.new(shape: String, required: true, location_name: "type"))
- RefResource.struct_class = Types::RefResource
-
- RefResourceList.member = Shapes::ShapeRef.new(shape: RefResource)
-
- ResourceNotFoundException.add_member(:message, Shapes::ShapeRef.new(shape: String, location_name: "message"))
- ResourceNotFoundException.add_member(:resource_id, Shapes::ShapeRef.new(shape: String, location_name: "resourceId"))
- ResourceNotFoundException.add_member(:resource_type, Shapes::ShapeRef.new(shape: String, location_name: "resourceType"))
- ResourceNotFoundException.struct_class = Types::ResourceNotFoundException
-
- S3Destination.add_member(:bucket, Shapes::ShapeRef.new(shape: S3BucketSafeName, location_name: "bucket"))
- S3Destination.add_member(:prefix, Shapes::ShapeRef.new(shape: S3PrefixSafeName, location_name: "prefix"))
- S3Destination.struct_class = Types::S3Destination
-
- S3DestinationConfig.add_member(:bucket, Shapes::ShapeRef.new(shape: S3BucketSafeName, location_name: "bucket"))
- S3DestinationConfig.add_member(:prefix, Shapes::ShapeRef.new(shape: S3PrefixSafeName, location_name: "prefix"))
- S3DestinationConfig.struct_class = Types::S3DestinationConfig
-
- ScheduledSplit.add_member(:group_weights, Shapes::ShapeRef.new(shape: GroupToWeightMap, location_name: "groupWeights"))
- ScheduledSplit.add_member(:segment_overrides, Shapes::ShapeRef.new(shape: SegmentOverridesList, location_name: "segmentOverrides"))
- ScheduledSplit.add_member(:start_time, Shapes::ShapeRef.new(shape: Timestamp, required: true, location_name: "startTime"))
- ScheduledSplit.struct_class = Types::ScheduledSplit
-
- ScheduledSplitConfig.add_member(:group_weights, Shapes::ShapeRef.new(shape: GroupToWeightMap, required: true, location_name: "groupWeights"))
- ScheduledSplitConfig.add_member(:segment_overrides, Shapes::ShapeRef.new(shape: SegmentOverridesList, location_name: "segmentOverrides"))
- ScheduledSplitConfig.add_member(:start_time, Shapes::ShapeRef.new(shape: Timestamp, required: true, location_name: "startTime"))
- ScheduledSplitConfig.struct_class = Types::ScheduledSplitConfig
-
- ScheduledSplitConfigList.member = Shapes::ShapeRef.new(shape: ScheduledSplitConfig)
-
- ScheduledSplitsLaunchConfig.add_member(:steps, Shapes::ShapeRef.new(shape: ScheduledSplitConfigList, required: true, location_name: "steps"))
- ScheduledSplitsLaunchConfig.struct_class = Types::ScheduledSplitsLaunchConfig
-
- ScheduledSplitsLaunchDefinition.add_member(:steps, Shapes::ShapeRef.new(shape: ScheduledStepList, location_name: "steps"))
- ScheduledSplitsLaunchDefinition.struct_class = Types::ScheduledSplitsLaunchDefinition
-
- ScheduledStepList.member = Shapes::ShapeRef.new(shape: ScheduledSplit)
-
- Segment.add_member(:arn, Shapes::ShapeRef.new(shape: SegmentArn, required: true, location_name: "arn"))
- Segment.add_member(:created_time, Shapes::ShapeRef.new(shape: Timestamp, required: true, location_name: "createdTime"))
- Segment.add_member(:description, Shapes::ShapeRef.new(shape: Description, location_name: "description"))
- Segment.add_member(:experiment_count, Shapes::ShapeRef.new(shape: Long, location_name: "experimentCount"))
- Segment.add_member(:last_updated_time, Shapes::ShapeRef.new(shape: Timestamp, required: true, location_name: "lastUpdatedTime"))
- Segment.add_member(:launch_count, Shapes::ShapeRef.new(shape: Long, location_name: "launchCount"))
- Segment.add_member(:name, Shapes::ShapeRef.new(shape: SegmentName, required: true, location_name: "name"))
- Segment.add_member(:pattern, Shapes::ShapeRef.new(shape: SegmentPattern, required: true, location_name: "pattern", metadata: {"jsonvalue" => true}))
- Segment.add_member(:tags, Shapes::ShapeRef.new(shape: TagMap, location_name: "tags"))
- Segment.struct_class = Types::Segment
-
- SegmentList.member = Shapes::ShapeRef.new(shape: Segment)
-
- SegmentOverride.add_member(:evaluation_order, Shapes::ShapeRef.new(shape: Long, required: true, location_name: "evaluationOrder"))
- SegmentOverride.add_member(:segment, Shapes::ShapeRef.new(shape: SegmentRef, required: true, location_name: "segment"))
- SegmentOverride.add_member(:weights, Shapes::ShapeRef.new(shape: GroupToWeightMap, required: true, location_name: "weights"))
- SegmentOverride.struct_class = Types::SegmentOverride
-
- SegmentOverridesList.member = Shapes::ShapeRef.new(shape: SegmentOverride)
-
- ServiceQuotaExceededException.add_member(:message, Shapes::ShapeRef.new(shape: String, location_name: "message"))
- ServiceQuotaExceededException.add_member(:quota_code, Shapes::ShapeRef.new(shape: String, location_name: "quotaCode"))
- ServiceQuotaExceededException.add_member(:resource_id, Shapes::ShapeRef.new(shape: String, location_name: "resourceId"))
- ServiceQuotaExceededException.add_member(:resource_type, Shapes::ShapeRef.new(shape: String, location_name: "resourceType"))
- ServiceQuotaExceededException.add_member(:service_code, Shapes::ShapeRef.new(shape: String, location_name: "serviceCode"))
- ServiceQuotaExceededException.struct_class = Types::ServiceQuotaExceededException
-
- ServiceUnavailableException.add_member(:message, Shapes::ShapeRef.new(shape: String, location_name: "message"))
- ServiceUnavailableException.struct_class = Types::ServiceUnavailableException
-
- StartExperimentRequest.add_member(:analysis_complete_time, Shapes::ShapeRef.new(shape: Timestamp, required: true, location_name: "analysisCompleteTime"))
- StartExperimentRequest.add_member(:experiment, Shapes::ShapeRef.new(shape: ExperimentName, required: true, location: "uri", location_name: "experiment"))
- StartExperimentRequest.add_member(:project, Shapes::ShapeRef.new(shape: ProjectRef, required: true, location: "uri", location_name: "project"))
- StartExperimentRequest.struct_class = Types::StartExperimentRequest
-
- StartExperimentResponse.add_member(:started_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "startedTime"))
- StartExperimentResponse.struct_class = Types::StartExperimentResponse
-
- StartLaunchRequest.add_member(:launch, Shapes::ShapeRef.new(shape: LaunchName, required: true, location: "uri", location_name: "launch"))
- StartLaunchRequest.add_member(:project, Shapes::ShapeRef.new(shape: ProjectRef, required: true, location: "uri", location_name: "project"))
- StartLaunchRequest.struct_class = Types::StartLaunchRequest
-
- StartLaunchResponse.add_member(:launch, Shapes::ShapeRef.new(shape: Launch, required: true, location_name: "launch"))
- StartLaunchResponse.struct_class = Types::StartLaunchResponse
-
- StopExperimentRequest.add_member(:desired_state, Shapes::ShapeRef.new(shape: ExperimentStopDesiredState, location_name: "desiredState"))
- StopExperimentRequest.add_member(:experiment, Shapes::ShapeRef.new(shape: ExperimentName, required: true, location: "uri", location_name: "experiment"))
- StopExperimentRequest.add_member(:project, Shapes::ShapeRef.new(shape: ProjectRef, required: true, location: "uri", location_name: "project"))
- StopExperimentRequest.add_member(:reason, Shapes::ShapeRef.new(shape: Description, location_name: "reason"))
- StopExperimentRequest.struct_class = Types::StopExperimentRequest
-
- StopExperimentResponse.add_member(:ended_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "endedTime"))
- StopExperimentResponse.struct_class = Types::StopExperimentResponse
-
- StopLaunchRequest.add_member(:desired_state, Shapes::ShapeRef.new(shape: LaunchStopDesiredState, location_name: "desiredState"))
- StopLaunchRequest.add_member(:launch, Shapes::ShapeRef.new(shape: LaunchName, required: true, location: "uri", location_name: "launch"))
- StopLaunchRequest.add_member(:project, Shapes::ShapeRef.new(shape: ProjectRef, required: true, location: "uri", location_name: "project"))
- StopLaunchRequest.add_member(:reason, Shapes::ShapeRef.new(shape: Description, location_name: "reason"))
- StopLaunchRequest.struct_class = Types::StopLaunchRequest
-
- StopLaunchResponse.add_member(:ended_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "endedTime"))
- StopLaunchResponse.struct_class = Types::StopLaunchResponse
-
- TagKeyList.member = Shapes::ShapeRef.new(shape: TagKey)
-
- TagMap.key = Shapes::ShapeRef.new(shape: TagKey)
- TagMap.value = Shapes::ShapeRef.new(shape: TagValue)
-
- TagResourceRequest.add_member(:resource_arn, Shapes::ShapeRef.new(shape: Arn, required: true, location: "uri", location_name: "resourceArn"))
- TagResourceRequest.add_member(:tags, Shapes::ShapeRef.new(shape: TagMap, required: true, location_name: "tags"))
- TagResourceRequest.struct_class = Types::TagResourceRequest
-
- TagResourceResponse.struct_class = Types::TagResourceResponse
-
- TestSegmentPatternRequest.add_member(:pattern, Shapes::ShapeRef.new(shape: SegmentPattern, required: true, location_name: "pattern", metadata: {"jsonvalue" => true}))
- TestSegmentPatternRequest.add_member(:payload, Shapes::ShapeRef.new(shape: JsonValue, required: true, location_name: "payload", metadata: {"jsonvalue" => true}))
- TestSegmentPatternRequest.struct_class = Types::TestSegmentPatternRequest
-
- TestSegmentPatternResponse.add_member(:match, Shapes::ShapeRef.new(shape: Boolean, required: true, location_name: "match"))
- TestSegmentPatternResponse.struct_class = Types::TestSegmentPatternResponse
-
- ThrottlingException.add_member(:message, Shapes::ShapeRef.new(shape: String, location_name: "message"))
- ThrottlingException.add_member(:quota_code, Shapes::ShapeRef.new(shape: String, location_name: "quotaCode"))
- ThrottlingException.add_member(:service_code, Shapes::ShapeRef.new(shape: String, location_name: "serviceCode"))
- ThrottlingException.struct_class = Types::ThrottlingException
-
- TimestampList.member = Shapes::ShapeRef.new(shape: Timestamp)
-
- Treatment.add_member(:description, Shapes::ShapeRef.new(shape: Description, location_name: "description"))
- Treatment.add_member(:feature_variations, Shapes::ShapeRef.new(shape: FeatureToVariationMap, location_name: "featureVariations"))
- Treatment.add_member(:name, Shapes::ShapeRef.new(shape: TreatmentName, required: true, location_name: "name"))
- Treatment.struct_class = Types::Treatment
-
- TreatmentConfig.add_member(:description, Shapes::ShapeRef.new(shape: Description, location_name: "description"))
- TreatmentConfig.add_member(:feature, Shapes::ShapeRef.new(shape: FeatureName, required: true, location_name: "feature"))
- TreatmentConfig.add_member(:name, Shapes::ShapeRef.new(shape: TreatmentName, required: true, location_name: "name"))
- TreatmentConfig.add_member(:variation, Shapes::ShapeRef.new(shape: VariationName, required: true, location_name: "variation"))
- TreatmentConfig.struct_class = Types::TreatmentConfig
-
- TreatmentConfigList.member = Shapes::ShapeRef.new(shape: TreatmentConfig)
-
- TreatmentList.member = Shapes::ShapeRef.new(shape: Treatment)
-
- TreatmentNameList.member = Shapes::ShapeRef.new(shape: TreatmentName)
-
- TreatmentToWeightMap.key = Shapes::ShapeRef.new(shape: TreatmentName)
- TreatmentToWeightMap.value = Shapes::ShapeRef.new(shape: SplitWeight)
-
- UntagResourceRequest.add_member(:resource_arn, Shapes::ShapeRef.new(shape: Arn, required: true, location: "uri", location_name: "resourceArn"))
- UntagResourceRequest.add_member(:tag_keys, Shapes::ShapeRef.new(shape: TagKeyList, required: true, location: "querystring", location_name: "tagKeys"))
- UntagResourceRequest.struct_class = Types::UntagResourceRequest
-
- UntagResourceResponse.struct_class = Types::UntagResourceResponse
-
- UpdateExperimentRequest.add_member(:description, Shapes::ShapeRef.new(shape: Description, location_name: "description"))
- UpdateExperimentRequest.add_member(:experiment, Shapes::ShapeRef.new(shape: ExperimentName, required: true, location: "uri", location_name: "experiment"))
- UpdateExperimentRequest.add_member(:metric_goals, Shapes::ShapeRef.new(shape: MetricGoalConfigList, location_name: "metricGoals"))
- UpdateExperimentRequest.add_member(:online_ab_config, Shapes::ShapeRef.new(shape: OnlineAbConfig, location_name: "onlineAbConfig"))
- UpdateExperimentRequest.add_member(:project, Shapes::ShapeRef.new(shape: ProjectRef, required: true, location: "uri", location_name: "project"))
- UpdateExperimentRequest.add_member(:randomization_salt, Shapes::ShapeRef.new(shape: RandomizationSalt, location_name: "randomizationSalt"))
- UpdateExperimentRequest.add_member(:remove_segment, Shapes::ShapeRef.new(shape: PrimitiveBoolean, location_name: "removeSegment"))
- UpdateExperimentRequest.add_member(:sampling_rate, Shapes::ShapeRef.new(shape: SplitWeight, location_name: "samplingRate", metadata: {"box" => true}))
- UpdateExperimentRequest.add_member(:segment, Shapes::ShapeRef.new(shape: SegmentRef, location_name: "segment"))
- UpdateExperimentRequest.add_member(:treatments, Shapes::ShapeRef.new(shape: TreatmentConfigList, location_name: "treatments"))
- UpdateExperimentRequest.struct_class = Types::UpdateExperimentRequest
-
- UpdateExperimentResponse.add_member(:experiment, Shapes::ShapeRef.new(shape: Experiment, required: true, location_name: "experiment"))
- UpdateExperimentResponse.struct_class = Types::UpdateExperimentResponse
-
- UpdateFeatureRequest.add_member(:add_or_update_variations, Shapes::ShapeRef.new(shape: VariationConfigsList, location_name: "addOrUpdateVariations"))
- UpdateFeatureRequest.add_member(:default_variation, Shapes::ShapeRef.new(shape: VariationName, location_name: "defaultVariation"))
- UpdateFeatureRequest.add_member(:description, Shapes::ShapeRef.new(shape: Description, location_name: "description"))
- UpdateFeatureRequest.add_member(:entity_overrides, Shapes::ShapeRef.new(shape: EntityOverrideMap, location_name: "entityOverrides"))
- UpdateFeatureRequest.add_member(:evaluation_strategy, Shapes::ShapeRef.new(shape: FeatureEvaluationStrategy, location_name: "evaluationStrategy"))
- UpdateFeatureRequest.add_member(:feature, Shapes::ShapeRef.new(shape: FeatureName, required: true, location: "uri", location_name: "feature"))
- UpdateFeatureRequest.add_member(:project, Shapes::ShapeRef.new(shape: ProjectRef, required: true, location: "uri", location_name: "project"))
- UpdateFeatureRequest.add_member(:remove_variations, Shapes::ShapeRef.new(shape: VariationNameList, location_name: "removeVariations"))
- UpdateFeatureRequest.struct_class = Types::UpdateFeatureRequest
-
- UpdateFeatureResponse.add_member(:feature, Shapes::ShapeRef.new(shape: Feature, required: true, location_name: "feature"))
- UpdateFeatureResponse.struct_class = Types::UpdateFeatureResponse
-
- UpdateLaunchRequest.add_member(:description, Shapes::ShapeRef.new(shape: Description, location_name: "description"))
- UpdateLaunchRequest.add_member(:groups, Shapes::ShapeRef.new(shape: LaunchGroupConfigList, location_name: "groups"))
- UpdateLaunchRequest.add_member(:launch, Shapes::ShapeRef.new(shape: LaunchName, required: true, location: "uri", location_name: "launch"))
- UpdateLaunchRequest.add_member(:metric_monitors, Shapes::ShapeRef.new(shape: MetricMonitorConfigList, location_name: "metricMonitors"))
- UpdateLaunchRequest.add_member(:project, Shapes::ShapeRef.new(shape: ProjectRef, required: true, location: "uri", location_name: "project"))
- UpdateLaunchRequest.add_member(:randomization_salt, Shapes::ShapeRef.new(shape: RandomizationSalt, location_name: "randomizationSalt"))
- UpdateLaunchRequest.add_member(:scheduled_splits_config, Shapes::ShapeRef.new(shape: ScheduledSplitsLaunchConfig, location_name: "scheduledSplitsConfig"))
- UpdateLaunchRequest.struct_class = Types::UpdateLaunchRequest
-
- UpdateLaunchResponse.add_member(:launch, Shapes::ShapeRef.new(shape: Launch, required: true, location_name: "launch"))
- UpdateLaunchResponse.struct_class = Types::UpdateLaunchResponse
-
- UpdateProjectDataDeliveryRequest.add_member(:cloud_watch_logs, Shapes::ShapeRef.new(shape: CloudWatchLogsDestinationConfig, location_name: "cloudWatchLogs"))
- UpdateProjectDataDeliveryRequest.add_member(:project, Shapes::ShapeRef.new(shape: ProjectRef, required: true, location: "uri", location_name: "project"))
- UpdateProjectDataDeliveryRequest.add_member(:s3_destination, Shapes::ShapeRef.new(shape: S3DestinationConfig, location_name: "s3Destination"))
- UpdateProjectDataDeliveryRequest.struct_class = Types::UpdateProjectDataDeliveryRequest
-
- UpdateProjectDataDeliveryResponse.add_member(:project, Shapes::ShapeRef.new(shape: Project, required: true, location_name: "project"))
- UpdateProjectDataDeliveryResponse.struct_class = Types::UpdateProjectDataDeliveryResponse
-
- UpdateProjectRequest.add_member(:app_config_resource, Shapes::ShapeRef.new(shape: ProjectAppConfigResourceConfig, location_name: "appConfigResource"))
- UpdateProjectRequest.add_member(:description, Shapes::ShapeRef.new(shape: Description, location_name: "description"))
- UpdateProjectRequest.add_member(:project, Shapes::ShapeRef.new(shape: ProjectRef, required: true, location: "uri", location_name: "project"))
- UpdateProjectRequest.struct_class = Types::UpdateProjectRequest
-
- UpdateProjectResponse.add_member(:project, Shapes::ShapeRef.new(shape: Project, required: true, location_name: "project"))
- UpdateProjectResponse.struct_class = Types::UpdateProjectResponse
-
- ValidationException.add_member(:field_list, Shapes::ShapeRef.new(shape: ValidationExceptionFieldList, location_name: "fieldList"))
- ValidationException.add_member(:message, Shapes::ShapeRef.new(shape: String, location_name: "message"))
- ValidationException.add_member(:reason, Shapes::ShapeRef.new(shape: ValidationExceptionReason, location_name: "reason"))
- ValidationException.struct_class = Types::ValidationException
-
- ValidationExceptionField.add_member(:message, Shapes::ShapeRef.new(shape: String, required: true, location_name: "message"))
- ValidationExceptionField.add_member(:name, Shapes::ShapeRef.new(shape: String, required: true, location_name: "name"))
- ValidationExceptionField.struct_class = Types::ValidationExceptionField
-
- ValidationExceptionFieldList.member = Shapes::ShapeRef.new(shape: ValidationExceptionField)
-
- VariableValue.add_member(:bool_value, Shapes::ShapeRef.new(shape: Boolean, location_name: "boolValue"))
- VariableValue.add_member(:double_value, Shapes::ShapeRef.new(shape: Double, location_name: "doubleValue"))
- VariableValue.add_member(:long_value, Shapes::ShapeRef.new(shape: VariableValueLongValueLong, location_name: "longValue"))
- VariableValue.add_member(:string_value, Shapes::ShapeRef.new(shape: VariableValueStringValueString, location_name: "stringValue"))
- VariableValue.add_member(:unknown, Shapes::ShapeRef.new(shape: nil, location_name: 'unknown'))
- VariableValue.add_member_subclass(:bool_value, Types::VariableValue::BoolValue)
- VariableValue.add_member_subclass(:double_value, Types::VariableValue::DoubleValue)
- VariableValue.add_member_subclass(:long_value, Types::VariableValue::LongValue)
- VariableValue.add_member_subclass(:string_value, Types::VariableValue::StringValue)
- VariableValue.add_member_subclass(:unknown, Types::VariableValue::Unknown)
- VariableValue.struct_class = Types::VariableValue
-
- Variation.add_member(:name, Shapes::ShapeRef.new(shape: VariationName, location_name: "name"))
- Variation.add_member(:value, Shapes::ShapeRef.new(shape: VariableValue, location_name: "value"))
- Variation.struct_class = Types::Variation
-
- VariationConfig.add_member(:name, Shapes::ShapeRef.new(shape: VariationName, required: true, location_name: "name"))
- VariationConfig.add_member(:value, Shapes::ShapeRef.new(shape: VariableValue, required: true, location_name: "value"))
- VariationConfig.struct_class = Types::VariationConfig
-
- VariationConfigsList.member = Shapes::ShapeRef.new(shape: VariationConfig)
-
- VariationNameList.member = Shapes::ShapeRef.new(shape: VariationName)
-
- VariationsList.member = Shapes::ShapeRef.new(shape: Variation)
-
-
- # @api private
- API = Seahorse::Model::Api.new.tap do |api|
-
- api.version = "2021-02-01"
-
- api.metadata = {
- "apiVersion" => "2021-02-01",
- "auth" => ["aws.auth#sigv4"],
- "endpointPrefix" => "evidently",
- "jsonVersion" => "1.1",
- "protocol" => "rest-json",
- "protocols" => ["rest-json"],
- "serviceFullName" => "Amazon CloudWatch Evidently",
- "serviceId" => "Evidently",
- "signatureVersion" => "v4",
- "signingName" => "evidently",
- "uid" => "evidently-2021-02-01",
- }
-
- api.add_operation(:batch_evaluate_feature, Seahorse::Model::Operation.new.tap do |o|
- o.name = "BatchEvaluateFeature"
- o.http_method = "POST"
- o.http_request_uri = "/projects/{project}/evaluations"
- o.deprecated = true
- o.endpoint_pattern = {
- "hostPrefix" => "dataplane.",
- }
- o.input = Shapes::ShapeRef.new(shape: BatchEvaluateFeatureRequest)
- o.output = Shapes::ShapeRef.new(shape: BatchEvaluateFeatureResponse)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
- end)
-
- api.add_operation(:create_experiment, Seahorse::Model::Operation.new.tap do |o|
- o.name = "CreateExperiment"
- o.http_method = "POST"
- o.http_request_uri = "/projects/{project}/experiments"
- o.deprecated = true
- o.input = Shapes::ShapeRef.new(shape: CreateExperimentRequest)
- o.output = Shapes::ShapeRef.new(shape: CreateExperimentResponse)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: ConflictException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceQuotaExceededException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
- end)
-
- api.add_operation(:create_feature, Seahorse::Model::Operation.new.tap do |o|
- o.name = "CreateFeature"
- o.http_method = "POST"
- o.http_request_uri = "/projects/{project}/features"
- o.deprecated = true
- o.input = Shapes::ShapeRef.new(shape: CreateFeatureRequest)
- o.output = Shapes::ShapeRef.new(shape: CreateFeatureResponse)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: ConflictException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceQuotaExceededException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
- end)
-
- api.add_operation(:create_launch, Seahorse::Model::Operation.new.tap do |o|
- o.name = "CreateLaunch"
- o.http_method = "POST"
- o.http_request_uri = "/projects/{project}/launches"
- o.deprecated = true
- o.input = Shapes::ShapeRef.new(shape: CreateLaunchRequest)
- o.output = Shapes::ShapeRef.new(shape: CreateLaunchResponse)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: ConflictException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceQuotaExceededException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
- end)
-
- api.add_operation(:create_project, Seahorse::Model::Operation.new.tap do |o|
- o.name = "CreateProject"
- o.http_method = "POST"
- o.http_request_uri = "/projects"
- o.deprecated = true
- o.input = Shapes::ShapeRef.new(shape: CreateProjectRequest)
- o.output = Shapes::ShapeRef.new(shape: CreateProjectResponse)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: ConflictException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceQuotaExceededException)
- o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
- end)
-
- api.add_operation(:create_segment, Seahorse::Model::Operation.new.tap do |o|
- o.name = "CreateSegment"
- o.http_method = "POST"
- o.http_request_uri = "/segments"
- o.deprecated = true
- o.input = Shapes::ShapeRef.new(shape: CreateSegmentRequest)
- o.output = Shapes::ShapeRef.new(shape: CreateSegmentResponse)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: ConflictException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceQuotaExceededException)
- o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
- end)
-
- api.add_operation(:delete_experiment, Seahorse::Model::Operation.new.tap do |o|
- o.name = "DeleteExperiment"
- o.http_method = "DELETE"
- o.http_request_uri = "/projects/{project}/experiments/{experiment}"
- o.deprecated = true
- o.input = Shapes::ShapeRef.new(shape: DeleteExperimentRequest)
- o.output = Shapes::ShapeRef.new(shape: DeleteExperimentResponse)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: InternalServerException)
- o.errors << Shapes::ShapeRef.new(shape: ConflictException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
- end)
-
- api.add_operation(:delete_feature, Seahorse::Model::Operation.new.tap do |o|
- o.name = "DeleteFeature"
- o.http_method = "DELETE"
- o.http_request_uri = "/projects/{project}/features/{feature}"
- o.deprecated = true
- o.input = Shapes::ShapeRef.new(shape: DeleteFeatureRequest)
- o.output = Shapes::ShapeRef.new(shape: DeleteFeatureResponse)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: ConflictException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
- end)
-
- api.add_operation(:delete_launch, Seahorse::Model::Operation.new.tap do |o|
- o.name = "DeleteLaunch"
- o.http_method = "DELETE"
- o.http_request_uri = "/projects/{project}/launches/{launch}"
- o.deprecated = true
- o.input = Shapes::ShapeRef.new(shape: DeleteLaunchRequest)
- o.output = Shapes::ShapeRef.new(shape: DeleteLaunchResponse)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: ConflictException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
- end)
-
- api.add_operation(:delete_project, Seahorse::Model::Operation.new.tap do |o|
- o.name = "DeleteProject"
- o.http_method = "DELETE"
- o.http_request_uri = "/projects/{project}"
- o.deprecated = true
- o.input = Shapes::ShapeRef.new(shape: DeleteProjectRequest)
- o.output = Shapes::ShapeRef.new(shape: DeleteProjectResponse)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: ConflictException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
- end)
-
- api.add_operation(:delete_segment, Seahorse::Model::Operation.new.tap do |o|
- o.name = "DeleteSegment"
- o.http_method = "DELETE"
- o.http_request_uri = "/segments/{segment}"
- o.deprecated = true
- o.input = Shapes::ShapeRef.new(shape: DeleteSegmentRequest)
- o.output = Shapes::ShapeRef.new(shape: DeleteSegmentResponse)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: ConflictException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
- end)
-
- api.add_operation(:evaluate_feature, Seahorse::Model::Operation.new.tap do |o|
- o.name = "EvaluateFeature"
- o.http_method = "POST"
- o.http_request_uri = "/projects/{project}/evaluations/{feature}"
- o.deprecated = true
- o.endpoint_pattern = {
- "hostPrefix" => "dataplane.",
- }
- o.input = Shapes::ShapeRef.new(shape: EvaluateFeatureRequest)
- o.output = Shapes::ShapeRef.new(shape: EvaluateFeatureResponse)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
- end)
-
- api.add_operation(:get_experiment, Seahorse::Model::Operation.new.tap do |o|
- o.name = "GetExperiment"
- o.http_method = "GET"
- o.http_request_uri = "/projects/{project}/experiments/{experiment}"
- o.deprecated = true
- o.input = Shapes::ShapeRef.new(shape: GetExperimentRequest)
- o.output = Shapes::ShapeRef.new(shape: GetExperimentResponse)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
- end)
-
- api.add_operation(:get_experiment_results, Seahorse::Model::Operation.new.tap do |o|
- o.name = "GetExperimentResults"
- o.http_method = "POST"
- o.http_request_uri = "/projects/{project}/experiments/{experiment}/results"
- o.deprecated = true
- o.input = Shapes::ShapeRef.new(shape: GetExperimentResultsRequest)
- o.output = Shapes::ShapeRef.new(shape: GetExperimentResultsResponse)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: ConflictException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
- end)
-
- api.add_operation(:get_feature, Seahorse::Model::Operation.new.tap do |o|
- o.name = "GetFeature"
- o.http_method = "GET"
- o.http_request_uri = "/projects/{project}/features/{feature}"
- o.deprecated = true
- o.input = Shapes::ShapeRef.new(shape: GetFeatureRequest)
- o.output = Shapes::ShapeRef.new(shape: GetFeatureResponse)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
- end)
-
- api.add_operation(:get_launch, Seahorse::Model::Operation.new.tap do |o|
- o.name = "GetLaunch"
- o.http_method = "GET"
- o.http_request_uri = "/projects/{project}/launches/{launch}"
- o.deprecated = true
- o.input = Shapes::ShapeRef.new(shape: GetLaunchRequest)
- o.output = Shapes::ShapeRef.new(shape: GetLaunchResponse)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
- end)
-
- api.add_operation(:get_project, Seahorse::Model::Operation.new.tap do |o|
- o.name = "GetProject"
- o.http_method = "GET"
- o.http_request_uri = "/projects/{project}"
- o.deprecated = true
- o.input = Shapes::ShapeRef.new(shape: GetProjectRequest)
- o.output = Shapes::ShapeRef.new(shape: GetProjectResponse)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
- end)
-
- api.add_operation(:get_segment, Seahorse::Model::Operation.new.tap do |o|
- o.name = "GetSegment"
- o.http_method = "GET"
- o.http_request_uri = "/segments/{segment}"
- o.deprecated = true
- o.input = Shapes::ShapeRef.new(shape: GetSegmentRequest)
- o.output = Shapes::ShapeRef.new(shape: GetSegmentResponse)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
- end)
-
- api.add_operation(:list_experiments, Seahorse::Model::Operation.new.tap do |o|
- o.name = "ListExperiments"
- o.http_method = "GET"
- o.http_request_uri = "/projects/{project}/experiments"
- o.deprecated = true
- o.input = Shapes::ShapeRef.new(shape: ListExperimentsRequest)
- o.output = Shapes::ShapeRef.new(shape: ListExperimentsResponse)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
- o[:pager] = Aws::Pager.new(
- limit_key: "max_results",
- tokens: {
- "next_token" => "next_token"
- }
- )
- end)
-
- api.add_operation(:list_features, Seahorse::Model::Operation.new.tap do |o|
- o.name = "ListFeatures"
- o.http_method = "GET"
- o.http_request_uri = "/projects/{project}/features"
- o.deprecated = true
- o.input = Shapes::ShapeRef.new(shape: ListFeaturesRequest)
- o.output = Shapes::ShapeRef.new(shape: ListFeaturesResponse)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
- o[:pager] = Aws::Pager.new(
- limit_key: "max_results",
- tokens: {
- "next_token" => "next_token"
- }
- )
- end)
-
- api.add_operation(:list_launches, Seahorse::Model::Operation.new.tap do |o|
- o.name = "ListLaunches"
- o.http_method = "GET"
- o.http_request_uri = "/projects/{project}/launches"
- o.deprecated = true
- o.input = Shapes::ShapeRef.new(shape: ListLaunchesRequest)
- o.output = Shapes::ShapeRef.new(shape: ListLaunchesResponse)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
- o[:pager] = Aws::Pager.new(
- limit_key: "max_results",
- tokens: {
- "next_token" => "next_token"
- }
- )
- end)
-
- api.add_operation(:list_projects, Seahorse::Model::Operation.new.tap do |o|
- o.name = "ListProjects"
- o.http_method = "GET"
- o.http_request_uri = "/projects"
- o.deprecated = true
- o.input = Shapes::ShapeRef.new(shape: ListProjectsRequest)
- o.output = Shapes::ShapeRef.new(shape: ListProjectsResponse)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
- o[:pager] = Aws::Pager.new(
- limit_key: "max_results",
- tokens: {
- "next_token" => "next_token"
- }
- )
- end)
-
- api.add_operation(:list_segment_references, Seahorse::Model::Operation.new.tap do |o|
- o.name = "ListSegmentReferences"
- o.http_method = "GET"
- o.http_request_uri = "/segments/{segment}/references"
- o.deprecated = true
- o.input = Shapes::ShapeRef.new(shape: ListSegmentReferencesRequest)
- o.output = Shapes::ShapeRef.new(shape: ListSegmentReferencesResponse)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
- o[:pager] = Aws::Pager.new(
- limit_key: "max_results",
- tokens: {
- "next_token" => "next_token"
- }
- )
- end)
-
- api.add_operation(:list_segments, Seahorse::Model::Operation.new.tap do |o|
- o.name = "ListSegments"
- o.http_method = "GET"
- o.http_request_uri = "/segments"
- o.deprecated = true
- o.input = Shapes::ShapeRef.new(shape: ListSegmentsRequest)
- o.output = Shapes::ShapeRef.new(shape: ListSegmentsResponse)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
- o[:pager] = Aws::Pager.new(
- limit_key: "max_results",
- tokens: {
- "next_token" => "next_token"
- }
- )
- end)
-
- api.add_operation(:list_tags_for_resource, Seahorse::Model::Operation.new.tap do |o|
- o.name = "ListTagsForResource"
- o.http_method = "GET"
- o.http_request_uri = "/tags/{resourceArn}"
- o.deprecated = true
- o.input = Shapes::ShapeRef.new(shape: ListTagsForResourceRequest)
- o.output = Shapes::ShapeRef.new(shape: ListTagsForResourceResponse)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: ConflictException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- end)
-
- api.add_operation(:put_project_events, Seahorse::Model::Operation.new.tap do |o|
- o.name = "PutProjectEvents"
- o.http_method = "POST"
- o.http_request_uri = "/events/projects/{project}"
- o.deprecated = true
- o.endpoint_pattern = {
- "hostPrefix" => "dataplane.",
- }
- o.input = Shapes::ShapeRef.new(shape: PutProjectEventsRequest)
- o.output = Shapes::ShapeRef.new(shape: PutProjectEventsResponse)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
- end)
-
- api.add_operation(:start_experiment, Seahorse::Model::Operation.new.tap do |o|
- o.name = "StartExperiment"
- o.http_method = "POST"
- o.http_request_uri = "/projects/{project}/experiments/{experiment}/start"
- o.deprecated = true
- o.input = Shapes::ShapeRef.new(shape: StartExperimentRequest)
- o.output = Shapes::ShapeRef.new(shape: StartExperimentResponse)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: ConflictException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceQuotaExceededException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
- end)
-
- api.add_operation(:start_launch, Seahorse::Model::Operation.new.tap do |o|
- o.name = "StartLaunch"
- o.http_method = "POST"
- o.http_request_uri = "/projects/{project}/launches/{launch}/start"
- o.deprecated = true
- o.input = Shapes::ShapeRef.new(shape: StartLaunchRequest)
- o.output = Shapes::ShapeRef.new(shape: StartLaunchResponse)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: ConflictException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceQuotaExceededException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
- end)
-
- api.add_operation(:stop_experiment, Seahorse::Model::Operation.new.tap do |o|
- o.name = "StopExperiment"
- o.http_method = "POST"
- o.http_request_uri = "/projects/{project}/experiments/{experiment}/cancel"
- o.deprecated = true
- o.input = Shapes::ShapeRef.new(shape: StopExperimentRequest)
- o.output = Shapes::ShapeRef.new(shape: StopExperimentResponse)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: ConflictException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceQuotaExceededException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
- end)
-
- api.add_operation(:stop_launch, Seahorse::Model::Operation.new.tap do |o|
- o.name = "StopLaunch"
- o.http_method = "POST"
- o.http_request_uri = "/projects/{project}/launches/{launch}/cancel"
- o.deprecated = true
- o.input = Shapes::ShapeRef.new(shape: StopLaunchRequest)
- o.output = Shapes::ShapeRef.new(shape: StopLaunchResponse)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
- end)
-
- api.add_operation(:tag_resource, Seahorse::Model::Operation.new.tap do |o|
- o.name = "TagResource"
- o.http_method = "POST"
- o.http_request_uri = "/tags/{resourceArn}"
- o.deprecated = true
- o.input = Shapes::ShapeRef.new(shape: TagResourceRequest)
- o.output = Shapes::ShapeRef.new(shape: TagResourceResponse)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: ConflictException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- end)
-
- api.add_operation(:test_segment_pattern, Seahorse::Model::Operation.new.tap do |o|
- o.name = "TestSegmentPattern"
- o.http_method = "POST"
- o.http_request_uri = "/test-segment-pattern"
- o.deprecated = true
- o.input = Shapes::ShapeRef.new(shape: TestSegmentPatternRequest)
- o.output = Shapes::ShapeRef.new(shape: TestSegmentPatternResponse)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
- end)
-
- api.add_operation(:untag_resource, Seahorse::Model::Operation.new.tap do |o|
- o.name = "UntagResource"
- o.http_method = "DELETE"
- o.http_request_uri = "/tags/{resourceArn}"
- o.deprecated = true
- o.input = Shapes::ShapeRef.new(shape: UntagResourceRequest)
- o.output = Shapes::ShapeRef.new(shape: UntagResourceResponse)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: ConflictException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- end)
-
- api.add_operation(:update_experiment, Seahorse::Model::Operation.new.tap do |o|
- o.name = "UpdateExperiment"
- o.http_method = "PATCH"
- o.http_request_uri = "/projects/{project}/experiments/{experiment}"
- o.deprecated = true
- o.input = Shapes::ShapeRef.new(shape: UpdateExperimentRequest)
- o.output = Shapes::ShapeRef.new(shape: UpdateExperimentResponse)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: ConflictException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
- end)
-
- api.add_operation(:update_feature, Seahorse::Model::Operation.new.tap do |o|
- o.name = "UpdateFeature"
- o.http_method = "PATCH"
- o.http_request_uri = "/projects/{project}/features/{feature}"
- o.deprecated = true
- o.input = Shapes::ShapeRef.new(shape: UpdateFeatureRequest)
- o.output = Shapes::ShapeRef.new(shape: UpdateFeatureResponse)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: ConflictException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceQuotaExceededException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
- end)
-
- api.add_operation(:update_launch, Seahorse::Model::Operation.new.tap do |o|
- o.name = "UpdateLaunch"
- o.http_method = "PATCH"
- o.http_request_uri = "/projects/{project}/launches/{launch}"
- o.deprecated = true
- o.input = Shapes::ShapeRef.new(shape: UpdateLaunchRequest)
- o.output = Shapes::ShapeRef.new(shape: UpdateLaunchResponse)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: ConflictException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
- end)
-
- api.add_operation(:update_project, Seahorse::Model::Operation.new.tap do |o|
- o.name = "UpdateProject"
- o.http_method = "PATCH"
- o.http_request_uri = "/projects/{project}"
- o.deprecated = true
- o.input = Shapes::ShapeRef.new(shape: UpdateProjectRequest)
- o.output = Shapes::ShapeRef.new(shape: UpdateProjectResponse)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: ConflictException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceQuotaExceededException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
- end)
-
- api.add_operation(:update_project_data_delivery, Seahorse::Model::Operation.new.tap do |o|
- o.name = "UpdateProjectDataDelivery"
- o.http_method = "PATCH"
- o.http_request_uri = "/projects/{project}/data-delivery"
- o.deprecated = true
- o.input = Shapes::ShapeRef.new(shape: UpdateProjectDataDeliveryRequest)
- o.output = Shapes::ShapeRef.new(shape: UpdateProjectDataDeliveryResponse)
- o.errors << Shapes::ShapeRef.new(shape: ValidationException)
- o.errors << Shapes::ShapeRef.new(shape: ConflictException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceQuotaExceededException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
- end)
- end
-
- end
-end
diff --git a/gems/aws-sdk-cloudwatchevidently/lib/aws-sdk-cloudwatchevidently/customizations.rb b/gems/aws-sdk-cloudwatchevidently/lib/aws-sdk-cloudwatchevidently/customizations.rb
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/gems/aws-sdk-cloudwatchevidently/lib/aws-sdk-cloudwatchevidently/endpoint_parameters.rb b/gems/aws-sdk-cloudwatchevidently/lib/aws-sdk-cloudwatchevidently/endpoint_parameters.rb
deleted file mode 100644
index 87f46f09cfc..00000000000
--- a/gems/aws-sdk-cloudwatchevidently/lib/aws-sdk-cloudwatchevidently/endpoint_parameters.rb
+++ /dev/null
@@ -1,69 +0,0 @@
-# frozen_string_literal: true
-
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-module Aws::CloudWatchEvidently
- # Endpoint parameters used to influence endpoints per request.
- #
- # @!attribute region
- # The AWS region used to dispatch the request.
- #
- # @return [string]
- #
- # @!attribute use_dual_stack
- # When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.
- #
- # @return [boolean]
- #
- # @!attribute use_fips
- # When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.
- #
- # @return [boolean]
- #
- # @!attribute endpoint
- # Override the endpoint used to send this request
- #
- # @return [string]
- #
- EndpointParameters = Struct.new(
- :region,
- :use_dual_stack,
- :use_fips,
- :endpoint,
- ) do
- include Aws::Structure
-
- # @api private
- class << self
- PARAM_MAP = {
- 'Region' => :region,
- 'UseDualStack' => :use_dual_stack,
- 'UseFIPS' => :use_fips,
- 'Endpoint' => :endpoint,
- }.freeze
- end
-
- def initialize(options = {})
- self[:region] = options[:region]
- self[:use_dual_stack] = options[:use_dual_stack]
- self[:use_dual_stack] = false if self[:use_dual_stack].nil?
- self[:use_fips] = options[:use_fips]
- self[:use_fips] = false if self[:use_fips].nil?
- self[:endpoint] = options[:endpoint]
- end
-
- def self.create(config, options={})
- new({
- region: config.region,
- use_dual_stack: config.use_dualstack_endpoint,
- use_fips: config.use_fips_endpoint,
- endpoint: (config.endpoint.to_s unless config.regional_endpoint),
- }.merge(options))
- end
- end
-end
diff --git a/gems/aws-sdk-cloudwatchevidently/lib/aws-sdk-cloudwatchevidently/endpoint_provider.rb b/gems/aws-sdk-cloudwatchevidently/lib/aws-sdk-cloudwatchevidently/endpoint_provider.rb
deleted file mode 100644
index 7781706fab1..00000000000
--- a/gems/aws-sdk-cloudwatchevidently/lib/aws-sdk-cloudwatchevidently/endpoint_provider.rb
+++ /dev/null
@@ -1,50 +0,0 @@
-# frozen_string_literal: true
-
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-module Aws::CloudWatchEvidently
- class EndpointProvider
- def resolve_endpoint(parameters)
- if Aws::Endpoints::Matchers.set?(parameters.endpoint)
- if Aws::Endpoints::Matchers.boolean_equals?(parameters.use_fips, true)
- raise ArgumentError, "Invalid Configuration: FIPS and custom endpoint are not supported"
- end
- if Aws::Endpoints::Matchers.boolean_equals?(parameters.use_dual_stack, true)
- raise ArgumentError, "Invalid Configuration: Dualstack and custom endpoint are not supported"
- end
- return Aws::Endpoints::Endpoint.new(url: parameters.endpoint, headers: {}, properties: {})
- end
- if Aws::Endpoints::Matchers.set?(parameters.region)
- if (partition_result = Aws::Endpoints::Matchers.aws_partition(parameters.region))
- if Aws::Endpoints::Matchers.boolean_equals?(parameters.use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(parameters.use_dual_stack, true)
- if Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsFIPS")) && Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsDualStack"))
- return Aws::Endpoints::Endpoint.new(url: "https://evidently-fips.#{parameters.region}.#{partition_result['dualStackDnsSuffix']}", headers: {}, properties: {})
- end
- raise ArgumentError, "FIPS and DualStack are enabled, but this partition does not support one or both"
- end
- if Aws::Endpoints::Matchers.boolean_equals?(parameters.use_fips, true)
- if Aws::Endpoints::Matchers.boolean_equals?(Aws::Endpoints::Matchers.attr(partition_result, "supportsFIPS"), true)
- return Aws::Endpoints::Endpoint.new(url: "https://evidently-fips.#{parameters.region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {})
- end
- raise ArgumentError, "FIPS is enabled but this partition does not support FIPS"
- end
- if Aws::Endpoints::Matchers.boolean_equals?(parameters.use_dual_stack, true)
- if Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsDualStack"))
- return Aws::Endpoints::Endpoint.new(url: "https://evidently.#{parameters.region}.#{partition_result['dualStackDnsSuffix']}", headers: {}, properties: {})
- end
- raise ArgumentError, "DualStack is enabled but this partition does not support DualStack"
- end
- return Aws::Endpoints::Endpoint.new(url: "https://evidently.#{parameters.region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {})
- end
- end
- raise ArgumentError, "Invalid Configuration: Missing Region"
- raise ArgumentError, 'No endpoint could be resolved'
-
- end
- end
-end
diff --git a/gems/aws-sdk-cloudwatchevidently/lib/aws-sdk-cloudwatchevidently/endpoints.rb b/gems/aws-sdk-cloudwatchevidently/lib/aws-sdk-cloudwatchevidently/endpoints.rb
deleted file mode 100644
index 80dd4a88578..00000000000
--- a/gems/aws-sdk-cloudwatchevidently/lib/aws-sdk-cloudwatchevidently/endpoints.rb
+++ /dev/null
@@ -1,20 +0,0 @@
-# frozen_string_literal: true
-
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-
-module Aws::CloudWatchEvidently
- # @api private
- module Endpoints
-
-
- def self.parameters_for_operation(context)
- Aws::CloudWatchEvidently::EndpointParameters.create(context.config)
- end
- end
-end
diff --git a/gems/aws-sdk-cloudwatchevidently/lib/aws-sdk-cloudwatchevidently/errors.rb b/gems/aws-sdk-cloudwatchevidently/lib/aws-sdk-cloudwatchevidently/errors.rb
deleted file mode 100644
index edd3abff05a..00000000000
--- a/gems/aws-sdk-cloudwatchevidently/lib/aws-sdk-cloudwatchevidently/errors.rb
+++ /dev/null
@@ -1,226 +0,0 @@
-# frozen_string_literal: true
-
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-module Aws::CloudWatchEvidently
-
- # When CloudWatchEvidently returns an error response, the Ruby SDK constructs and raises an error.
- # These errors all extend Aws::CloudWatchEvidently::Errors::ServiceError < {Aws::Errors::ServiceError}
- #
- # You can rescue all CloudWatchEvidently errors using ServiceError:
- #
- # begin
- # # do stuff
- # rescue Aws::CloudWatchEvidently::Errors::ServiceError
- # # rescues all CloudWatchEvidently API errors
- # end
- #
- #
- # ## Request Context
- # ServiceError objects have a {Aws::Errors::ServiceError#context #context} method that returns
- # information about the request that generated the error.
- # See {Seahorse::Client::RequestContext} for more information.
- #
- # ## Error Classes
- # * {AccessDeniedException}
- # * {ConflictException}
- # * {InternalServerException}
- # * {ResourceNotFoundException}
- # * {ServiceQuotaExceededException}
- # * {ServiceUnavailableException}
- # * {ThrottlingException}
- # * {ValidationException}
- #
- # Additionally, error classes are dynamically generated for service errors based on the error code
- # if they are not defined above.
- module Errors
-
- extend Aws::Errors::DynamicErrors
-
- class AccessDeniedException < ServiceError
-
- # @param [Seahorse::Client::RequestContext] context
- # @param [String] message
- # @param [Aws::CloudWatchEvidently::Types::AccessDeniedException] data
- def initialize(context, message, data = Aws::EmptyStructure.new)
- super(context, message, data)
- end
-
- # @return [String]
- def message
- @message || @data[:message]
- end
- end
-
- class ConflictException < ServiceError
-
- # @param [Seahorse::Client::RequestContext] context
- # @param [String] message
- # @param [Aws::CloudWatchEvidently::Types::ConflictException] data
- def initialize(context, message, data = Aws::EmptyStructure.new)
- super(context, message, data)
- end
-
- # @return [String]
- def message
- @message || @data[:message]
- end
-
- # @return [String]
- def resource_id
- @data[:resource_id]
- end
-
- # @return [String]
- def resource_type
- @data[:resource_type]
- end
- end
-
- class InternalServerException < ServiceError
-
- # @param [Seahorse::Client::RequestContext] context
- # @param [String] message
- # @param [Aws::CloudWatchEvidently::Types::InternalServerException] data
- def initialize(context, message, data = Aws::EmptyStructure.new)
- super(context, message, data)
- end
-
- # @return [String]
- def message
- @message || @data[:message]
- end
- end
-
- class ResourceNotFoundException < ServiceError
-
- # @param [Seahorse::Client::RequestContext] context
- # @param [String] message
- # @param [Aws::CloudWatchEvidently::Types::ResourceNotFoundException] data
- def initialize(context, message, data = Aws::EmptyStructure.new)
- super(context, message, data)
- end
-
- # @return [String]
- def message
- @message || @data[:message]
- end
-
- # @return [String]
- def resource_id
- @data[:resource_id]
- end
-
- # @return [String]
- def resource_type
- @data[:resource_type]
- end
- end
-
- class ServiceQuotaExceededException < ServiceError
-
- # @param [Seahorse::Client::RequestContext] context
- # @param [String] message
- # @param [Aws::CloudWatchEvidently::Types::ServiceQuotaExceededException] data
- def initialize(context, message, data = Aws::EmptyStructure.new)
- super(context, message, data)
- end
-
- # @return [String]
- def message
- @message || @data[:message]
- end
-
- # @return [String]
- def quota_code
- @data[:quota_code]
- end
-
- # @return [String]
- def resource_id
- @data[:resource_id]
- end
-
- # @return [String]
- def resource_type
- @data[:resource_type]
- end
-
- # @return [String]
- def service_code
- @data[:service_code]
- end
- end
-
- class ServiceUnavailableException < ServiceError
-
- # @param [Seahorse::Client::RequestContext] context
- # @param [String] message
- # @param [Aws::CloudWatchEvidently::Types::ServiceUnavailableException] data
- def initialize(context, message, data = Aws::EmptyStructure.new)
- super(context, message, data)
- end
-
- # @return [String]
- def message
- @message || @data[:message]
- end
- end
-
- class ThrottlingException < ServiceError
-
- # @param [Seahorse::Client::RequestContext] context
- # @param [String] message
- # @param [Aws::CloudWatchEvidently::Types::ThrottlingException] data
- def initialize(context, message, data = Aws::EmptyStructure.new)
- super(context, message, data)
- end
-
- # @return [String]
- def message
- @message || @data[:message]
- end
-
- # @return [String]
- def quota_code
- @data[:quota_code]
- end
-
- # @return [String]
- def service_code
- @data[:service_code]
- end
- end
-
- class ValidationException < ServiceError
-
- # @param [Seahorse::Client::RequestContext] context
- # @param [String] message
- # @param [Aws::CloudWatchEvidently::Types::ValidationException] data
- def initialize(context, message, data = Aws::EmptyStructure.new)
- super(context, message, data)
- end
-
- # @return [String]
- def field_list
- @data[:field_list]
- end
-
- # @return [String]
- def message
- @message || @data[:message]
- end
-
- # @return [String]
- def reason
- @data[:reason]
- end
- end
-
- end
-end
diff --git a/gems/aws-sdk-cloudwatchevidently/lib/aws-sdk-cloudwatchevidently/plugins/endpoints.rb b/gems/aws-sdk-cloudwatchevidently/lib/aws-sdk-cloudwatchevidently/plugins/endpoints.rb
deleted file mode 100644
index d67d732d1c8..00000000000
--- a/gems/aws-sdk-cloudwatchevidently/lib/aws-sdk-cloudwatchevidently/plugins/endpoints.rb
+++ /dev/null
@@ -1,77 +0,0 @@
-# frozen_string_literal: true
-
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-
-module Aws::CloudWatchEvidently
- module Plugins
- class Endpoints < Seahorse::Client::Plugin
- option(
- :endpoint_provider,
- doc_type: 'Aws::CloudWatchEvidently::EndpointProvider',
- rbs_type: 'untyped',
- docstring: <<~DOCS) do |_cfg|
-The endpoint provider used to resolve endpoints. Any object that responds to
-`#resolve_endpoint(parameters)` where `parameters` is a Struct similar to
-`Aws::CloudWatchEvidently::EndpointParameters`.
- DOCS
- Aws::CloudWatchEvidently::EndpointProvider.new
- end
-
- # @api private
- class Handler < Seahorse::Client::Handler
- def call(context)
- unless context[:discovered_endpoint]
- params = Aws::CloudWatchEvidently::Endpoints.parameters_for_operation(context)
- endpoint = context.config.endpoint_provider.resolve_endpoint(params)
-
- context.http_request.endpoint = endpoint.url
- apply_endpoint_headers(context, endpoint.headers)
-
- context[:endpoint_params] = params
- context[:endpoint_properties] = endpoint.properties
- end
-
- context[:auth_scheme] =
- Aws::Endpoints.resolve_auth_scheme(context, endpoint)
-
- with_metrics(context) { @handler.call(context) }
- end
-
- private
-
- def with_metrics(context, &block)
- metrics = []
- metrics << 'ENDPOINT_OVERRIDE' unless context.config.regional_endpoint
- if context[:auth_scheme] && context[:auth_scheme]['name'] == 'sigv4a'
- metrics << 'SIGV4A_SIGNING'
- end
- if context.config.credentials&.credentials&.account_id
- metrics << 'RESOLVED_ACCOUNT_ID'
- end
- Aws::Plugins::UserAgent.metric(*metrics, &block)
- end
-
- def apply_endpoint_headers(context, headers)
- headers.each do |key, values|
- value = values
- .compact
- .map { |s| Seahorse::Util.escape_header_list_string(s.to_s) }
- .join(',')
-
- context.http_request.headers[key] = value
- end
- end
- end
-
- def add_handlers(handlers, _config)
- handlers.add(Handler, step: :build, priority: 75)
- end
- end
- end
-end
diff --git a/gems/aws-sdk-cloudwatchevidently/lib/aws-sdk-cloudwatchevidently/resource.rb b/gems/aws-sdk-cloudwatchevidently/lib/aws-sdk-cloudwatchevidently/resource.rb
deleted file mode 100644
index 8fb80c42ca2..00000000000
--- a/gems/aws-sdk-cloudwatchevidently/lib/aws-sdk-cloudwatchevidently/resource.rb
+++ /dev/null
@@ -1,26 +0,0 @@
-# frozen_string_literal: true
-
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-module Aws::CloudWatchEvidently
-
- class Resource
-
- # @param options ({})
- # @option options [Client] :client
- def initialize(options = {})
- @client = options[:client] || Client.new(options)
- end
-
- # @return [Client]
- def client
- @client
- end
-
- end
-end
diff --git a/gems/aws-sdk-cloudwatchevidently/lib/aws-sdk-cloudwatchevidently/types.rb b/gems/aws-sdk-cloudwatchevidently/lib/aws-sdk-cloudwatchevidently/types.rb
deleted file mode 100644
index be403ec7ba5..00000000000
--- a/gems/aws-sdk-cloudwatchevidently/lib/aws-sdk-cloudwatchevidently/types.rb
+++ /dev/null
@@ -1,3650 +0,0 @@
-# frozen_string_literal: true
-
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-module Aws::CloudWatchEvidently
- module Types
-
- # You do not have sufficient permissions to perform this action.
- #
- # @!attribute [rw] message
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/AccessDeniedException AWS API Documentation
- #
- class AccessDeniedException < Struct.new(
- :message)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] project
- # The name or ARN of the project that contains the feature being
- # evaluated.
- # @return [String]
- #
- # @!attribute [rw] requests
- # An array of structures, where each structure assigns a feature
- # variation to one user session.
- # @return [Array]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/BatchEvaluateFeatureRequest AWS API Documentation
- #
- class BatchEvaluateFeatureRequest < Struct.new(
- :project,
- :requests)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] results
- # An array of structures, where each structure displays the results of
- # one feature evaluation assignment to one user session.
- # @return [Array]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/BatchEvaluateFeatureResponse AWS API Documentation
- #
- class BatchEvaluateFeatureResponse < Struct.new(
- :results)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # A structure containing the CloudWatch Logs log group where the project
- # stores evaluation events.
- #
- # @!attribute [rw] log_group
- # The name of the log group where the project stores evaluation
- # events.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/CloudWatchLogsDestination AWS API Documentation
- #
- class CloudWatchLogsDestination < Struct.new(
- :log_group)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # A structure containing the CloudWatch Logs log group where the project
- # stores evaluation events.
- #
- # @!attribute [rw] log_group
- # The name of the log group where the project stores evaluation
- # events.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/CloudWatchLogsDestinationConfig AWS API Documentation
- #
- class CloudWatchLogsDestinationConfig < Struct.new(
- :log_group)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # A resource was in an inconsistent state during an update or a
- # deletion.
- #
- # @!attribute [rw] message
- # @return [String]
- #
- # @!attribute [rw] resource_id
- # The ID of the resource that caused the exception.
- # @return [String]
- #
- # @!attribute [rw] resource_type
- # The type of the resource that is associated with the error.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ConflictException AWS API Documentation
- #
- class ConflictException < Struct.new(
- :message,
- :resource_id,
- :resource_type)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] description
- # An optional description of the experiment.
- # @return [String]
- #
- # @!attribute [rw] metric_goals
- # An array of structures that defines the metrics used for the
- # experiment, and whether a higher or lower value for each metric is
- # the goal.
- # @return [Array]
- #
- # @!attribute [rw] name
- # A name for the new experiment.
- # @return [String]
- #
- # @!attribute [rw] online_ab_config
- # A structure that contains the configuration of which variation to
- # use as the "control" version. tThe "control" version is used for
- # comparison with other variations. This structure also specifies how
- # much experiment traffic is allocated to each variation.
- # @return [Types::OnlineAbConfig]
- #
- # @!attribute [rw] project
- # The name or ARN of the project that you want to create the new
- # experiment in.
- # @return [String]
- #
- # @!attribute [rw] randomization_salt
- # When Evidently assigns a particular user session to an experiment,
- # it must use a randomization ID to determine which variation the user
- # session is served. This randomization ID is a combination of the
- # entity ID and `randomizationSalt`. If you omit `randomizationSalt`,
- # Evidently uses the experiment name as the `randomizationSalt`.
- # @return [String]
- #
- # @!attribute [rw] sampling_rate
- # The portion of the available audience that you want to allocate to
- # this experiment, in thousandths of a percent. The available audience
- # is the total audience minus the audience that you have allocated to
- # overrides or current launches of this feature.
- #
- # This is represented in thousandths of a percent. For example,
- # specify 10,000 to allocate 10% of the available audience.
- # @return [Integer]
- #
- # @!attribute [rw] segment
- # Specifies an audience *segment* to use in the experiment. When a
- # segment is used in an experiment, only user sessions that match the
- # segment pattern are used in the experiment.
- # @return [String]
- #
- # @!attribute [rw] tags
- # Assigns one or more tags (key-value pairs) to the experiment.
- #
- # Tags can help you organize and categorize your resources. You can
- # also use them to scope user permissions by granting a user
- # permission to access or change only resources with certain tag
- # values.
- #
- # Tags don't have any semantic meaning to Amazon Web Services and are
- # interpreted strictly as strings of characters.
- #
- # You can associate as many as 50 tags with an experiment.
- #
- # For more information, see [Tagging Amazon Web Services
- # resources][1].
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html
- # @return [Hash]
- #
- # @!attribute [rw] treatments
- # An array of structures that describe the configuration of each
- # feature variation used in the experiment.
- # @return [Array]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/CreateExperimentRequest AWS API Documentation
- #
- class CreateExperimentRequest < Struct.new(
- :description,
- :metric_goals,
- :name,
- :online_ab_config,
- :project,
- :randomization_salt,
- :sampling_rate,
- :segment,
- :tags,
- :treatments)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] experiment
- # A structure containing the configuration details of the experiment
- # that you created.
- # @return [Types::Experiment]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/CreateExperimentResponse AWS API Documentation
- #
- class CreateExperimentResponse < Struct.new(
- :experiment)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] default_variation
- # The name of the variation to use as the default variation. The
- # default variation is served to users who are not allocated to any
- # ongoing launches or experiments of this feature.
- #
- # This variation must also be listed in the `variations` structure.
- #
- # If you omit `defaultVariation`, the first variation listed in the
- # `variations` structure is used as the default variation.
- # @return [String]
- #
- # @!attribute [rw] description
- # An optional description of the feature.
- # @return [String]
- #
- # @!attribute [rw] entity_overrides
- # Specify users that should always be served a specific variation of a
- # feature. Each user is specified by a key-value pair . For each key,
- # specify a user by entering their user ID, account ID, or some other
- # identifier. For the value, specify the name of the variation that
- # they are to be served.
- #
- # This parameter is limited to 2500 overrides or a total of 40KB. The
- # 40KB limit includes an overhead of 6 bytes per override.
- # @return [Hash]
- #
- # @!attribute [rw] evaluation_strategy
- # Specify `ALL_RULES` to activate the traffic allocation specified by
- # any ongoing launches or experiments. Specify `DEFAULT_VARIATION` to
- # serve the default variation to all users instead.
- # @return [String]
- #
- # @!attribute [rw] name
- # The name for the new feature.
- # @return [String]
- #
- # @!attribute [rw] project
- # The name or ARN of the project that is to contain the new feature.
- # @return [String]
- #
- # @!attribute [rw] tags
- # Assigns one or more tags (key-value pairs) to the feature.
- #
- # Tags can help you organize and categorize your resources. You can
- # also use them to scope user permissions by granting a user
- # permission to access or change only resources with certain tag
- # values.
- #
- # Tags don't have any semantic meaning to Amazon Web Services and are
- # interpreted strictly as strings of characters.
- #
- # You can associate as many as 50 tags with a feature.
- #
- # For more information, see [Tagging Amazon Web Services
- # resources][1].
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html
- # @return [Hash]
- #
- # @!attribute [rw] variations
- # An array of structures that contain the configuration of the
- # feature's different variations.
- # @return [Array]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/CreateFeatureRequest AWS API Documentation
- #
- class CreateFeatureRequest < Struct.new(
- :default_variation,
- :description,
- :entity_overrides,
- :evaluation_strategy,
- :name,
- :project,
- :tags,
- :variations)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] feature
- # A structure that contains information about the new feature.
- # @return [Types::Feature]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/CreateFeatureResponse AWS API Documentation
- #
- class CreateFeatureResponse < Struct.new(
- :feature)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] description
- # An optional description for the launch.
- # @return [String]
- #
- # @!attribute [rw] groups
- # An array of structures that contains the feature and variations that
- # are to be used for the launch.
- # @return [Array]
- #
- # @!attribute [rw] metric_monitors
- # An array of structures that define the metrics that will be used to
- # monitor the launch performance.
- # @return [Array]
- #
- # @!attribute [rw] name
- # The name for the new launch.
- # @return [String]
- #
- # @!attribute [rw] project
- # The name or ARN of the project that you want to create the launch
- # in.
- # @return [String]
- #
- # @!attribute [rw] randomization_salt
- # When Evidently assigns a particular user session to a launch, it
- # must use a randomization ID to determine which variation the user
- # session is served. This randomization ID is a combination of the
- # entity ID and `randomizationSalt`. If you omit `randomizationSalt`,
- # Evidently uses the launch name as the `randomizationSalt`.
- # @return [String]
- #
- # @!attribute [rw] scheduled_splits_config
- # An array of structures that define the traffic allocation
- # percentages among the feature variations during each step of the
- # launch.
- # @return [Types::ScheduledSplitsLaunchConfig]
- #
- # @!attribute [rw] tags
- # Assigns one or more tags (key-value pairs) to the launch.
- #
- # Tags can help you organize and categorize your resources. You can
- # also use them to scope user permissions by granting a user
- # permission to access or change only resources with certain tag
- # values.
- #
- # Tags don't have any semantic meaning to Amazon Web Services and are
- # interpreted strictly as strings of characters.
- #
- # You can associate as many as 50 tags with a launch.
- #
- # For more information, see [Tagging Amazon Web Services
- # resources][1].
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html
- # @return [Hash]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/CreateLaunchRequest AWS API Documentation
- #
- class CreateLaunchRequest < Struct.new(
- :description,
- :groups,
- :metric_monitors,
- :name,
- :project,
- :randomization_salt,
- :scheduled_splits_config,
- :tags)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] launch
- # A structure that contains the configuration of the launch that was
- # created.
- # @return [Types::Launch]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/CreateLaunchResponse AWS API Documentation
- #
- class CreateLaunchResponse < Struct.new(
- :launch)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] app_config_resource
- # Use this parameter if the project will use *client-side evaluation
- # powered by AppConfig*. Client-side evaluation allows your
- # application to assign variations to user sessions locally instead of
- # by calling the [EvaluateFeature][1] operation. This mitigates the
- # latency and availability risks that come with an API call. For more
- # information, see [ Client-side evaluation - powered by
- # AppConfig.][2]
- #
- # This parameter is a structure that contains information about the
- # AppConfig application and environment that will be used as for
- # client-side evaluation.
- #
- # To create a project that uses client-side evaluation, you must have
- # the `evidently:ExportProjectAsConfiguration` permission.
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_EvaluateFeature.html
- # [2]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Evidently-client-side-evaluation.html
- # @return [Types::ProjectAppConfigResourceConfig]
- #
- # @!attribute [rw] data_delivery
- # A structure that contains information about where Evidently is to
- # store evaluation events for longer term storage, if you choose to do
- # so. If you choose not to store these events, Evidently deletes them
- # after using them to produce metrics and other experiment results
- # that you can view.
- # @return [Types::ProjectDataDeliveryConfig]
- #
- # @!attribute [rw] description
- # An optional description of the project.
- # @return [String]
- #
- # @!attribute [rw] name
- # The name for the project.
- # @return [String]
- #
- # @!attribute [rw] tags
- # Assigns one or more tags (key-value pairs) to the project.
- #
- # Tags can help you organize and categorize your resources. You can
- # also use them to scope user permissions by granting a user
- # permission to access or change only resources with certain tag
- # values.
- #
- # Tags don't have any semantic meaning to Amazon Web Services and are
- # interpreted strictly as strings of characters.
- #
- # You can associate as many as 50 tags with a project.
- #
- # For more information, see [Tagging Amazon Web Services
- # resources][1].
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html
- # @return [Hash]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/CreateProjectRequest AWS API Documentation
- #
- class CreateProjectRequest < Struct.new(
- :app_config_resource,
- :data_delivery,
- :description,
- :name,
- :tags)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] project
- # A structure that contains information about the created project.
- # @return [Types::Project]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/CreateProjectResponse AWS API Documentation
- #
- class CreateProjectResponse < Struct.new(
- :project)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] description
- # An optional description for this segment.
- # @return [String]
- #
- # @!attribute [rw] name
- # A name for the segment.
- # @return [String]
- #
- # @!attribute [rw] pattern
- # The pattern to use for the segment. For more information about
- # pattern syntax, see [ Segment rule pattern syntax][1].
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Evidently-segments.html#CloudWatch-Evidently-segments-syntax.html
- # @return [String]
- #
- # @!attribute [rw] tags
- # Assigns one or more tags (key-value pairs) to the segment.
- #
- # Tags can help you organize and categorize your resources. You can
- # also use them to scope user permissions by granting a user
- # permission to access or change only resources with certain tag
- # values.
- #
- # Tags don't have any semantic meaning to Amazon Web Services and are
- # interpreted strictly as strings of characters.
- #
- # You can associate as many as 50 tags with a segment.
- #
- # For more information, see [Tagging Amazon Web Services
- # resources][1].
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html
- # @return [Hash]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/CreateSegmentRequest AWS API Documentation
- #
- class CreateSegmentRequest < Struct.new(
- :description,
- :name,
- :pattern,
- :tags)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] segment
- # A structure that contains the complete information about the segment
- # that was just created.
- # @return [Types::Segment]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/CreateSegmentResponse AWS API Documentation
- #
- class CreateSegmentResponse < Struct.new(
- :segment)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] experiment
- # The name of the experiment to delete.
- # @return [String]
- #
- # @!attribute [rw] project
- # The name or ARN of the project that contains the experiment to
- # delete.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/DeleteExperimentRequest AWS API Documentation
- #
- class DeleteExperimentRequest < Struct.new(
- :experiment,
- :project)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/DeleteExperimentResponse AWS API Documentation
- #
- class DeleteExperimentResponse < Aws::EmptyStructure; end
-
- # @!attribute [rw] feature
- # The name of the feature to delete.
- # @return [String]
- #
- # @!attribute [rw] project
- # The name or ARN of the project that contains the feature to delete.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/DeleteFeatureRequest AWS API Documentation
- #
- class DeleteFeatureRequest < Struct.new(
- :feature,
- :project)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/DeleteFeatureResponse AWS API Documentation
- #
- class DeleteFeatureResponse < Aws::EmptyStructure; end
-
- # @!attribute [rw] launch
- # The name of the launch to delete.
- # @return [String]
- #
- # @!attribute [rw] project
- # The name or ARN of the project that contains the launch to delete.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/DeleteLaunchRequest AWS API Documentation
- #
- class DeleteLaunchRequest < Struct.new(
- :launch,
- :project)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/DeleteLaunchResponse AWS API Documentation
- #
- class DeleteLaunchResponse < Aws::EmptyStructure; end
-
- # @!attribute [rw] project
- # The name or ARN of the project to delete.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/DeleteProjectRequest AWS API Documentation
- #
- class DeleteProjectRequest < Struct.new(
- :project)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/DeleteProjectResponse AWS API Documentation
- #
- class DeleteProjectResponse < Aws::EmptyStructure; end
-
- # @!attribute [rw] segment
- # Specifies the segment to delete.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/DeleteSegmentRequest AWS API Documentation
- #
- class DeleteSegmentRequest < Struct.new(
- :segment)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/DeleteSegmentResponse AWS API Documentation
- #
- class DeleteSegmentResponse < Aws::EmptyStructure; end
-
- # @!attribute [rw] entity_id
- # An internal ID that represents a unique user of the application.
- # This `entityID` is checked against any override rules assigned for
- # this feature.
- # @return [String]
- #
- # @!attribute [rw] evaluation_context
- # A JSON object of attributes that you can optionally pass in as part
- # of the evaluation event sent to Evidently from the user session.
- # Evidently can use this value to match user sessions with defined
- # audience segments. For more information, see [Use segments to focus
- # your audience][1].
- #
- # If you include this parameter, the value must be a JSON object. A
- # JSON array is not supported.
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Evidently-segments.html
- # @return [String]
- #
- # @!attribute [rw] feature
- # The name of the feature being evaluated.
- # @return [String]
- #
- # @!attribute [rw] project
- # The name or ARN of the project that contains this feature.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/EvaluateFeatureRequest AWS API Documentation
- #
- class EvaluateFeatureRequest < Struct.new(
- :entity_id,
- :evaluation_context,
- :feature,
- :project)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] details
- # If this user was assigned to a launch or experiment, this field
- # lists the launch or experiment name.
- # @return [String]
- #
- # @!attribute [rw] reason
- # Specifies the reason that the user session was assigned this
- # variation. Possible values include `DEFAULT`, meaning the user was
- # served the default variation; `LAUNCH_RULE_MATCH`, if the user
- # session was enrolled in a launch; `EXPERIMENT_RULE_MATCH`, if the
- # user session was enrolled in an experiment; or
- # `ENTITY_OVERRIDES_MATCH`, if the user's `entityId` matches an
- # override rule.
- # @return [String]
- #
- # @!attribute [rw] value
- # The value assigned to this variation to differentiate it from the
- # other variations of this feature.
- # @return [Types::VariableValue]
- #
- # @!attribute [rw] variation
- # The name of the variation that was served to the user session.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/EvaluateFeatureResponse AWS API Documentation
- #
- class EvaluateFeatureResponse < Struct.new(
- :details,
- :reason,
- :value,
- :variation)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # This structure assigns a feature variation to one user session.
- #
- # @!attribute [rw] entity_id
- # An internal ID that represents a unique user session of the
- # application. This `entityID` is checked against any override rules
- # assigned for this feature.
- # @return [String]
- #
- # @!attribute [rw] evaluation_context
- # A JSON block of attributes that you can optionally pass in. This
- # JSON block is included in the evaluation events sent to Evidently
- # from the user session.
- # @return [String]
- #
- # @!attribute [rw] feature
- # The name of the feature being evaluated.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/EvaluationRequest AWS API Documentation
- #
- class EvaluationRequest < Struct.new(
- :entity_id,
- :evaluation_context,
- :feature)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # This structure displays the results of one feature evaluation
- # assignment to one user session.
- #
- # @!attribute [rw] details
- # If this user was assigned to a launch or experiment, this field
- # lists the launch or experiment name.
- # @return [String]
- #
- # @!attribute [rw] entity_id
- # An internal ID that represents a unique user session of the
- # application.
- # @return [String]
- #
- # @!attribute [rw] feature
- # The name of the feature being evaluated.
- # @return [String]
- #
- # @!attribute [rw] project
- # The name or ARN of the project that contains the feature being
- # evaluated.
- # @return [String]
- #
- # @!attribute [rw] reason
- # Specifies the reason that the user session was assigned this
- # variation. Possible values include `DEFAULT`, meaning the user was
- # served the default variation; `LAUNCH_RULE_MATCH`, if the user
- # session was enrolled in a launch; or `EXPERIMENT_RULE_MATCH`, if the
- # user session was enrolled in an experiment.
- # @return [String]
- #
- # @!attribute [rw] value
- # The value assigned to this variation to differentiate it from the
- # other variations of this feature.
- # @return [Types::VariableValue]
- #
- # @!attribute [rw] variation
- # The name of the variation that was served to the user session.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/EvaluationResult AWS API Documentation
- #
- class EvaluationResult < Struct.new(
- :details,
- :entity_id,
- :feature,
- :project,
- :reason,
- :value,
- :variation)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # A structure that contains the information about an evaluation rule for
- # this feature, if it is used in a launch or experiment.
- #
- # @!attribute [rw] name
- # The name of the experiment or launch.
- # @return [String]
- #
- # @!attribute [rw] type
- # This value is `aws.evidently.splits` if this is an evaluation rule
- # for a launch, and it is `aws.evidently.onlineab` if this is an
- # evaluation rule for an experiment.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/EvaluationRule AWS API Documentation
- #
- class EvaluationRule < Struct.new(
- :name,
- :type)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # A structure that contains the information about one evaluation event
- # or custom event sent to Evidently. This is a JSON payload. If this
- # event specifies a pre-defined event type, the payload must follow the
- # defined event schema.
- #
- # @!attribute [rw] data
- # The event data.
- # @return [String]
- #
- # @!attribute [rw] timestamp
- # The timestamp of the event.
- # @return [Time]
- #
- # @!attribute [rw] type
- # `aws.evidently.evaluation` specifies an evaluation event, which
- # determines which feature variation that a user sees.
- # `aws.evidently.custom` specifies a custom event, which generates
- # metrics from user actions such as clicks and checkouts.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/Event AWS API Documentation
- #
- class Event < Struct.new(
- :data,
- :timestamp,
- :type)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # A structure containing the configuration details of an experiment.
- #
- # @!attribute [rw] arn
- # The ARN of the experiment.
- # @return [String]
- #
- # @!attribute [rw] created_time
- # The date and time that the experiment is first created.
- # @return [Time]
- #
- # @!attribute [rw] description
- # A description of the experiment.
- # @return [String]
- #
- # @!attribute [rw] execution
- # A structure that contains the date and time that the experiment
- # started and ended.
- # @return [Types::ExperimentExecution]
- #
- # @!attribute [rw] last_updated_time
- # The date and time that the experiment was most recently updated.
- # @return [Time]
- #
- # @!attribute [rw] metric_goals
- # An array of structures that defines the metrics used for the
- # experiment, and whether a higher or lower value for each metric is
- # the goal.
- # @return [Array]
- #
- # @!attribute [rw] name
- # The name of the experiment.
- # @return [String]
- #
- # @!attribute [rw] online_ab_definition
- # A structure that contains the configuration of which variation to
- # use as the "control" version. The "control" version is used for
- # comparison with other variations. This structure also specifies how
- # much experiment traffic is allocated to each variation.
- # @return [Types::OnlineAbDefinition]
- #
- # @!attribute [rw] project
- # The name or ARN of the project that contains this experiment.
- # @return [String]
- #
- # @!attribute [rw] randomization_salt
- # This value is used when Evidently assigns a particular user session
- # to the experiment. It helps create a randomization ID to determine
- # which variation the user session is served. This randomization ID is
- # a combination of the entity ID and `randomizationSalt`.
- # @return [String]
- #
- # @!attribute [rw] sampling_rate
- # In thousandths of a percent, the amount of the available audience
- # that is allocated to this experiment. The available audience is the
- # total audience minus the audience that you have allocated to
- # overrides or current launches of this feature.
- #
- # This is represented in thousandths of a percent, so a value of
- # 10,000 is 10% of the available audience.
- # @return [Integer]
- #
- # @!attribute [rw] schedule
- # A structure that contains the time and date that Evidently completed
- # the analysis of the experiment.
- # @return [Types::ExperimentSchedule]
- #
- # @!attribute [rw] segment
- # The audience segment being used for the experiment, if a segment is
- # being used.
- # @return [String]
- #
- # @!attribute [rw] status
- # The current state of the experiment.
- # @return [String]
- #
- # @!attribute [rw] status_reason
- # If the experiment was stopped, this is the string that was entered
- # by the person who stopped the experiment, to explain why it was
- # stopped.
- # @return [String]
- #
- # @!attribute [rw] tags
- # The list of tag keys and values associated with this experiment.
- # @return [Hash]
- #
- # @!attribute [rw] treatments
- # An array of structures that describe the configuration of each
- # feature variation used in the experiment.
- # @return [Array]
- #
- # @!attribute [rw] type
- # The type of this experiment. Currently, this value must be
- # `aws.experiment.onlineab`.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/Experiment AWS API Documentation
- #
- class Experiment < Struct.new(
- :arn,
- :created_time,
- :description,
- :execution,
- :last_updated_time,
- :metric_goals,
- :name,
- :online_ab_definition,
- :project,
- :randomization_salt,
- :sampling_rate,
- :schedule,
- :segment,
- :status,
- :status_reason,
- :tags,
- :treatments,
- :type)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # This structure contains the date and time that the experiment started
- # and ended.
- #
- # @!attribute [rw] ended_time
- # The date and time that the experiment ended.
- # @return [Time]
- #
- # @!attribute [rw] started_time
- # The date and time that the experiment started.
- # @return [Time]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ExperimentExecution AWS API Documentation
- #
- class ExperimentExecution < Struct.new(
- :ended_time,
- :started_time)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # A structure that contains results of an experiment.
- #
- # @!attribute [rw] content
- # The content of the report.
- # @return [String]
- #
- # @!attribute [rw] metric_name
- # The name of the metric that is analyzed in this experiment report.
- # @return [String]
- #
- # @!attribute [rw] report_name
- # The type of analysis used for this report.
- # @return [String]
- #
- # @!attribute [rw] treatment_name
- # The name of the variation that this report pertains to.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ExperimentReport AWS API Documentation
- #
- class ExperimentReport < Struct.new(
- :content,
- :metric_name,
- :report_name,
- :treatment_name)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # A structure that contains experiment results for one metric that is
- # monitored in the experiment.
- #
- # @!attribute [rw] metric_name
- # The name of the metric.
- # @return [String]
- #
- # @!attribute [rw] result_stat
- # The experiment statistic that these results pertain to.
- # @return [String]
- #
- # @!attribute [rw] treatment_name
- # The treatment, or variation, that returned the `values` in this
- # structure.
- # @return [String]
- #
- # @!attribute [rw] values
- # The values for the `metricName` that were recorded in the
- # experiment.
- # @return [Array]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ExperimentResultsData AWS API Documentation
- #
- class ExperimentResultsData < Struct.new(
- :metric_name,
- :result_stat,
- :treatment_name,
- :values)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # This structure contains the time and date that Evidently completed the
- # analysis of the experiment.
- #
- # @!attribute [rw] analysis_complete_time
- # The time and date that Evidently completed the analysis of the
- # experiment.
- # @return [Time]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ExperimentSchedule AWS API Documentation
- #
- class ExperimentSchedule < Struct.new(
- :analysis_complete_time)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # This structure contains information about one Evidently feature in
- # your account.
- #
- # @!attribute [rw] arn
- # The ARN of the feature.
- # @return [String]
- #
- # @!attribute [rw] created_time
- # The date and time that the feature is created.
- # @return [Time]
- #
- # @!attribute [rw] default_variation
- # The name of the variation that is used as the default variation. The
- # default variation is served to users who are not allocated to any
- # ongoing launches or experiments of this feature.
- #
- # This variation must also be listed in the `variations` structure.
- #
- # If you omit `defaultVariation`, the first variation listed in the
- # `variations` structure is used as the default variation.
- # @return [String]
- #
- # @!attribute [rw] description
- # The description of the feature.
- # @return [String]
- #
- # @!attribute [rw] entity_overrides
- # A set of key-value pairs that specify users who should always be
- # served a specific variation of a feature. Each key specifies a user
- # using their user ID, account ID, or some other identifier. The value
- # specifies the name of the variation that the user is to be served.
- #
- # For the override to be successful, the value of the key must match
- # the `entityId` used in the [EvaluateFeature][1] operation.
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_EvaluateFeature.html
- # @return [Hash]
- #
- # @!attribute [rw] evaluation_rules
- # An array of structures that define the evaluation rules for the
- # feature.
- # @return [Array]
- #
- # @!attribute [rw] evaluation_strategy
- # If this value is `ALL_RULES`, the traffic allocation specified by
- # any ongoing launches or experiments is being used. If this is
- # `DEFAULT_VARIATION`, the default variation is being served to all
- # users.
- # @return [String]
- #
- # @!attribute [rw] last_updated_time
- # The date and time that the feature was most recently updated.
- # @return [Time]
- #
- # @!attribute [rw] name
- # The name of the feature.
- # @return [String]
- #
- # @!attribute [rw] project
- # The name or ARN of the project that contains the feature.
- # @return [String]
- #
- # @!attribute [rw] status
- # The current state of the feature.
- # @return [String]
- #
- # @!attribute [rw] tags
- # The list of tag keys and values associated with this feature.
- # @return [Hash]
- #
- # @!attribute [rw] value_type
- # Defines the type of value used to define the different feature
- # variations. For more information, see [Variation types][1]
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Evidently-variationtypes.html
- # @return [String]
- #
- # @!attribute [rw] variations
- # An array of structures that contain the configuration of the
- # feature's different variations.
- # @return [Array]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/Feature AWS API Documentation
- #
- class Feature < Struct.new(
- :arn,
- :created_time,
- :default_variation,
- :description,
- :entity_overrides,
- :evaluation_rules,
- :evaluation_strategy,
- :last_updated_time,
- :name,
- :project,
- :status,
- :tags,
- :value_type,
- :variations)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # This structure contains information about one Evidently feature in
- # your account.
- #
- # @!attribute [rw] arn
- # The ARN of the feature.
- # @return [String]
- #
- # @!attribute [rw] created_time
- # The date and time that the feature is created.
- # @return [Time]
- #
- # @!attribute [rw] default_variation
- # The name of the variation that is used as the default variation. The
- # default variation is served to users who are not allocated to any
- # ongoing launches or experiments of this feature.
- # @return [String]
- #
- # @!attribute [rw] evaluation_rules
- # An array of structures that define
- # @return [Array]
- #
- # @!attribute [rw] evaluation_strategy
- # If this value is `ALL_RULES`, the traffic allocation specified by
- # any ongoing launches or experiments is being used. If this is
- # `DEFAULT_VARIATION`, the default variation is being served to all
- # users.
- # @return [String]
- #
- # @!attribute [rw] last_updated_time
- # The date and time that the feature was most recently updated.
- # @return [Time]
- #
- # @!attribute [rw] name
- # The name of the feature.
- # @return [String]
- #
- # @!attribute [rw] project
- # The name or ARN of the project that contains the feature.
- # @return [String]
- #
- # @!attribute [rw] status
- # The current state of the feature.
- # @return [String]
- #
- # @!attribute [rw] tags
- # The list of tag keys and values associated with this feature.
- # @return [Hash]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/FeatureSummary AWS API Documentation
- #
- class FeatureSummary < Struct.new(
- :arn,
- :created_time,
- :default_variation,
- :evaluation_rules,
- :evaluation_strategy,
- :last_updated_time,
- :name,
- :project,
- :status,
- :tags)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] experiment
- # The name of the experiment that you want to see the details of.
- # @return [String]
- #
- # @!attribute [rw] project
- # The name or ARN of the project that contains the experiment.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/GetExperimentRequest AWS API Documentation
- #
- class GetExperimentRequest < Struct.new(
- :experiment,
- :project)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] experiment
- # A structure containing the configuration details of the experiment.
- # @return [Types::Experiment]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/GetExperimentResponse AWS API Documentation
- #
- class GetExperimentResponse < Struct.new(
- :experiment)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] base_stat
- # The statistic used to calculate experiment results. Currently the
- # only valid value is `mean`, which uses the mean of the collected
- # values as the statistic.
- # @return [String]
- #
- # @!attribute [rw] end_time
- # The date and time that the experiment ended, if it is completed.
- # This must be no longer than 30 days after the experiment start time.
- # @return [Time]
- #
- # @!attribute [rw] experiment
- # The name of the experiment to retrieve the results of.
- # @return [String]
- #
- # @!attribute [rw] metric_names
- # The names of the experiment metrics that you want to see the results
- # of.
- # @return [Array]
- #
- # @!attribute [rw] period
- # In seconds, the amount of time to aggregate results together.
- # @return [Integer]
- #
- # @!attribute [rw] project
- # The name or ARN of the project that contains the experiment that you
- # want to see the results of.
- # @return [String]
- #
- # @!attribute [rw] report_names
- # The names of the report types that you want to see. Currently,
- # `BayesianInference` is the only valid value.
- # @return [Array]
- #
- # @!attribute [rw] result_stats
- # The statistics that you want to see in the returned results.
- #
- # * `PValue` specifies to use p-values for the results. A p-value is
- # used in hypothesis testing to measure how often you are willing to
- # make a mistake in rejecting the null hypothesis. A general
- # practice is to reject the null hypothesis and declare that the
- # results are statistically significant when the p-value is less
- # than 0.05.
- #
- # * `ConfidenceInterval` specifies a confidence interval for the
- # results. The confidence interval represents the range of values
- # for the chosen metric that is likely to contain the true
- # difference between the `baseStat` of a variation and the baseline.
- # Evidently returns the 95% confidence interval.
- #
- # * `TreatmentEffect` is the difference in the statistic specified by
- # the `baseStat` parameter between each variation and the default
- # variation.
- #
- # * `BaseStat` returns the statistical values collected for the metric
- # for each variation. The statistic uses the same statistic
- # specified in the `baseStat` parameter. Therefore, if `baseStat` is
- # `mean`, this returns the mean of the values collected for each
- # variation.
- # @return [Array]
- #
- # @!attribute [rw] start_time
- # The date and time that the experiment started.
- # @return [Time]
- #
- # @!attribute [rw] treatment_names
- # The names of the experiment treatments that you want to see the
- # results for.
- # @return [Array]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/GetExperimentResultsRequest AWS API Documentation
- #
- class GetExperimentResultsRequest < Struct.new(
- :base_stat,
- :end_time,
- :experiment,
- :metric_names,
- :period,
- :project,
- :report_names,
- :result_stats,
- :start_time,
- :treatment_names)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] details
- # If the experiment doesn't yet have enough events to provide valid
- # results, this field is returned with the message `Not enough events
- # to generate results`. If there are enough events to provide valid
- # results, this field is not returned.
- # @return [String]
- #
- # @!attribute [rw] reports
- # An array of structures that include the reports that you requested.
- # @return [Array]
- #
- # @!attribute [rw] results_data
- # An array of structures that include experiment results including
- # metric names and values.
- # @return [Array]
- #
- # @!attribute [rw] timestamps
- # The timestamps of each result returned.
- # @return [Array]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/GetExperimentResultsResponse AWS API Documentation
- #
- class GetExperimentResultsResponse < Struct.new(
- :details,
- :reports,
- :results_data,
- :timestamps)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] feature
- # The name of the feature that you want to retrieve information for.
- # @return [String]
- #
- # @!attribute [rw] project
- # The name or ARN of the project that contains the feature.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/GetFeatureRequest AWS API Documentation
- #
- class GetFeatureRequest < Struct.new(
- :feature,
- :project)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] feature
- # A structure containing the configuration details of the feature.
- # @return [Types::Feature]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/GetFeatureResponse AWS API Documentation
- #
- class GetFeatureResponse < Struct.new(
- :feature)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] launch
- # The name of the launch that you want to see the details of.
- # @return [String]
- #
- # @!attribute [rw] project
- # The name or ARN of the project that contains the launch.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/GetLaunchRequest AWS API Documentation
- #
- class GetLaunchRequest < Struct.new(
- :launch,
- :project)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] launch
- # A structure containing the configuration details of the launch.
- # @return [Types::Launch]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/GetLaunchResponse AWS API Documentation
- #
- class GetLaunchResponse < Struct.new(
- :launch)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] project
- # The name or ARN of the project that you want to see the details of.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/GetProjectRequest AWS API Documentation
- #
- class GetProjectRequest < Struct.new(
- :project)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] project
- # A structure containing the configuration details of the project.
- # @return [Types::Project]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/GetProjectResponse AWS API Documentation
- #
- class GetProjectResponse < Struct.new(
- :project)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] segment
- # The ARN of the segment to return information for.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/GetSegmentRequest AWS API Documentation
- #
- class GetSegmentRequest < Struct.new(
- :segment)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] segment
- # A structure that contains the complete information about the
- # segment.
- # @return [Types::Segment]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/GetSegmentResponse AWS API Documentation
- #
- class GetSegmentResponse < Struct.new(
- :segment)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Unexpected error while processing the request. Retry the request.
- #
- # @!attribute [rw] message
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/InternalServerException AWS API Documentation
- #
- class InternalServerException < Struct.new(
- :message)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # This structure contains the configuration details of one Evidently
- # launch.
- #
- # @!attribute [rw] arn
- # The ARN of the launch.
- # @return [String]
- #
- # @!attribute [rw] created_time
- # The date and time that the launch is created.
- # @return [Time]
- #
- # @!attribute [rw] description
- # The description of the launch.
- # @return [String]
- #
- # @!attribute [rw] execution
- # A structure that contains information about the start and end times
- # of the launch.
- # @return [Types::LaunchExecution]
- #
- # @!attribute [rw] groups
- # An array of structures that define the feature variations that are
- # being used in the launch.
- # @return [Array]
- #
- # @!attribute [rw] last_updated_time
- # The date and time that the launch was most recently updated.
- # @return [Time]
- #
- # @!attribute [rw] metric_monitors
- # An array of structures that define the metrics that are being used
- # to monitor the launch performance.
- # @return [Array]
- #
- # @!attribute [rw] name
- # The name of the launch.
- # @return [String]
- #
- # @!attribute [rw] project
- # The name or ARN of the project that contains the launch.
- # @return [String]
- #
- # @!attribute [rw] randomization_salt
- # This value is used when Evidently assigns a particular user session
- # to the launch, to help create a randomization ID to determine which
- # variation the user session is served. This randomization ID is a
- # combination of the entity ID and `randomizationSalt`.
- # @return [String]
- #
- # @!attribute [rw] scheduled_splits_definition
- # An array of structures that define the traffic allocation
- # percentages among the feature variations during each step of the
- # launch.
- # @return [Types::ScheduledSplitsLaunchDefinition]
- #
- # @!attribute [rw] status
- # The current state of the launch.
- # @return [String]
- #
- # @!attribute [rw] status_reason
- # If the launch was stopped, this is the string that was entered by
- # the person who stopped the launch, to explain why it was stopped.
- # @return [String]
- #
- # @!attribute [rw] tags
- # The list of tag keys and values associated with this launch.
- # @return [Hash]
- #
- # @!attribute [rw] type
- # The type of launch.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/Launch AWS API Documentation
- #
- class Launch < Struct.new(
- :arn,
- :created_time,
- :description,
- :execution,
- :groups,
- :last_updated_time,
- :metric_monitors,
- :name,
- :project,
- :randomization_salt,
- :scheduled_splits_definition,
- :status,
- :status_reason,
- :tags,
- :type)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # This structure contains information about the start and end times of
- # the launch.
- #
- # @!attribute [rw] ended_time
- # The date and time that the launch ended.
- # @return [Time]
- #
- # @!attribute [rw] started_time
- # The date and time that the launch started.
- # @return [Time]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/LaunchExecution AWS API Documentation
- #
- class LaunchExecution < Struct.new(
- :ended_time,
- :started_time)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # A structure that defines one launch group in a launch. A launch group
- # is a variation of the feature that you are including in the launch.
- #
- # @!attribute [rw] description
- # A description of the launch group.
- # @return [String]
- #
- # @!attribute [rw] feature_variations
- # The feature variation for this launch group. This is a key-value
- # pair.
- # @return [Hash]
- #
- # @!attribute [rw] name
- # The name of the launch group.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/LaunchGroup AWS API Documentation
- #
- class LaunchGroup < Struct.new(
- :description,
- :feature_variations,
- :name)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # A structure that defines one launch group in a launch. A launch group
- # is a variation of the feature that you are including in the launch.
- #
- # @!attribute [rw] description
- # A description of the launch group.
- # @return [String]
- #
- # @!attribute [rw] feature
- # The feature that this launch is using.
- # @return [String]
- #
- # @!attribute [rw] name
- # A name for this launch group.
- # @return [String]
- #
- # @!attribute [rw] variation
- # The feature variation to use for this launch group.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/LaunchGroupConfig AWS API Documentation
- #
- class LaunchGroupConfig < Struct.new(
- :description,
- :feature,
- :name,
- :variation)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] max_results
- # The maximum number of results to include in the response.
- # @return [Integer]
- #
- # @!attribute [rw] next_token
- # The token to use when requesting the next set of results. You
- # received this token from a previous `ListExperiments` operation.
- # @return [String]
- #
- # @!attribute [rw] project
- # The name or ARN of the project to return the experiment list from.
- # @return [String]
- #
- # @!attribute [rw] status
- # Use this optional parameter to limit the returned results to only
- # the experiments with the status that you specify here.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ListExperimentsRequest AWS API Documentation
- #
- class ListExperimentsRequest < Struct.new(
- :max_results,
- :next_token,
- :project,
- :status)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] experiments
- # An array of structures that contain the configuration details of the
- # experiments in the specified project.
- # @return [Array]
- #
- # @!attribute [rw] next_token
- # The token to use in a subsequent `ListExperiments` operation to
- # return the next set of results.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ListExperimentsResponse AWS API Documentation
- #
- class ListExperimentsResponse < Struct.new(
- :experiments,
- :next_token)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] max_results
- # The maximum number of results to include in the response.
- # @return [Integer]
- #
- # @!attribute [rw] next_token
- # The token to use when requesting the next set of results. You
- # received this token from a previous `ListFeatures` operation.
- # @return [String]
- #
- # @!attribute [rw] project
- # The name or ARN of the project to return the feature list from.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ListFeaturesRequest AWS API Documentation
- #
- class ListFeaturesRequest < Struct.new(
- :max_results,
- :next_token,
- :project)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] features
- # An array of structures that contain the configuration details of the
- # features in the specified project.
- # @return [Array]
- #
- # @!attribute [rw] next_token
- # The token to use in a subsequent `ListFeatures` operation to return
- # the next set of results.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ListFeaturesResponse AWS API Documentation
- #
- class ListFeaturesResponse < Struct.new(
- :features,
- :next_token)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] max_results
- # The maximum number of results to include in the response.
- # @return [Integer]
- #
- # @!attribute [rw] next_token
- # The token to use when requesting the next set of results. You
- # received this token from a previous `ListLaunches` operation.
- # @return [String]
- #
- # @!attribute [rw] project
- # The name or ARN of the project to return the launch list from.
- # @return [String]
- #
- # @!attribute [rw] status
- # Use this optional parameter to limit the returned results to only
- # the launches with the status that you specify here.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ListLaunchesRequest AWS API Documentation
- #
- class ListLaunchesRequest < Struct.new(
- :max_results,
- :next_token,
- :project,
- :status)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] launches
- # An array of structures that contain the configuration details of the
- # launches in the specified project.
- # @return [Array]
- #
- # @!attribute [rw] next_token
- # The token to use in a subsequent `ListLaunches` operation to return
- # the next set of results.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ListLaunchesResponse AWS API Documentation
- #
- class ListLaunchesResponse < Struct.new(
- :launches,
- :next_token)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] max_results
- # The maximum number of results to include in the response.
- # @return [Integer]
- #
- # @!attribute [rw] next_token
- # The token to use when requesting the next set of results. You
- # received this token from a previous `ListProjects` operation.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ListProjectsRequest AWS API Documentation
- #
- class ListProjectsRequest < Struct.new(
- :max_results,
- :next_token)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] next_token
- # The token to use in a subsequent `ListProjects` operation to return
- # the next set of results.
- # @return [String]
- #
- # @!attribute [rw] projects
- # An array of structures that contain the configuration details of the
- # projects in the Region.
- # @return [Array]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ListProjectsResponse AWS API Documentation
- #
- class ListProjectsResponse < Struct.new(
- :next_token,
- :projects)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] max_results
- # The maximum number of results to include in the response. If you
- # omit this, the default of 50 is used.
- # @return [Integer]
- #
- # @!attribute [rw] next_token
- # The token to use when requesting the next set of results. You
- # received this token from a previous `ListSegmentReferences`
- # operation.
- # @return [String]
- #
- # @!attribute [rw] segment
- # The ARN of the segment that you want to view information for.
- # @return [String]
- #
- # @!attribute [rw] type
- # Specifies whether to return information about launches or
- # experiments that use this segment.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ListSegmentReferencesRequest AWS API Documentation
- #
- class ListSegmentReferencesRequest < Struct.new(
- :max_results,
- :next_token,
- :segment,
- :type)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] next_token
- # The token to use in a subsequent `ListSegmentReferences` operation
- # to return the next set of results.
- # @return [String]
- #
- # @!attribute [rw] referenced_by
- # An array of structures, where each structure contains information
- # about one experiment or launch that uses this segment.
- # @return [Array]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ListSegmentReferencesResponse AWS API Documentation
- #
- class ListSegmentReferencesResponse < Struct.new(
- :next_token,
- :referenced_by)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] max_results
- # The maximum number of results to include in the response. If you
- # omit this, the default of 50 is used.
- # @return [Integer]
- #
- # @!attribute [rw] next_token
- # The token to use when requesting the next set of results. You
- # received this token from a previous `ListSegments` operation.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ListSegmentsRequest AWS API Documentation
- #
- class ListSegmentsRequest < Struct.new(
- :max_results,
- :next_token)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] next_token
- # The token to use in a subsequent `ListSegments` operation to return
- # the next set of results.
- # @return [String]
- #
- # @!attribute [rw] segments
- # An array of structures that contain information about the segments
- # in this Region.
- # @return [Array]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ListSegmentsResponse AWS API Documentation
- #
- class ListSegmentsResponse < Struct.new(
- :next_token,
- :segments)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] resource_arn
- # The ARN of the resource that you want to see the tags of.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ListTagsForResourceRequest AWS API Documentation
- #
- class ListTagsForResourceRequest < Struct.new(
- :resource_arn)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] tags
- # The list of tag keys and values associated with the resource you
- # specified.
- # @return [Hash]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ListTagsForResourceResponse AWS API Documentation
- #
- class ListTagsForResourceResponse < Struct.new(
- :tags)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # This structure defines a metric that is being used to evaluate the
- # variations during a launch or experiment.
- #
- # @!attribute [rw] entity_id_key
- # The entity, such as a user or session, that does an action that
- # causes a metric value to be recorded.
- # @return [String]
- #
- # @!attribute [rw] event_pattern
- # The EventBridge event pattern that defines how the metric is
- # recorded.
- #
- # For more information about EventBridge event patterns, see [Amazon
- # EventBridge event patterns][1].
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-event-patterns.html
- # @return [String]
- #
- # @!attribute [rw] name
- # The name of the metric.
- # @return [String]
- #
- # @!attribute [rw] unit_label
- # The label for the units that the metric is measuring.
- # @return [String]
- #
- # @!attribute [rw] value_key
- # The value that is tracked to produce the metric.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/MetricDefinition AWS API Documentation
- #
- class MetricDefinition < Struct.new(
- :entity_id_key,
- :event_pattern,
- :name,
- :unit_label,
- :value_key)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # This structure defines a metric that you want to use to evaluate the
- # variations during a launch or experiment.
- #
- # @!attribute [rw] entity_id_key
- # The entity, such as a user or session, that does an action that
- # causes a metric value to be recorded. An example is
- # `userDetails.userID`.
- # @return [String]
- #
- # @!attribute [rw] event_pattern
- # The EventBridge event pattern that defines how the metric is
- # recorded.
- #
- # For more information about EventBridge event patterns, see [Amazon
- # EventBridge event patterns][1].
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-event-patterns.html
- # @return [String]
- #
- # @!attribute [rw] name
- # A name for the metric.
- # @return [String]
- #
- # @!attribute [rw] unit_label
- # A label for the units that the metric is measuring.
- # @return [String]
- #
- # @!attribute [rw] value_key
- # The value that is tracked to produce the metric.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/MetricDefinitionConfig AWS API Documentation
- #
- class MetricDefinitionConfig < Struct.new(
- :entity_id_key,
- :event_pattern,
- :name,
- :unit_label,
- :value_key)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # A structure that tells Evidently whether higher or lower values are
- # desired for a metric that is used in an experiment.
- #
- # @!attribute [rw] desired_change
- # `INCREASE` means that a variation with a higher number for this
- # metric is performing better.
- #
- # `DECREASE` means that a variation with a lower number for this
- # metric is performing better.
- # @return [String]
- #
- # @!attribute [rw] metric_definition
- # A structure that contains details about the metric.
- # @return [Types::MetricDefinition]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/MetricGoal AWS API Documentation
- #
- class MetricGoal < Struct.new(
- :desired_change,
- :metric_definition)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Use this structure to tell Evidently whether higher or lower values
- # are desired for a metric that is used in an experiment.
- #
- # @!attribute [rw] desired_change
- # `INCREASE` means that a variation with a higher number for this
- # metric is performing better.
- #
- # `DECREASE` means that a variation with a lower number for this
- # metric is performing better.
- # @return [String]
- #
- # @!attribute [rw] metric_definition
- # A structure that contains details about the metric.
- # @return [Types::MetricDefinitionConfig]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/MetricGoalConfig AWS API Documentation
- #
- class MetricGoalConfig < Struct.new(
- :desired_change,
- :metric_definition)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # A structure that defines a metric to be used to monitor performance of
- # the variations during a launch.
- #
- # @!attribute [rw] metric_definition
- # A structure that defines the metric.
- # @return [Types::MetricDefinition]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/MetricMonitor AWS API Documentation
- #
- class MetricMonitor < Struct.new(
- :metric_definition)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # A structure that defines a metric to be used to monitor performance of
- # the variations during a launch.
- #
- # @!attribute [rw] metric_definition
- # A structure that defines the metric.
- # @return [Types::MetricDefinitionConfig]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/MetricMonitorConfig AWS API Documentation
- #
- class MetricMonitorConfig < Struct.new(
- :metric_definition)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # A structure that contains the configuration of which variation to use
- # as the "control" version. The "control" version is used for
- # comparison with other variations. This structure also specifies how
- # much experiment traffic is allocated to each variation.
- #
- # @!attribute [rw] control_treatment_name
- # The name of the variation that is to be the default variation that
- # the other variations are compared to.
- # @return [String]
- #
- # @!attribute [rw] treatment_weights
- # A set of key-value pairs. The keys are variation names, and the
- # values are the portion of experiment traffic to be assigned to that
- # variation. Specify the traffic portion in thousandths of a percent,
- # so 20,000 for a variation would allocate 20% of the experiment
- # traffic to that variation.
- # @return [Hash]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/OnlineAbConfig AWS API Documentation
- #
- class OnlineAbConfig < Struct.new(
- :control_treatment_name,
- :treatment_weights)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # A structure that contains the configuration of which variation to use
- # as the "control" version. The "control" version is used for
- # comparison with other variations. This structure also specifies how
- # much experiment traffic is allocated to each variation.
- #
- # @!attribute [rw] control_treatment_name
- # The name of the variation that is the default variation that the
- # other variations are compared to.
- # @return [String]
- #
- # @!attribute [rw] treatment_weights
- # A set of key-value pairs. The keys are variation names, and the
- # values are the portion of experiment traffic to be assigned to that
- # variation. The traffic portion is specified in thousandths of a
- # percent, so 20,000 for a variation would allocate 20% of the
- # experiment traffic to that variation.
- # @return [Hash]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/OnlineAbDefinition AWS API Documentation
- #
- class OnlineAbDefinition < Struct.new(
- :control_treatment_name,
- :treatment_weights)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # This structure defines a project, which is the logical object in
- # Evidently that can contain features, launches, and experiments. Use
- # projects to group similar features together.
- #
- # @!attribute [rw] active_experiment_count
- # The number of ongoing experiments currently in the project.
- # @return [Integer]
- #
- # @!attribute [rw] active_launch_count
- # The number of ongoing launches currently in the project.
- # @return [Integer]
- #
- # @!attribute [rw] app_config_resource
- # This structure defines the configuration of how your application
- # integrates with AppConfig to run client-side evaluation.
- # @return [Types::ProjectAppConfigResource]
- #
- # @!attribute [rw] arn
- # The name or ARN of the project.
- # @return [String]
- #
- # @!attribute [rw] created_time
- # The date and time that the project is created.
- # @return [Time]
- #
- # @!attribute [rw] data_delivery
- # A structure that contains information about where Evidently is to
- # store evaluation events for longer term storage.
- # @return [Types::ProjectDataDelivery]
- #
- # @!attribute [rw] description
- # The user-entered description of the project.
- # @return [String]
- #
- # @!attribute [rw] experiment_count
- # The number of experiments currently in the project. This includes
- # all experiments that have been created and not deleted, whether they
- # are ongoing or not.
- # @return [Integer]
- #
- # @!attribute [rw] feature_count
- # The number of features currently in the project.
- # @return [Integer]
- #
- # @!attribute [rw] last_updated_time
- # The date and time that the project was most recently updated.
- # @return [Time]
- #
- # @!attribute [rw] launch_count
- # The number of launches currently in the project. This includes all
- # launches that have been created and not deleted, whether they are
- # ongoing or not.
- # @return [Integer]
- #
- # @!attribute [rw] name
- # The name of the project.
- # @return [String]
- #
- # @!attribute [rw] status
- # The current state of the project.
- # @return [String]
- #
- # @!attribute [rw] tags
- # The list of tag keys and values associated with this project.
- # @return [Hash]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/Project AWS API Documentation
- #
- class Project < Struct.new(
- :active_experiment_count,
- :active_launch_count,
- :app_config_resource,
- :arn,
- :created_time,
- :data_delivery,
- :description,
- :experiment_count,
- :feature_count,
- :last_updated_time,
- :launch_count,
- :name,
- :status,
- :tags)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # This is a structure that defines the configuration of how your
- # application integrates with AppConfig to run client-side evaluation.
- #
- # @!attribute [rw] application_id
- # The ID of the AppConfig application to use for client-side
- # evaluation.
- # @return [String]
- #
- # @!attribute [rw] configuration_profile_id
- # The ID of the AppConfig profile to use for client-side evaluation.
- # @return [String]
- #
- # @!attribute [rw] environment_id
- # The ID of the AppConfig environment to use for client-side
- # evaluation. This must be an environment that is within the
- # application that you specify for `applicationId`.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ProjectAppConfigResource AWS API Documentation
- #
- class ProjectAppConfigResource < Struct.new(
- :application_id,
- :configuration_profile_id,
- :environment_id)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Use this parameter to configure client-side evaluation for your
- # project. Client-side evaluation allows your application to assign
- # variations to user sessions locally instead of by calling the
- # [EvaluateFeature][1] operation to assign the variations. This
- # mitigates the latency and availability risks that come with an API
- # call.
- #
- # `ProjectAppConfigResource` is a structure that defines the
- # configuration of how your application integrates with AppConfig to run
- # client-side evaluation.
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_EvaluateFeature.html
- #
- # @!attribute [rw] application_id
- # The ID of the AppConfig application to use for client-side
- # evaluation.
- # @return [String]
- #
- # @!attribute [rw] environment_id
- # The ID of the AppConfig environment to use for client-side
- # evaluation. This must be an environment that is within the
- # application that you specify for `applicationId`.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ProjectAppConfigResourceConfig AWS API Documentation
- #
- class ProjectAppConfigResourceConfig < Struct.new(
- :application_id,
- :environment_id)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # A structure that contains information about where Evidently is to
- # store evaluation events for longer term storage.
- #
- # @!attribute [rw] cloud_watch_logs
- # If the project stores evaluation events in CloudWatch Logs, this
- # structure stores the log group name.
- # @return [Types::CloudWatchLogsDestination]
- #
- # @!attribute [rw] s3_destination
- # If the project stores evaluation events in an Amazon S3 bucket, this
- # structure stores the bucket name and bucket prefix.
- # @return [Types::S3Destination]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ProjectDataDelivery AWS API Documentation
- #
- class ProjectDataDelivery < Struct.new(
- :cloud_watch_logs,
- :s3_destination)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # A structure that contains information about where Evidently is to
- # store evaluation events for longer term storage.
- #
- # @!attribute [rw] cloud_watch_logs
- # If the project stores evaluation events in CloudWatch Logs, this
- # structure stores the log group name.
- # @return [Types::CloudWatchLogsDestinationConfig]
- #
- # @!attribute [rw] s3_destination
- # If the project stores evaluation events in an Amazon S3 bucket, this
- # structure stores the bucket name and bucket prefix.
- # @return [Types::S3DestinationConfig]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ProjectDataDeliveryConfig AWS API Documentation
- #
- class ProjectDataDeliveryConfig < Struct.new(
- :cloud_watch_logs,
- :s3_destination)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # A structure that contains configuration information about an Evidently
- # project.
- #
- # @!attribute [rw] active_experiment_count
- # The number of experiments currently in the project.
- # @return [Integer]
- #
- # @!attribute [rw] active_launch_count
- # The number of ongoing launches currently in the project.
- # @return [Integer]
- #
- # @!attribute [rw] arn
- # The name or ARN of the project.
- # @return [String]
- #
- # @!attribute [rw] created_time
- # The date and time that the project is created.
- # @return [Time]
- #
- # @!attribute [rw] description
- # The description of the project.
- # @return [String]
- #
- # @!attribute [rw] experiment_count
- # The number of experiments currently in the project.
- # @return [Integer]
- #
- # @!attribute [rw] feature_count
- # The number of features currently in the project.
- # @return [Integer]
- #
- # @!attribute [rw] last_updated_time
- # The date and time that the project was most recently updated.
- # @return [Time]
- #
- # @!attribute [rw] launch_count
- # The number of launches currently in the project, including launches
- # that are ongoing, completed, and not started yet.
- # @return [Integer]
- #
- # @!attribute [rw] name
- # The name of the project.
- # @return [String]
- #
- # @!attribute [rw] status
- # The current state of the project.
- # @return [String]
- #
- # @!attribute [rw] tags
- # The list of tag keys and values associated with this project.
- # @return [Hash]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ProjectSummary AWS API Documentation
- #
- class ProjectSummary < Struct.new(
- :active_experiment_count,
- :active_launch_count,
- :arn,
- :created_time,
- :description,
- :experiment_count,
- :feature_count,
- :last_updated_time,
- :launch_count,
- :name,
- :status,
- :tags)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] events
- # An array of event structures that contain the performance data that
- # is being sent to Evidently.
- # @return [Array]
- #
- # @!attribute [rw] project
- # The name or ARN of the project to write the events to.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/PutProjectEventsRequest AWS API Documentation
- #
- class PutProjectEventsRequest < Struct.new(
- :events,
- :project)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] event_results
- # A structure that contains Evidently's response to the sent events,
- # including an event ID and error codes, if any.
- # @return [Array]
- #
- # @!attribute [rw] failed_event_count
- # The number of events in the operation that could not be used by
- # Evidently.
- # @return [Integer]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/PutProjectEventsResponse AWS API Documentation
- #
- class PutProjectEventsResponse < Struct.new(
- :event_results,
- :failed_event_count)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # A structure that contains Evidently's response to the sent events,
- # including an event ID and error codes, if any.
- #
- # @!attribute [rw] error_code
- # If the `PutProjectEvents` operation has an error, the error code is
- # returned here.
- # @return [String]
- #
- # @!attribute [rw] error_message
- # If the `PutProjectEvents` operation has an error, the error message
- # is returned here.
- # @return [String]
- #
- # @!attribute [rw] event_id
- # A unique ID assigned to this `PutProjectEvents` operation.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/PutProjectEventsResultEntry AWS API Documentation
- #
- class PutProjectEventsResultEntry < Struct.new(
- :error_code,
- :error_message,
- :event_id)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # A structure that contains information about one experiment or launch
- # that uses the specified segment.
- #
- # @!attribute [rw] arn
- # The ARN of the experiment or launch.
- # @return [String]
- #
- # @!attribute [rw] end_time
- # The day and time that this experiment or launch ended.
- # @return [String]
- #
- # @!attribute [rw] last_updated_on
- # The day and time that this experiment or launch was most recently
- # updated.
- # @return [String]
- #
- # @!attribute [rw] name
- # The name of the experiment or launch.
- # @return [String]
- #
- # @!attribute [rw] start_time
- # The day and time that this experiment or launch started.
- # @return [String]
- #
- # @!attribute [rw] status
- # The status of the experiment or launch.
- # @return [String]
- #
- # @!attribute [rw] type
- # Specifies whether the resource that this structure contains
- # information about is an experiment or a launch.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/RefResource AWS API Documentation
- #
- class RefResource < Struct.new(
- :arn,
- :end_time,
- :last_updated_on,
- :name,
- :start_time,
- :status,
- :type)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # The request references a resource that does not exist.
- #
- # @!attribute [rw] message
- # @return [String]
- #
- # @!attribute [rw] resource_id
- # The ID of the resource that caused the exception.
- # @return [String]
- #
- # @!attribute [rw] resource_type
- # The type of the resource that is associated with the error.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ResourceNotFoundException AWS API Documentation
- #
- class ResourceNotFoundException < Struct.new(
- :message,
- :resource_id,
- :resource_type)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # If the project stores evaluation events in an Amazon S3 bucket, this
- # structure stores the bucket name and bucket prefix.
- #
- # @!attribute [rw] bucket
- # The name of the bucket in which Evidently stores evaluation events.
- # @return [String]
- #
- # @!attribute [rw] prefix
- # The bucket prefix in which Evidently stores evaluation events.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/S3Destination AWS API Documentation
- #
- class S3Destination < Struct.new(
- :bucket,
- :prefix)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # If the project stores evaluation events in an Amazon S3 bucket, this
- # structure stores the bucket name and bucket prefix.
- #
- # @!attribute [rw] bucket
- # The name of the bucket in which Evidently stores evaluation events.
- # @return [String]
- #
- # @!attribute [rw] prefix
- # The bucket prefix in which Evidently stores evaluation events.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/S3DestinationConfig AWS API Documentation
- #
- class S3DestinationConfig < Struct.new(
- :bucket,
- :prefix)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # This structure defines the traffic allocation percentages among the
- # feature variations during one step of a launch, and the start time of
- # that step.
- #
- # @!attribute [rw] group_weights
- # The traffic allocation percentages among the feature variations
- # during one step of a launch. This is a set of key-value pairs. The
- # keys are variation names. The values represent the percentage of
- # traffic to allocate to that variation during this step.
- #
- # The values is expressed in thousandths of a percent, so assigning a
- # weight of 50000 assigns 50% of traffic to that variation.
- #
- # If the sum of the weights for all the variations in a segment
- # override does not add up to 100,000, then the remaining traffic that
- # matches this segment is not assigned by this segment override, and
- # instead moves on to the next segment override or the default traffic
- # split.
- # @return [Hash]
- #
- # @!attribute [rw] segment_overrides
- # Use this parameter to specify different traffic splits for one or
- # more audience *segments*. A segment is a portion of your audience
- # that share one or more characteristics. Examples could be Chrome
- # browser users, users in Europe, or Firefox browser users in Europe
- # who also fit other criteria that your application collects, such as
- # age.
- #
- # This parameter is an array of up to six segment override objects.
- # Each of these objects specifies a segment that you have already
- # created, and defines the traffic split for that segment.
- # @return [Array]
- #
- # @!attribute [rw] start_time
- # The date and time that this step of the launch starts.
- # @return [Time]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ScheduledSplit AWS API Documentation
- #
- class ScheduledSplit < Struct.new(
- :group_weights,
- :segment_overrides,
- :start_time)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # This structure defines the traffic allocation percentages among the
- # feature variations during one step of a launch, and the start time of
- # that step.
- #
- # @!attribute [rw] group_weights
- # The traffic allocation percentages among the feature variations
- # during one step of a launch. This is a set of key-value pairs. The
- # keys are variation names. The values represent the percentage of
- # traffic to allocate to that variation during this step.
- #
- # The values is expressed in thousandths of a percent, so assigning a
- # weight of 50000 assigns 50% of traffic to that variation.
- #
- # If the sum of the weights for all the variations in a segment
- # override does not add up to 100,000, then the remaining traffic that
- # matches this segment is not assigned by this segment override, and
- # instead moves on to the next segment override or the default traffic
- # split.
- # @return [Hash]
- #
- # @!attribute [rw] segment_overrides
- # Use this parameter to specify different traffic splits for one or
- # more audience *segments*. A segment is a portion of your audience
- # that share one or more characteristics. Examples could be Chrome
- # browser users, users in Europe, or Firefox browser users in Europe
- # who also fit other criteria that your application collects, such as
- # age.
- #
- # This parameter is an array of up to six segment override objects.
- # Each of these objects specifies a segment that you have already
- # created, and defines the traffic split for that segment.
- # @return [Array]
- #
- # @!attribute [rw] start_time
- # The date and time that this step of the launch starts.
- # @return [Time]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ScheduledSplitConfig AWS API Documentation
- #
- class ScheduledSplitConfig < Struct.new(
- :group_weights,
- :segment_overrides,
- :start_time)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # An array of structures that define the traffic allocation percentages
- # among the feature variations during each step of a launch. This also
- # defines the start time of each step.
- #
- # @!attribute [rw] steps
- # An array of structures that define the traffic allocation
- # percentages among the feature variations during each step of the
- # launch. This also defines the start time of each step.
- # @return [Array]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ScheduledSplitsLaunchConfig AWS API Documentation
- #
- class ScheduledSplitsLaunchConfig < Struct.new(
- :steps)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # An array of structures that define the traffic allocation percentages
- # among the feature variations during each step of a launch. This also
- # defines the start time of each step.
- #
- # @!attribute [rw] steps
- # An array of structures that define the traffic allocation
- # percentages among the feature variations during each step of the
- # launch. This also defines the start time of each step.
- # @return [Array]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ScheduledSplitsLaunchDefinition AWS API Documentation
- #
- class ScheduledSplitsLaunchDefinition < Struct.new(
- :steps)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # This structure contains information about one audience *segment*. You
- # can use segments in your experiments and launches to narrow the user
- # sessions used for experiment or launch to only the user sessions that
- # match one or more criteria.
- #
- # @!attribute [rw] arn
- # The ARN of the segment.
- # @return [String]
- #
- # @!attribute [rw] created_time
- # The date and time that this segment was created.
- # @return [Time]
- #
- # @!attribute [rw] description
- # The customer-created description for this segment.
- # @return [String]
- #
- # @!attribute [rw] experiment_count
- # The number of experiments that this segment is used in. This count
- # includes all current experiments, not just those that are currently
- # running.
- # @return [Integer]
- #
- # @!attribute [rw] last_updated_time
- # The date and time that this segment was most recently updated.
- # @return [Time]
- #
- # @!attribute [rw] launch_count
- # The number of launches that this segment is used in. This count
- # includes all current launches, not just those that are currently
- # running.
- # @return [Integer]
- #
- # @!attribute [rw] name
- # The name of the segment.
- # @return [String]
- #
- # @!attribute [rw] pattern
- # The pattern that defines the attributes to use to evalute whether a
- # user session will be in the segment. For more information about the
- # pattern syntax, see [Segment rule pattern syntax][1].
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Evidently-segments.html
- # @return [String]
- #
- # @!attribute [rw] tags
- # The list of tag keys and values associated with this launch.
- # @return [Hash]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/Segment AWS API Documentation
- #
- class Segment < Struct.new(
- :arn,
- :created_time,
- :description,
- :experiment_count,
- :last_updated_time,
- :launch_count,
- :name,
- :pattern,
- :tags)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # This structure specifies a segment that you have already created, and
- # defines the traffic split for that segment to be used in a launch.
- #
- # @!attribute [rw] evaluation_order
- # A number indicating the order to use to evaluate segment overrides,
- # if there are more than one. Segment overrides with lower numbers are
- # evaluated first.
- # @return [Integer]
- #
- # @!attribute [rw] segment
- # The ARN of the segment to use.
- # @return [String]
- #
- # @!attribute [rw] weights
- # The traffic allocation percentages among the feature variations to
- # assign to this segment. This is a set of key-value pairs. The keys
- # are variation names. The values represent the amount of traffic to
- # allocate to that variation for this segment. This is expressed in
- # thousandths of a percent, so a weight of 50000 represents 50% of
- # traffic.
- # @return [Hash]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/SegmentOverride AWS API Documentation
- #
- class SegmentOverride < Struct.new(
- :evaluation_order,
- :segment,
- :weights)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # The request would cause a service quota to be exceeded.
- #
- # @!attribute [rw] message
- # @return [String]
- #
- # @!attribute [rw] quota_code
- # The ID of the service quota that was exceeded.
- # @return [String]
- #
- # @!attribute [rw] resource_id
- # The ID of the resource that caused the exception.
- # @return [String]
- #
- # @!attribute [rw] resource_type
- # The type of the resource that is associated with the error.
- # @return [String]
- #
- # @!attribute [rw] service_code
- # The ID of the service that is associated with the error.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ServiceQuotaExceededException AWS API Documentation
- #
- class ServiceQuotaExceededException < Struct.new(
- :message,
- :quota_code,
- :resource_id,
- :resource_type,
- :service_code)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # The service was unavailable. Retry the request.
- #
- # @!attribute [rw] message
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ServiceUnavailableException AWS API Documentation
- #
- class ServiceUnavailableException < Struct.new(
- :message)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] analysis_complete_time
- # The date and time to end the experiment. This must be no more than
- # 30 days after the experiment starts.
- # @return [Time]
- #
- # @!attribute [rw] experiment
- # The name of the experiment to start.
- # @return [String]
- #
- # @!attribute [rw] project
- # The name or ARN of the project that contains the experiment to
- # start.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/StartExperimentRequest AWS API Documentation
- #
- class StartExperimentRequest < Struct.new(
- :analysis_complete_time,
- :experiment,
- :project)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] started_time
- # A timestamp that indicates when the experiment started.
- # @return [Time]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/StartExperimentResponse AWS API Documentation
- #
- class StartExperimentResponse < Struct.new(
- :started_time)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] launch
- # The name of the launch to start.
- # @return [String]
- #
- # @!attribute [rw] project
- # The name or ARN of the project that contains the launch to start.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/StartLaunchRequest AWS API Documentation
- #
- class StartLaunchRequest < Struct.new(
- :launch,
- :project)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] launch
- # A structure that contains information about the launch that was
- # started.
- # @return [Types::Launch]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/StartLaunchResponse AWS API Documentation
- #
- class StartLaunchResponse < Struct.new(
- :launch)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] desired_state
- # Specify whether the experiment is to be considered `COMPLETED` or
- # `CANCELLED` after it stops.
- # @return [String]
- #
- # @!attribute [rw] experiment
- # The name of the experiment to stop.
- # @return [String]
- #
- # @!attribute [rw] project
- # The name or ARN of the project that contains the experiment to stop.
- # @return [String]
- #
- # @!attribute [rw] reason
- # A string that describes why you are stopping the experiment.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/StopExperimentRequest AWS API Documentation
- #
- class StopExperimentRequest < Struct.new(
- :desired_state,
- :experiment,
- :project,
- :reason)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] ended_time
- # The date and time that the experiment stopped.
- # @return [Time]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/StopExperimentResponse AWS API Documentation
- #
- class StopExperimentResponse < Struct.new(
- :ended_time)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] desired_state
- # Specify whether to consider the launch as `COMPLETED` or `CANCELLED`
- # after it stops.
- # @return [String]
- #
- # @!attribute [rw] launch
- # The name of the launch to stop.
- # @return [String]
- #
- # @!attribute [rw] project
- # The name or ARN of the project that contains the launch that you
- # want to stop.
- # @return [String]
- #
- # @!attribute [rw] reason
- # A string that describes why you are stopping the launch.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/StopLaunchRequest AWS API Documentation
- #
- class StopLaunchRequest < Struct.new(
- :desired_state,
- :launch,
- :project,
- :reason)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] ended_time
- # The date and time that the launch stopped.
- # @return [Time]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/StopLaunchResponse AWS API Documentation
- #
- class StopLaunchResponse < Struct.new(
- :ended_time)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] resource_arn
- # The ARN of the CloudWatch Evidently resource that you're adding
- # tags to.
- # @return [String]
- #
- # @!attribute [rw] tags
- # The list of key-value pairs to associate with the resource.
- # @return [Hash]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/TagResourceRequest AWS API Documentation
- #
- class TagResourceRequest < Struct.new(
- :resource_arn,
- :tags)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/TagResourceResponse AWS API Documentation
- #
- class TagResourceResponse < Aws::EmptyStructure; end
-
- # @!attribute [rw] pattern
- # The pattern to test.
- # @return [String]
- #
- # @!attribute [rw] payload
- # A sample `evaluationContext` JSON block to test against the
- # specified pattern.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/TestSegmentPatternRequest AWS API Documentation
- #
- class TestSegmentPatternRequest < Struct.new(
- :pattern,
- :payload)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] match
- # Returns `true` if the pattern matches the payload.
- # @return [Boolean]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/TestSegmentPatternResponse AWS API Documentation
- #
- class TestSegmentPatternResponse < Struct.new(
- :match)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # The request was denied because of request throttling. Retry the
- # request.
- #
- # @!attribute [rw] message
- # @return [String]
- #
- # @!attribute [rw] quota_code
- # The ID of the service quota that was exceeded.
- # @return [String]
- #
- # @!attribute [rw] service_code
- # The ID of the service that is associated with the error.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ThrottlingException AWS API Documentation
- #
- class ThrottlingException < Struct.new(
- :message,
- :quota_code,
- :service_code)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # A structure that defines one treatment in an experiment. A treatment
- # is a variation of the feature that you are including in the
- # experiment.
- #
- # @!attribute [rw] description
- # The description of the treatment.
- # @return [String]
- #
- # @!attribute [rw] feature_variations
- # The feature variation used for this treatment. This is a key-value
- # pair. The key is the feature name, and the value is the variation
- # name.
- # @return [Hash]
- #
- # @!attribute [rw] name
- # The name of this treatment.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/Treatment AWS API Documentation
- #
- class Treatment < Struct.new(
- :description,
- :feature_variations,
- :name)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # A structure that defines one treatment in an experiment. A treatment
- # is a variation of the feature that you are including in the
- # experiment.
- #
- # @!attribute [rw] description
- # A description for this treatment.
- # @return [String]
- #
- # @!attribute [rw] feature
- # The feature that this experiment is testing.
- # @return [String]
- #
- # @!attribute [rw] name
- # A name for this treatment.
- # @return [String]
- #
- # @!attribute [rw] variation
- # The name of the variation to use as this treatment in the
- # experiment.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/TreatmentConfig AWS API Documentation
- #
- class TreatmentConfig < Struct.new(
- :description,
- :feature,
- :name,
- :variation)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] resource_arn
- # The ARN of the CloudWatch Evidently resource that you're removing
- # tags from.
- # @return [String]
- #
- # @!attribute [rw] tag_keys
- # The list of tag keys to remove from the resource.
- # @return [Array]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/UntagResourceRequest AWS API Documentation
- #
- class UntagResourceRequest < Struct.new(
- :resource_arn,
- :tag_keys)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/UntagResourceResponse AWS API Documentation
- #
- class UntagResourceResponse < Aws::EmptyStructure; end
-
- # @!attribute [rw] description
- # An optional description of the experiment.
- # @return [String]
- #
- # @!attribute [rw] experiment
- # The name of the experiment to update.
- # @return [String]
- #
- # @!attribute [rw] metric_goals
- # An array of structures that defines the metrics used for the
- # experiment, and whether a higher or lower value for each metric is
- # the goal.
- # @return [Array]
- #
- # @!attribute [rw] online_ab_config
- # A structure that contains the configuration of which variation o use
- # as the "control" version. The "control" version is used for
- # comparison with other variations. This structure also specifies how
- # much experiment traffic is allocated to each variation.
- # @return [Types::OnlineAbConfig]
- #
- # @!attribute [rw] project
- # The name or ARN of the project that contains the experiment that you
- # want to update.
- # @return [String]
- #
- # @!attribute [rw] randomization_salt
- # When Evidently assigns a particular user session to an experiment,
- # it must use a randomization ID to determine which variation the user
- # session is served. This randomization ID is a combination of the
- # entity ID and `randomizationSalt`. If you omit `randomizationSalt`,
- # Evidently uses the experiment name as the `randomizationSalt`.
- # @return [String]
- #
- # @!attribute [rw] remove_segment
- # Removes a segment from being used in an experiment. You can't use
- # this parameter if the experiment is currently running.
- # @return [Boolean]
- #
- # @!attribute [rw] sampling_rate
- # The portion of the available audience that you want to allocate to
- # this experiment, in thousandths of a percent. The available audience
- # is the total audience minus the audience that you have allocated to
- # overrides or current launches of this feature.
- #
- # This is represented in thousandths of a percent. For example,
- # specify 20,000 to allocate 20% of the available audience.
- # @return [Integer]
- #
- # @!attribute [rw] segment
- # Adds an audience *segment* to an experiment. When a segment is used
- # in an experiment, only user sessions that match the segment pattern
- # are used in the experiment. You can't use this parameter if the
- # experiment is currently running.
- # @return [String]
- #
- # @!attribute [rw] treatments
- # An array of structures that define the variations being tested in
- # the experiment.
- # @return [Array]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/UpdateExperimentRequest AWS API Documentation
- #
- class UpdateExperimentRequest < Struct.new(
- :description,
- :experiment,
- :metric_goals,
- :online_ab_config,
- :project,
- :randomization_salt,
- :remove_segment,
- :sampling_rate,
- :segment,
- :treatments)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] experiment
- # A structure containing the configuration details of the experiment
- # that was updated.
- # @return [Types::Experiment]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/UpdateExperimentResponse AWS API Documentation
- #
- class UpdateExperimentResponse < Struct.new(
- :experiment)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] add_or_update_variations
- # To update variation configurations for this feature, or add new
- # ones, specify this structure. In this array, include any variations
- # that you want to add or update. If the array includes a variation
- # name that already exists for this feature, it is updated. If it
- # includes a new variation name, it is added as a new variation.
- # @return [Array]
- #
- # @!attribute [rw] default_variation
- # The name of the variation to use as the default variation. The
- # default variation is served to users who are not allocated to any
- # ongoing launches or experiments of this feature.
- # @return [String]
- #
- # @!attribute [rw] description
- # An optional description of the feature.
- # @return [String]
- #
- # @!attribute [rw] entity_overrides
- # Specified users that should always be served a specific variation of
- # a feature. Each user is specified by a key-value pair . For each
- # key, specify a user by entering their user ID, account ID, or some
- # other identifier. For the value, specify the name of the variation
- # that they are to be served.
- #
- # This parameter is limited to 2500 overrides or a total of 40KB. The
- # 40KB limit includes an overhead of 6 bytes per override.
- # @return [Hash]
- #
- # @!attribute [rw] evaluation_strategy
- # Specify `ALL_RULES` to activate the traffic allocation specified by
- # any ongoing launches or experiments. Specify `DEFAULT_VARIATION` to
- # serve the default variation to all users instead.
- # @return [String]
- #
- # @!attribute [rw] feature
- # The name of the feature to be updated.
- # @return [String]
- #
- # @!attribute [rw] project
- # The name or ARN of the project that contains the feature to be
- # updated.
- # @return [String]
- #
- # @!attribute [rw] remove_variations
- # Removes a variation from the feature. If the variation you specify
- # doesn't exist, then this makes no change and does not report an
- # error.
- #
- # This operation fails if you try to remove a variation that is part
- # of an ongoing launch or experiment.
- # @return [Array]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/UpdateFeatureRequest AWS API Documentation
- #
- class UpdateFeatureRequest < Struct.new(
- :add_or_update_variations,
- :default_variation,
- :description,
- :entity_overrides,
- :evaluation_strategy,
- :feature,
- :project,
- :remove_variations)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] feature
- # A structure that contains information about the updated feature.
- # @return [Types::Feature]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/UpdateFeatureResponse AWS API Documentation
- #
- class UpdateFeatureResponse < Struct.new(
- :feature)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] description
- # An optional description for the launch.
- # @return [String]
- #
- # @!attribute [rw] groups
- # An array of structures that contains the feature and variations that
- # are to be used for the launch.
- # @return [Array]
- #
- # @!attribute [rw] launch
- # The name of the launch that is to be updated.
- # @return [String]
- #
- # @!attribute [rw] metric_monitors
- # An array of structures that define the metrics that will be used to
- # monitor the launch performance.
- # @return [Array]
- #
- # @!attribute [rw] project
- # The name or ARN of the project that contains the launch that you
- # want to update.
- # @return [String]
- #
- # @!attribute [rw] randomization_salt
- # When Evidently assigns a particular user session to a launch, it
- # must use a randomization ID to determine which variation the user
- # session is served. This randomization ID is a combination of the
- # entity ID and `randomizationSalt`. If you omit `randomizationSalt`,
- # Evidently uses the launch name as the `randomizationSalt`.
- # @return [String]
- #
- # @!attribute [rw] scheduled_splits_config
- # An array of structures that define the traffic allocation
- # percentages among the feature variations during each step of the
- # launch.
- # @return [Types::ScheduledSplitsLaunchConfig]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/UpdateLaunchRequest AWS API Documentation
- #
- class UpdateLaunchRequest < Struct.new(
- :description,
- :groups,
- :launch,
- :metric_monitors,
- :project,
- :randomization_salt,
- :scheduled_splits_config)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] launch
- # A structure that contains the new configuration of the launch that
- # was updated.
- # @return [Types::Launch]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/UpdateLaunchResponse AWS API Documentation
- #
- class UpdateLaunchResponse < Struct.new(
- :launch)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] cloud_watch_logs
- # A structure containing the CloudWatch Logs log group where you want
- # to store evaluation events.
- # @return [Types::CloudWatchLogsDestinationConfig]
- #
- # @!attribute [rw] project
- # The name or ARN of the project that you want to modify the data
- # storage options for.
- # @return [String]
- #
- # @!attribute [rw] s3_destination
- # A structure containing the S3 bucket name and bucket prefix where
- # you want to store evaluation events.
- # @return [Types::S3DestinationConfig]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/UpdateProjectDataDeliveryRequest AWS API Documentation
- #
- class UpdateProjectDataDeliveryRequest < Struct.new(
- :cloud_watch_logs,
- :project,
- :s3_destination)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] project
- # A structure containing details about the project that you updated.
- # @return [Types::Project]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/UpdateProjectDataDeliveryResponse AWS API Documentation
- #
- class UpdateProjectDataDeliveryResponse < Struct.new(
- :project)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] app_config_resource
- # Use this parameter if the project will use client-side evaluation
- # powered by AppConfig. Client-side evaluation allows your application
- # to assign variations to user sessions locally instead of by calling
- # the [EvaluateFeature][1] operation. This mitigates the latency and
- # availability risks that come with an API call. allows you to
- #
- # This parameter is a structure that contains information about the
- # AppConfig application that will be used for client-side evaluation.
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/cloudwatchevidently/latest/APIReference/API_EvaluateFeature.html
- # @return [Types::ProjectAppConfigResourceConfig]
- #
- # @!attribute [rw] description
- # An optional description of the project.
- # @return [String]
- #
- # @!attribute [rw] project
- # The name or ARN of the project to update.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/UpdateProjectRequest AWS API Documentation
- #
- class UpdateProjectRequest < Struct.new(
- :app_config_resource,
- :description,
- :project)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] project
- # A structure containing information about the updated project.
- # @return [Types::Project]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/UpdateProjectResponse AWS API Documentation
- #
- class UpdateProjectResponse < Struct.new(
- :project)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # The value of a parameter in the request caused an error.
- #
- # @!attribute [rw] field_list
- # The parameter that caused the exception.
- # @return [Array]
- #
- # @!attribute [rw] message
- # @return [String]
- #
- # @!attribute [rw] reason
- # A reason for the error.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ValidationException AWS API Documentation
- #
- class ValidationException < Struct.new(
- :field_list,
- :message,
- :reason)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # A structure containing an error name and message.
- #
- # @!attribute [rw] message
- # The error message.
- # @return [String]
- #
- # @!attribute [rw] name
- # The error name.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/ValidationExceptionField AWS API Documentation
- #
- class ValidationExceptionField < Struct.new(
- :message,
- :name)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # The value assigned to a feature variation. This structure must contain
- # exactly one field. It can be `boolValue`, `doubleValue`, `longValue`,
- # or `stringValue`.
- #
- # @note VariableValue is a union - when making an API calls you must set exactly one of the members.
- #
- # @note VariableValue is a union - when returned from an API call exactly one value will be set and the returned type will be a subclass of VariableValue corresponding to the set member.
- #
- # @!attribute [rw] bool_value
- # If this feature uses the Boolean variation type, this field contains
- # the Boolean value of this variation.
- # @return [Boolean]
- #
- # @!attribute [rw] double_value
- # If this feature uses the double integer variation type, this field
- # contains the double integer value of this variation.
- # @return [Float]
- #
- # @!attribute [rw] long_value
- # If this feature uses the long variation type, this field contains
- # the long value of this variation.
- # @return [Integer]
- #
- # @!attribute [rw] string_value
- # If this feature uses the string variation type, this field contains
- # the string value of this variation.
- # @return [String]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/VariableValue AWS API Documentation
- #
- class VariableValue < Struct.new(
- :bool_value,
- :double_value,
- :long_value,
- :string_value,
- :unknown)
- SENSITIVE = []
- include Aws::Structure
- include Aws::Structure::Union
-
- class BoolValue < VariableValue; end
- class DoubleValue < VariableValue; end
- class LongValue < VariableValue; end
- class StringValue < VariableValue; end
- class Unknown < VariableValue; end
- end
-
- # This structure contains the name and variation value of one variation
- # of a feature.
- #
- # @!attribute [rw] name
- # The name of the variation.
- # @return [String]
- #
- # @!attribute [rw] value
- # The value assigned to this variation.
- # @return [Types::VariableValue]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/Variation AWS API Documentation
- #
- class Variation < Struct.new(
- :name,
- :value)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # This structure contains the name and variation value of one variation
- # of a feature.
- #
- # @!attribute [rw] name
- # The name of the variation.
- # @return [String]
- #
- # @!attribute [rw] value
- # The value assigned to this variation.
- # @return [Types::VariableValue]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/evidently-2021-02-01/VariationConfig AWS API Documentation
- #
- class VariationConfig < Struct.new(
- :name,
- :value)
- SENSITIVE = []
- include Aws::Structure
- end
-
- end
-end
-
diff --git a/gems/aws-sdk-cloudwatchevidently/sig/client.rbs b/gems/aws-sdk-cloudwatchevidently/sig/client.rbs
deleted file mode 100644
index 25566b493ec..00000000000
--- a/gems/aws-sdk-cloudwatchevidently/sig/client.rbs
+++ /dev/null
@@ -1,725 +0,0 @@
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-module Aws
- module CloudWatchEvidently
- class Client < ::Seahorse::Client::Base
- include ::Aws::ClientStubs
-
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#initialize-instance_method
- def self.new: (
- ?credentials: untyped,
- ?region: String,
- ?access_key_id: String,
- ?account_id: String,
- ?active_endpoint_cache: bool,
- ?adaptive_retry_wait_to_fill: bool,
- ?auth_scheme_preference: Array[String],
- ?client_side_monitoring: bool,
- ?client_side_monitoring_client_id: String,
- ?client_side_monitoring_host: String,
- ?client_side_monitoring_port: Integer,
- ?client_side_monitoring_publisher: untyped,
- ?convert_params: bool,
- ?correct_clock_skew: bool,
- ?defaults_mode: String,
- ?disable_host_prefix_injection: bool,
- ?disable_request_compression: bool,
- ?endpoint: String,
- ?endpoint_cache_max_entries: Integer,
- ?endpoint_cache_max_threads: Integer,
- ?endpoint_cache_poll_interval: Integer,
- ?endpoint_discovery: bool,
- ?ignore_configured_endpoint_urls: bool,
- ?log_formatter: untyped,
- ?log_level: Symbol,
- ?logger: untyped,
- ?max_attempts: Integer,
- ?profile: String,
- ?request_checksum_calculation: String,
- ?request_min_compression_size_bytes: Integer,
- ?response_checksum_validation: String,
- ?retry_backoff: Proc,
- ?retry_base_delay: Float,
- ?retry_jitter: (:none | :equal | :full | ^(Integer) -> Integer),
- ?retry_limit: Integer,
- ?retry_max_delay: Integer,
- ?retry_mode: ("legacy" | "standard" | "adaptive"),
- ?sdk_ua_app_id: String,
- ?secret_access_key: String,
- ?session_token: String,
- ?sigv4a_signing_region_set: Array[String],
- ?stub_responses: untyped,
- ?telemetry_provider: Aws::Telemetry::TelemetryProviderBase,
- ?token_provider: untyped,
- ?use_dualstack_endpoint: bool,
- ?use_fips_endpoint: bool,
- ?validate_params: bool,
- ?endpoint_provider: untyped,
- ?http_proxy: String,
- ?http_open_timeout: (Float | Integer),
- ?http_read_timeout: (Float | Integer),
- ?http_idle_timeout: (Float | Integer),
- ?http_continue_timeout: (Float | Integer),
- ?ssl_timeout: (Float | Integer | nil),
- ?http_wire_trace: bool,
- ?ssl_verify_peer: bool,
- ?ssl_ca_bundle: String,
- ?ssl_ca_directory: String,
- ?ssl_ca_store: String,
- ?on_chunk_received: Proc,
- ?on_chunk_sent: Proc,
- ?raise_response_errors: bool
- ) -> instance
- | (?Hash[Symbol, untyped]) -> instance
-
-
- interface _BatchEvaluateFeatureResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::BatchEvaluateFeatureResponse]
- def results: () -> ::Array[Types::EvaluationResult]
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#batch_evaluate_feature-instance_method
- def batch_evaluate_feature: (
- project: ::String,
- requests: Array[
- {
- entity_id: ::String,
- evaluation_context: ::String?,
- feature: ::String
- },
- ]
- ) -> _BatchEvaluateFeatureResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _BatchEvaluateFeatureResponseSuccess
-
- interface _CreateExperimentResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::CreateExperimentResponse]
- def experiment: () -> Types::Experiment
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#create_experiment-instance_method
- def create_experiment: (
- ?description: ::String,
- metric_goals: Array[
- {
- desired_change: ("INCREASE" | "DECREASE")?,
- metric_definition: {
- entity_id_key: ::String,
- event_pattern: ::String?,
- name: ::String,
- unit_label: ::String?,
- value_key: ::String
- }
- },
- ],
- name: ::String,
- ?online_ab_config: {
- control_treatment_name: ::String?,
- treatment_weights: Hash[::String, ::Integer]?
- },
- project: ::String,
- ?randomization_salt: ::String,
- ?sampling_rate: ::Integer,
- ?segment: ::String,
- ?tags: Hash[::String, ::String],
- treatments: Array[
- {
- description: ::String?,
- feature: ::String,
- name: ::String,
- variation: ::String
- },
- ]
- ) -> _CreateExperimentResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _CreateExperimentResponseSuccess
-
- interface _CreateFeatureResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::CreateFeatureResponse]
- def feature: () -> Types::Feature
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#create_feature-instance_method
- def create_feature: (
- ?default_variation: ::String,
- ?description: ::String,
- ?entity_overrides: Hash[::String, ::String],
- ?evaluation_strategy: ("ALL_RULES" | "DEFAULT_VARIATION"),
- name: ::String,
- project: ::String,
- ?tags: Hash[::String, ::String],
- variations: Array[
- {
- name: ::String,
- value: {
- bool_value: bool?,
- double_value: ::Float?,
- long_value: ::Integer?,
- string_value: ::String?
- }
- },
- ]
- ) -> _CreateFeatureResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _CreateFeatureResponseSuccess
-
- interface _CreateLaunchResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::CreateLaunchResponse]
- def launch: () -> Types::Launch
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#create_launch-instance_method
- def create_launch: (
- ?description: ::String,
- groups: Array[
- {
- description: ::String?,
- feature: ::String,
- name: ::String,
- variation: ::String
- },
- ],
- ?metric_monitors: Array[
- {
- metric_definition: {
- entity_id_key: ::String,
- event_pattern: ::String?,
- name: ::String,
- unit_label: ::String?,
- value_key: ::String
- }
- },
- ],
- name: ::String,
- project: ::String,
- ?randomization_salt: ::String,
- ?scheduled_splits_config: {
- steps: Array[
- {
- group_weights: Hash[::String, ::Integer],
- segment_overrides: Array[
- {
- evaluation_order: ::Integer,
- segment: ::String,
- weights: Hash[::String, ::Integer]
- },
- ]?,
- start_time: ::Time
- },
- ]
- },
- ?tags: Hash[::String, ::String]
- ) -> _CreateLaunchResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _CreateLaunchResponseSuccess
-
- interface _CreateProjectResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::CreateProjectResponse]
- def project: () -> Types::Project
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#create_project-instance_method
- def create_project: (
- ?app_config_resource: {
- application_id: ::String?,
- environment_id: ::String?
- },
- ?data_delivery: {
- cloud_watch_logs: {
- log_group: ::String?
- }?,
- s3_destination: {
- bucket: ::String?,
- prefix: ::String?
- }?
- },
- ?description: ::String,
- name: ::String,
- ?tags: Hash[::String, ::String]
- ) -> _CreateProjectResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _CreateProjectResponseSuccess
-
- interface _CreateSegmentResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::CreateSegmentResponse]
- def segment: () -> Types::Segment
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#create_segment-instance_method
- def create_segment: (
- ?description: ::String,
- name: ::String,
- pattern: ::String,
- ?tags: Hash[::String, ::String]
- ) -> _CreateSegmentResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _CreateSegmentResponseSuccess
-
- interface _DeleteExperimentResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::DeleteExperimentResponse]
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#delete_experiment-instance_method
- def delete_experiment: (
- experiment: ::String,
- project: ::String
- ) -> _DeleteExperimentResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _DeleteExperimentResponseSuccess
-
- interface _DeleteFeatureResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::DeleteFeatureResponse]
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#delete_feature-instance_method
- def delete_feature: (
- feature: ::String,
- project: ::String
- ) -> _DeleteFeatureResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _DeleteFeatureResponseSuccess
-
- interface _DeleteLaunchResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::DeleteLaunchResponse]
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#delete_launch-instance_method
- def delete_launch: (
- launch: ::String,
- project: ::String
- ) -> _DeleteLaunchResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _DeleteLaunchResponseSuccess
-
- interface _DeleteProjectResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::DeleteProjectResponse]
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#delete_project-instance_method
- def delete_project: (
- project: ::String
- ) -> _DeleteProjectResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _DeleteProjectResponseSuccess
-
- interface _DeleteSegmentResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::DeleteSegmentResponse]
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#delete_segment-instance_method
- def delete_segment: (
- segment: ::String
- ) -> _DeleteSegmentResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _DeleteSegmentResponseSuccess
-
- interface _EvaluateFeatureResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::EvaluateFeatureResponse]
- def details: () -> ::String
- def reason: () -> ::String
- def value: () -> Types::VariableValue
- def variation: () -> ::String
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#evaluate_feature-instance_method
- def evaluate_feature: (
- entity_id: ::String,
- ?evaluation_context: ::String,
- feature: ::String,
- project: ::String
- ) -> _EvaluateFeatureResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _EvaluateFeatureResponseSuccess
-
- interface _GetExperimentResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::GetExperimentResponse]
- def experiment: () -> Types::Experiment
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#get_experiment-instance_method
- def get_experiment: (
- experiment: ::String,
- project: ::String
- ) -> _GetExperimentResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetExperimentResponseSuccess
-
- interface _GetExperimentResultsResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::GetExperimentResultsResponse]
- def details: () -> ::String
- def reports: () -> ::Array[Types::ExperimentReport]
- def results_data: () -> ::Array[Types::ExperimentResultsData]
- def timestamps: () -> ::Array[::Time]
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#get_experiment_results-instance_method
- def get_experiment_results: (
- ?base_stat: ("Mean"),
- ?end_time: ::Time,
- experiment: ::String,
- metric_names: Array[::String],
- ?period: ::Integer,
- project: ::String,
- ?report_names: Array[("BayesianInference")],
- ?result_stats: Array[("BaseStat" | "TreatmentEffect" | "ConfidenceInterval" | "PValue")],
- ?start_time: ::Time,
- treatment_names: Array[::String]
- ) -> _GetExperimentResultsResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetExperimentResultsResponseSuccess
-
- interface _GetFeatureResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::GetFeatureResponse]
- def feature: () -> Types::Feature
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#get_feature-instance_method
- def get_feature: (
- feature: ::String,
- project: ::String
- ) -> _GetFeatureResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetFeatureResponseSuccess
-
- interface _GetLaunchResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::GetLaunchResponse]
- def launch: () -> Types::Launch
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#get_launch-instance_method
- def get_launch: (
- launch: ::String,
- project: ::String
- ) -> _GetLaunchResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetLaunchResponseSuccess
-
- interface _GetProjectResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::GetProjectResponse]
- def project: () -> Types::Project
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#get_project-instance_method
- def get_project: (
- project: ::String
- ) -> _GetProjectResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetProjectResponseSuccess
-
- interface _GetSegmentResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::GetSegmentResponse]
- def segment: () -> Types::Segment
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#get_segment-instance_method
- def get_segment: (
- segment: ::String
- ) -> _GetSegmentResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetSegmentResponseSuccess
-
- interface _ListExperimentsResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::ListExperimentsResponse]
- def experiments: () -> ::Array[Types::Experiment]
- def next_token: () -> ::String
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#list_experiments-instance_method
- def list_experiments: (
- ?max_results: ::Integer,
- ?next_token: ::String,
- project: ::String,
- ?status: ("CREATED" | "UPDATING" | "RUNNING" | "COMPLETED" | "CANCELLED")
- ) -> _ListExperimentsResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _ListExperimentsResponseSuccess
-
- interface _ListFeaturesResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::ListFeaturesResponse]
- def features: () -> ::Array[Types::FeatureSummary]
- def next_token: () -> ::String
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#list_features-instance_method
- def list_features: (
- ?max_results: ::Integer,
- ?next_token: ::String,
- project: ::String
- ) -> _ListFeaturesResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _ListFeaturesResponseSuccess
-
- interface _ListLaunchesResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::ListLaunchesResponse]
- def launches: () -> ::Array[Types::Launch]
- def next_token: () -> ::String
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#list_launches-instance_method
- def list_launches: (
- ?max_results: ::Integer,
- ?next_token: ::String,
- project: ::String,
- ?status: ("CREATED" | "UPDATING" | "RUNNING" | "COMPLETED" | "CANCELLED")
- ) -> _ListLaunchesResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _ListLaunchesResponseSuccess
-
- interface _ListProjectsResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::ListProjectsResponse]
- def next_token: () -> ::String
- def projects: () -> ::Array[Types::ProjectSummary]
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#list_projects-instance_method
- def list_projects: (
- ?max_results: ::Integer,
- ?next_token: ::String
- ) -> _ListProjectsResponseSuccess
- | (?Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _ListProjectsResponseSuccess
-
- interface _ListSegmentReferencesResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::ListSegmentReferencesResponse]
- def next_token: () -> ::String
- def referenced_by: () -> ::Array[Types::RefResource]
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#list_segment_references-instance_method
- def list_segment_references: (
- ?max_results: ::Integer,
- ?next_token: ::String,
- segment: ::String,
- type: ("EXPERIMENT" | "LAUNCH")
- ) -> _ListSegmentReferencesResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _ListSegmentReferencesResponseSuccess
-
- interface _ListSegmentsResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::ListSegmentsResponse]
- def next_token: () -> ::String
- def segments: () -> ::Array[Types::Segment]
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#list_segments-instance_method
- def list_segments: (
- ?max_results: ::Integer,
- ?next_token: ::String
- ) -> _ListSegmentsResponseSuccess
- | (?Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _ListSegmentsResponseSuccess
-
- interface _ListTagsForResourceResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::ListTagsForResourceResponse]
- def tags: () -> ::Hash[::String, ::String]
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#list_tags_for_resource-instance_method
- def list_tags_for_resource: (
- resource_arn: ::String
- ) -> _ListTagsForResourceResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _ListTagsForResourceResponseSuccess
-
- interface _PutProjectEventsResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::PutProjectEventsResponse]
- def event_results: () -> ::Array[Types::PutProjectEventsResultEntry]
- def failed_event_count: () -> ::Integer
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#put_project_events-instance_method
- def put_project_events: (
- events: Array[
- {
- data: ::String,
- timestamp: ::Time,
- type: ("aws.evidently.evaluation" | "aws.evidently.custom")
- },
- ],
- project: ::String
- ) -> _PutProjectEventsResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _PutProjectEventsResponseSuccess
-
- interface _StartExperimentResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::StartExperimentResponse]
- def started_time: () -> ::Time
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#start_experiment-instance_method
- def start_experiment: (
- analysis_complete_time: ::Time,
- experiment: ::String,
- project: ::String
- ) -> _StartExperimentResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _StartExperimentResponseSuccess
-
- interface _StartLaunchResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::StartLaunchResponse]
- def launch: () -> Types::Launch
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#start_launch-instance_method
- def start_launch: (
- launch: ::String,
- project: ::String
- ) -> _StartLaunchResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _StartLaunchResponseSuccess
-
- interface _StopExperimentResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::StopExperimentResponse]
- def ended_time: () -> ::Time
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#stop_experiment-instance_method
- def stop_experiment: (
- ?desired_state: ("COMPLETED" | "CANCELLED"),
- experiment: ::String,
- project: ::String,
- ?reason: ::String
- ) -> _StopExperimentResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _StopExperimentResponseSuccess
-
- interface _StopLaunchResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::StopLaunchResponse]
- def ended_time: () -> ::Time
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#stop_launch-instance_method
- def stop_launch: (
- ?desired_state: ("COMPLETED" | "CANCELLED"),
- launch: ::String,
- project: ::String,
- ?reason: ::String
- ) -> _StopLaunchResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _StopLaunchResponseSuccess
-
- interface _TagResourceResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::TagResourceResponse]
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#tag_resource-instance_method
- def tag_resource: (
- resource_arn: ::String,
- tags: Hash[::String, ::String]
- ) -> _TagResourceResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _TagResourceResponseSuccess
-
- interface _TestSegmentPatternResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::TestSegmentPatternResponse]
- def match: () -> bool
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#test_segment_pattern-instance_method
- def test_segment_pattern: (
- pattern: ::String,
- payload: ::String
- ) -> _TestSegmentPatternResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _TestSegmentPatternResponseSuccess
-
- interface _UntagResourceResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::UntagResourceResponse]
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#untag_resource-instance_method
- def untag_resource: (
- resource_arn: ::String,
- tag_keys: Array[::String]
- ) -> _UntagResourceResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _UntagResourceResponseSuccess
-
- interface _UpdateExperimentResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::UpdateExperimentResponse]
- def experiment: () -> Types::Experiment
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#update_experiment-instance_method
- def update_experiment: (
- ?description: ::String,
- experiment: ::String,
- ?metric_goals: Array[
- {
- desired_change: ("INCREASE" | "DECREASE")?,
- metric_definition: {
- entity_id_key: ::String,
- event_pattern: ::String?,
- name: ::String,
- unit_label: ::String?,
- value_key: ::String
- }
- },
- ],
- ?online_ab_config: {
- control_treatment_name: ::String?,
- treatment_weights: Hash[::String, ::Integer]?
- },
- project: ::String,
- ?randomization_salt: ::String,
- ?remove_segment: bool,
- ?sampling_rate: ::Integer,
- ?segment: ::String,
- ?treatments: Array[
- {
- description: ::String?,
- feature: ::String,
- name: ::String,
- variation: ::String
- },
- ]
- ) -> _UpdateExperimentResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _UpdateExperimentResponseSuccess
-
- interface _UpdateFeatureResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::UpdateFeatureResponse]
- def feature: () -> Types::Feature
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#update_feature-instance_method
- def update_feature: (
- ?add_or_update_variations: Array[
- {
- name: ::String,
- value: {
- bool_value: bool?,
- double_value: ::Float?,
- long_value: ::Integer?,
- string_value: ::String?
- }
- },
- ],
- ?default_variation: ::String,
- ?description: ::String,
- ?entity_overrides: Hash[::String, ::String],
- ?evaluation_strategy: ("ALL_RULES" | "DEFAULT_VARIATION"),
- feature: ::String,
- project: ::String,
- ?remove_variations: Array[::String]
- ) -> _UpdateFeatureResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _UpdateFeatureResponseSuccess
-
- interface _UpdateLaunchResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::UpdateLaunchResponse]
- def launch: () -> Types::Launch
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#update_launch-instance_method
- def update_launch: (
- ?description: ::String,
- ?groups: Array[
- {
- description: ::String?,
- feature: ::String,
- name: ::String,
- variation: ::String
- },
- ],
- launch: ::String,
- ?metric_monitors: Array[
- {
- metric_definition: {
- entity_id_key: ::String,
- event_pattern: ::String?,
- name: ::String,
- unit_label: ::String?,
- value_key: ::String
- }
- },
- ],
- project: ::String,
- ?randomization_salt: ::String,
- ?scheduled_splits_config: {
- steps: Array[
- {
- group_weights: Hash[::String, ::Integer],
- segment_overrides: Array[
- {
- evaluation_order: ::Integer,
- segment: ::String,
- weights: Hash[::String, ::Integer]
- },
- ]?,
- start_time: ::Time
- },
- ]
- }
- ) -> _UpdateLaunchResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _UpdateLaunchResponseSuccess
-
- interface _UpdateProjectResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::UpdateProjectResponse]
- def project: () -> Types::Project
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#update_project-instance_method
- def update_project: (
- ?app_config_resource: {
- application_id: ::String?,
- environment_id: ::String?
- },
- ?description: ::String,
- project: ::String
- ) -> _UpdateProjectResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _UpdateProjectResponseSuccess
-
- interface _UpdateProjectDataDeliveryResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::UpdateProjectDataDeliveryResponse]
- def project: () -> Types::Project
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Client.html#update_project_data_delivery-instance_method
- def update_project_data_delivery: (
- ?cloud_watch_logs: {
- log_group: ::String?
- },
- project: ::String,
- ?s3_destination: {
- bucket: ::String?,
- prefix: ::String?
- }
- ) -> _UpdateProjectDataDeliveryResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _UpdateProjectDataDeliveryResponseSuccess
- end
- end
-end
-
diff --git a/gems/aws-sdk-cloudwatchevidently/sig/errors.rbs b/gems/aws-sdk-cloudwatchevidently/sig/errors.rbs
deleted file mode 100644
index c08e09b4498..00000000000
--- a/gems/aws-sdk-cloudwatchevidently/sig/errors.rbs
+++ /dev/null
@@ -1,52 +0,0 @@
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-module Aws
- module CloudWatchEvidently
- module Errors
- class ServiceError < ::Aws::Errors::ServiceError
- end
-
- class AccessDeniedException < ::Aws::Errors::ServiceError
- def message: () -> ::String
- end
- class ConflictException < ::Aws::Errors::ServiceError
- def message: () -> ::String
- def resource_id: () -> ::String
- def resource_type: () -> ::String
- end
- class InternalServerException < ::Aws::Errors::ServiceError
- def message: () -> ::String
- end
- class ResourceNotFoundException < ::Aws::Errors::ServiceError
- def message: () -> ::String
- def resource_id: () -> ::String
- def resource_type: () -> ::String
- end
- class ServiceQuotaExceededException < ::Aws::Errors::ServiceError
- def message: () -> ::String
- def quota_code: () -> ::String
- def resource_id: () -> ::String
- def resource_type: () -> ::String
- def service_code: () -> ::String
- end
- class ServiceUnavailableException < ::Aws::Errors::ServiceError
- def message: () -> ::String
- end
- class ThrottlingException < ::Aws::Errors::ServiceError
- def message: () -> ::String
- def quota_code: () -> ::String
- def service_code: () -> ::String
- end
- class ValidationException < ::Aws::Errors::ServiceError
- def field_list: () -> ::String
- def message: () -> ::String
- def reason: () -> ::String
- end
- end
- end
-end
diff --git a/gems/aws-sdk-cloudwatchevidently/sig/resource.rbs b/gems/aws-sdk-cloudwatchevidently/sig/resource.rbs
deleted file mode 100644
index 2c0eb441d64..00000000000
--- a/gems/aws-sdk-cloudwatchevidently/sig/resource.rbs
+++ /dev/null
@@ -1,85 +0,0 @@
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-module Aws
- module CloudWatchEvidently
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Resource.html
- class Resource
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/CloudWatchEvidently/Resource.html#initialize-instance_method
- def initialize: (
- ?client: Client,
- ?credentials: untyped,
- ?region: String,
- ?access_key_id: String,
- ?account_id: String,
- ?active_endpoint_cache: bool,
- ?adaptive_retry_wait_to_fill: bool,
- ?auth_scheme_preference: Array[String],
- ?client_side_monitoring: bool,
- ?client_side_monitoring_client_id: String,
- ?client_side_monitoring_host: String,
- ?client_side_monitoring_port: Integer,
- ?client_side_monitoring_publisher: untyped,
- ?convert_params: bool,
- ?correct_clock_skew: bool,
- ?defaults_mode: String,
- ?disable_host_prefix_injection: bool,
- ?disable_request_compression: bool,
- ?endpoint: String,
- ?endpoint_cache_max_entries: Integer,
- ?endpoint_cache_max_threads: Integer,
- ?endpoint_cache_poll_interval: Integer,
- ?endpoint_discovery: bool,
- ?ignore_configured_endpoint_urls: bool,
- ?log_formatter: untyped,
- ?log_level: Symbol,
- ?logger: untyped,
- ?max_attempts: Integer,
- ?profile: String,
- ?request_checksum_calculation: String,
- ?request_min_compression_size_bytes: Integer,
- ?response_checksum_validation: String,
- ?retry_backoff: Proc,
- ?retry_base_delay: Float,
- ?retry_jitter: (:none | :equal | :full | ^(Integer) -> Integer),
- ?retry_limit: Integer,
- ?retry_max_delay: Integer,
- ?retry_mode: ("legacy" | "standard" | "adaptive"),
- ?sdk_ua_app_id: String,
- ?secret_access_key: String,
- ?session_token: String,
- ?sigv4a_signing_region_set: Array[String],
- ?stub_responses: untyped,
- ?telemetry_provider: Aws::Telemetry::TelemetryProviderBase,
- ?token_provider: untyped,
- ?use_dualstack_endpoint: bool,
- ?use_fips_endpoint: bool,
- ?validate_params: bool,
- ?endpoint_provider: untyped,
- ?http_proxy: String,
- ?http_open_timeout: (Float | Integer),
- ?http_read_timeout: (Float | Integer),
- ?http_idle_timeout: (Float | Integer),
- ?http_continue_timeout: (Float | Integer),
- ?ssl_timeout: (Float | Integer | nil),
- ?http_wire_trace: bool,
- ?ssl_verify_peer: bool,
- ?ssl_ca_bundle: String,
- ?ssl_ca_directory: String,
- ?ssl_ca_store: String,
- ?on_chunk_received: Proc,
- ?on_chunk_sent: Proc,
- ?raise_response_errors: bool
- ) -> void
- | (?Hash[Symbol, untyped]) -> void
-
- def client: () -> Client
-
-
- end
- end
-end
diff --git a/gems/aws-sdk-cloudwatchevidently/sig/types.rbs b/gems/aws-sdk-cloudwatchevidently/sig/types.rbs
deleted file mode 100644
index 828d6d8d2e6..00000000000
--- a/gems/aws-sdk-cloudwatchevidently/sig/types.rbs
+++ /dev/null
@@ -1,943 +0,0 @@
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-module Aws::CloudWatchEvidently
- module Types
-
- class AccessDeniedException
- attr_accessor message: ::String
- SENSITIVE: []
- end
-
- class BatchEvaluateFeatureRequest
- attr_accessor project: ::String
- attr_accessor requests: ::Array[Types::EvaluationRequest]
- SENSITIVE: []
- end
-
- class BatchEvaluateFeatureResponse
- attr_accessor results: ::Array[Types::EvaluationResult]
- SENSITIVE: []
- end
-
- class CloudWatchLogsDestination
- attr_accessor log_group: ::String
- SENSITIVE: []
- end
-
- class CloudWatchLogsDestinationConfig
- attr_accessor log_group: ::String
- SENSITIVE: []
- end
-
- class ConflictException
- attr_accessor message: ::String
- attr_accessor resource_id: ::String
- attr_accessor resource_type: ::String
- SENSITIVE: []
- end
-
- class CreateExperimentRequest
- attr_accessor description: ::String
- attr_accessor metric_goals: ::Array[Types::MetricGoalConfig]
- attr_accessor name: ::String
- attr_accessor online_ab_config: Types::OnlineAbConfig
- attr_accessor project: ::String
- attr_accessor randomization_salt: ::String
- attr_accessor sampling_rate: ::Integer
- attr_accessor segment: ::String
- attr_accessor tags: ::Hash[::String, ::String]
- attr_accessor treatments: ::Array[Types::TreatmentConfig]
- SENSITIVE: []
- end
-
- class CreateExperimentResponse
- attr_accessor experiment: Types::Experiment
- SENSITIVE: []
- end
-
- class CreateFeatureRequest
- attr_accessor default_variation: ::String
- attr_accessor description: ::String
- attr_accessor entity_overrides: ::Hash[::String, ::String]
- attr_accessor evaluation_strategy: ("ALL_RULES" | "DEFAULT_VARIATION")
- attr_accessor name: ::String
- attr_accessor project: ::String
- attr_accessor tags: ::Hash[::String, ::String]
- attr_accessor variations: ::Array[Types::VariationConfig]
- SENSITIVE: []
- end
-
- class CreateFeatureResponse
- attr_accessor feature: Types::Feature
- SENSITIVE: []
- end
-
- class CreateLaunchRequest
- attr_accessor description: ::String
- attr_accessor groups: ::Array[Types::LaunchGroupConfig]
- attr_accessor metric_monitors: ::Array[Types::MetricMonitorConfig]
- attr_accessor name: ::String
- attr_accessor project: ::String
- attr_accessor randomization_salt: ::String
- attr_accessor scheduled_splits_config: Types::ScheduledSplitsLaunchConfig
- attr_accessor tags: ::Hash[::String, ::String]
- SENSITIVE: []
- end
-
- class CreateLaunchResponse
- attr_accessor launch: Types::Launch
- SENSITIVE: []
- end
-
- class CreateProjectRequest
- attr_accessor app_config_resource: Types::ProjectAppConfigResourceConfig
- attr_accessor data_delivery: Types::ProjectDataDeliveryConfig
- attr_accessor description: ::String
- attr_accessor name: ::String
- attr_accessor tags: ::Hash[::String, ::String]
- SENSITIVE: []
- end
-
- class CreateProjectResponse
- attr_accessor project: Types::Project
- SENSITIVE: []
- end
-
- class CreateSegmentRequest
- attr_accessor description: ::String
- attr_accessor name: ::String
- attr_accessor pattern: ::String
- attr_accessor tags: ::Hash[::String, ::String]
- SENSITIVE: []
- end
-
- class CreateSegmentResponse
- attr_accessor segment: Types::Segment
- SENSITIVE: []
- end
-
- class DeleteExperimentRequest
- attr_accessor experiment: ::String
- attr_accessor project: ::String
- SENSITIVE: []
- end
-
- class DeleteExperimentResponse < Aws::EmptyStructure
- end
-
- class DeleteFeatureRequest
- attr_accessor feature: ::String
- attr_accessor project: ::String
- SENSITIVE: []
- end
-
- class DeleteFeatureResponse < Aws::EmptyStructure
- end
-
- class DeleteLaunchRequest
- attr_accessor launch: ::String
- attr_accessor project: ::String
- SENSITIVE: []
- end
-
- class DeleteLaunchResponse < Aws::EmptyStructure
- end
-
- class DeleteProjectRequest
- attr_accessor project: ::String
- SENSITIVE: []
- end
-
- class DeleteProjectResponse < Aws::EmptyStructure
- end
-
- class DeleteSegmentRequest
- attr_accessor segment: ::String
- SENSITIVE: []
- end
-
- class DeleteSegmentResponse < Aws::EmptyStructure
- end
-
- class EvaluateFeatureRequest
- attr_accessor entity_id: ::String
- attr_accessor evaluation_context: ::String
- attr_accessor feature: ::String
- attr_accessor project: ::String
- SENSITIVE: []
- end
-
- class EvaluateFeatureResponse
- attr_accessor details: ::String
- attr_accessor reason: ::String
- attr_accessor value: Types::VariableValue
- attr_accessor variation: ::String
- SENSITIVE: []
- end
-
- class EvaluationRequest
- attr_accessor entity_id: ::String
- attr_accessor evaluation_context: ::String
- attr_accessor feature: ::String
- SENSITIVE: []
- end
-
- class EvaluationResult
- attr_accessor details: ::String
- attr_accessor entity_id: ::String
- attr_accessor feature: ::String
- attr_accessor project: ::String
- attr_accessor reason: ::String
- attr_accessor value: Types::VariableValue
- attr_accessor variation: ::String
- SENSITIVE: []
- end
-
- class EvaluationRule
- attr_accessor name: ::String
- attr_accessor type: ::String
- SENSITIVE: []
- end
-
- class Event
- attr_accessor data: ::String
- attr_accessor timestamp: ::Time
- attr_accessor type: ("aws.evidently.evaluation" | "aws.evidently.custom")
- SENSITIVE: []
- end
-
- class Experiment
- attr_accessor arn: ::String
- attr_accessor created_time: ::Time
- attr_accessor description: ::String
- attr_accessor execution: Types::ExperimentExecution
- attr_accessor last_updated_time: ::Time
- attr_accessor metric_goals: ::Array[Types::MetricGoal]
- attr_accessor name: ::String
- attr_accessor online_ab_definition: Types::OnlineAbDefinition
- attr_accessor project: ::String
- attr_accessor randomization_salt: ::String
- attr_accessor sampling_rate: ::Integer
- attr_accessor schedule: Types::ExperimentSchedule
- attr_accessor segment: ::String
- attr_accessor status: ("CREATED" | "UPDATING" | "RUNNING" | "COMPLETED" | "CANCELLED")
- attr_accessor status_reason: ::String
- attr_accessor tags: ::Hash[::String, ::String]
- attr_accessor treatments: ::Array[Types::Treatment]
- attr_accessor type: ("aws.evidently.onlineab")
- SENSITIVE: []
- end
-
- class ExperimentExecution
- attr_accessor ended_time: ::Time
- attr_accessor started_time: ::Time
- SENSITIVE: []
- end
-
- class ExperimentReport
- attr_accessor content: ::String
- attr_accessor metric_name: ::String
- attr_accessor report_name: ("BayesianInference")
- attr_accessor treatment_name: ::String
- SENSITIVE: []
- end
-
- class ExperimentResultsData
- attr_accessor metric_name: ::String
- attr_accessor result_stat: ("Mean" | "TreatmentEffect" | "ConfidenceIntervalUpperBound" | "ConfidenceIntervalLowerBound" | "PValue")
- attr_accessor treatment_name: ::String
- attr_accessor values: ::Array[::Float]
- SENSITIVE: []
- end
-
- class ExperimentSchedule
- attr_accessor analysis_complete_time: ::Time
- SENSITIVE: []
- end
-
- class Feature
- attr_accessor arn: ::String
- attr_accessor created_time: ::Time
- attr_accessor default_variation: ::String
- attr_accessor description: ::String
- attr_accessor entity_overrides: ::Hash[::String, ::String]
- attr_accessor evaluation_rules: ::Array[Types::EvaluationRule]
- attr_accessor evaluation_strategy: ("ALL_RULES" | "DEFAULT_VARIATION")
- attr_accessor last_updated_time: ::Time
- attr_accessor name: ::String
- attr_accessor project: ::String
- attr_accessor status: ("AVAILABLE" | "UPDATING")
- attr_accessor tags: ::Hash[::String, ::String]
- attr_accessor value_type: ("STRING" | "LONG" | "DOUBLE" | "BOOLEAN")
- attr_accessor variations: ::Array[Types::Variation]
- SENSITIVE: []
- end
-
- class FeatureSummary
- attr_accessor arn: ::String
- attr_accessor created_time: ::Time
- attr_accessor default_variation: ::String
- attr_accessor evaluation_rules: ::Array[Types::EvaluationRule]
- attr_accessor evaluation_strategy: ("ALL_RULES" | "DEFAULT_VARIATION")
- attr_accessor last_updated_time: ::Time
- attr_accessor name: ::String
- attr_accessor project: ::String
- attr_accessor status: ("AVAILABLE" | "UPDATING")
- attr_accessor tags: ::Hash[::String, ::String]
- SENSITIVE: []
- end
-
- class GetExperimentRequest
- attr_accessor experiment: ::String
- attr_accessor project: ::String
- SENSITIVE: []
- end
-
- class GetExperimentResponse
- attr_accessor experiment: Types::Experiment
- SENSITIVE: []
- end
-
- class GetExperimentResultsRequest
- attr_accessor base_stat: ("Mean")
- attr_accessor end_time: ::Time
- attr_accessor experiment: ::String
- attr_accessor metric_names: ::Array[::String]
- attr_accessor period: ::Integer
- attr_accessor project: ::String
- attr_accessor report_names: ::Array[("BayesianInference")]
- attr_accessor result_stats: ::Array[("BaseStat" | "TreatmentEffect" | "ConfidenceInterval" | "PValue")]
- attr_accessor start_time: ::Time
- attr_accessor treatment_names: ::Array[::String]
- SENSITIVE: []
- end
-
- class GetExperimentResultsResponse
- attr_accessor details: ::String
- attr_accessor reports: ::Array[Types::ExperimentReport]
- attr_accessor results_data: ::Array[Types::ExperimentResultsData]
- attr_accessor timestamps: ::Array[::Time]
- SENSITIVE: []
- end
-
- class GetFeatureRequest
- attr_accessor feature: ::String
- attr_accessor project: ::String
- SENSITIVE: []
- end
-
- class GetFeatureResponse
- attr_accessor feature: Types::Feature
- SENSITIVE: []
- end
-
- class GetLaunchRequest
- attr_accessor launch: ::String
- attr_accessor project: ::String
- SENSITIVE: []
- end
-
- class GetLaunchResponse
- attr_accessor launch: Types::Launch
- SENSITIVE: []
- end
-
- class GetProjectRequest
- attr_accessor project: ::String
- SENSITIVE: []
- end
-
- class GetProjectResponse
- attr_accessor project: Types::Project
- SENSITIVE: []
- end
-
- class GetSegmentRequest
- attr_accessor segment: ::String
- SENSITIVE: []
- end
-
- class GetSegmentResponse
- attr_accessor segment: Types::Segment
- SENSITIVE: []
- end
-
- class InternalServerException
- attr_accessor message: ::String
- SENSITIVE: []
- end
-
- class Launch
- attr_accessor arn: ::String
- attr_accessor created_time: ::Time
- attr_accessor description: ::String
- attr_accessor execution: Types::LaunchExecution
- attr_accessor groups: ::Array[Types::LaunchGroup]
- attr_accessor last_updated_time: ::Time
- attr_accessor metric_monitors: ::Array[Types::MetricMonitor]
- attr_accessor name: ::String
- attr_accessor project: ::String
- attr_accessor randomization_salt: ::String
- attr_accessor scheduled_splits_definition: Types::ScheduledSplitsLaunchDefinition
- attr_accessor status: ("CREATED" | "UPDATING" | "RUNNING" | "COMPLETED" | "CANCELLED")
- attr_accessor status_reason: ::String
- attr_accessor tags: ::Hash[::String, ::String]
- attr_accessor type: ("aws.evidently.splits")
- SENSITIVE: []
- end
-
- class LaunchExecution
- attr_accessor ended_time: ::Time
- attr_accessor started_time: ::Time
- SENSITIVE: []
- end
-
- class LaunchGroup
- attr_accessor description: ::String
- attr_accessor feature_variations: ::Hash[::String, ::String]
- attr_accessor name: ::String
- SENSITIVE: []
- end
-
- class LaunchGroupConfig
- attr_accessor description: ::String
- attr_accessor feature: ::String
- attr_accessor name: ::String
- attr_accessor variation: ::String
- SENSITIVE: []
- end
-
- class ListExperimentsRequest
- attr_accessor max_results: ::Integer
- attr_accessor next_token: ::String
- attr_accessor project: ::String
- attr_accessor status: ("CREATED" | "UPDATING" | "RUNNING" | "COMPLETED" | "CANCELLED")
- SENSITIVE: []
- end
-
- class ListExperimentsResponse
- attr_accessor experiments: ::Array[Types::Experiment]
- attr_accessor next_token: ::String
- SENSITIVE: []
- end
-
- class ListFeaturesRequest
- attr_accessor max_results: ::Integer
- attr_accessor next_token: ::String
- attr_accessor project: ::String
- SENSITIVE: []
- end
-
- class ListFeaturesResponse
- attr_accessor features: ::Array[Types::FeatureSummary]
- attr_accessor next_token: ::String
- SENSITIVE: []
- end
-
- class ListLaunchesRequest
- attr_accessor max_results: ::Integer
- attr_accessor next_token: ::String
- attr_accessor project: ::String
- attr_accessor status: ("CREATED" | "UPDATING" | "RUNNING" | "COMPLETED" | "CANCELLED")
- SENSITIVE: []
- end
-
- class ListLaunchesResponse
- attr_accessor launches: ::Array[Types::Launch]
- attr_accessor next_token: ::String
- SENSITIVE: []
- end
-
- class ListProjectsRequest
- attr_accessor max_results: ::Integer
- attr_accessor next_token: ::String
- SENSITIVE: []
- end
-
- class ListProjectsResponse
- attr_accessor next_token: ::String
- attr_accessor projects: ::Array[Types::ProjectSummary]
- SENSITIVE: []
- end
-
- class ListSegmentReferencesRequest
- attr_accessor max_results: ::Integer
- attr_accessor next_token: ::String
- attr_accessor segment: ::String
- attr_accessor type: ("EXPERIMENT" | "LAUNCH")
- SENSITIVE: []
- end
-
- class ListSegmentReferencesResponse
- attr_accessor next_token: ::String
- attr_accessor referenced_by: ::Array[Types::RefResource]
- SENSITIVE: []
- end
-
- class ListSegmentsRequest
- attr_accessor max_results: ::Integer
- attr_accessor next_token: ::String
- SENSITIVE: []
- end
-
- class ListSegmentsResponse
- attr_accessor next_token: ::String
- attr_accessor segments: ::Array[Types::Segment]
- SENSITIVE: []
- end
-
- class ListTagsForResourceRequest
- attr_accessor resource_arn: ::String
- SENSITIVE: []
- end
-
- class ListTagsForResourceResponse
- attr_accessor tags: ::Hash[::String, ::String]
- SENSITIVE: []
- end
-
- class MetricDefinition
- attr_accessor entity_id_key: ::String
- attr_accessor event_pattern: ::String
- attr_accessor name: ::String
- attr_accessor unit_label: ::String
- attr_accessor value_key: ::String
- SENSITIVE: []
- end
-
- class MetricDefinitionConfig
- attr_accessor entity_id_key: ::String
- attr_accessor event_pattern: ::String
- attr_accessor name: ::String
- attr_accessor unit_label: ::String
- attr_accessor value_key: ::String
- SENSITIVE: []
- end
-
- class MetricGoal
- attr_accessor desired_change: ("INCREASE" | "DECREASE")
- attr_accessor metric_definition: Types::MetricDefinition
- SENSITIVE: []
- end
-
- class MetricGoalConfig
- attr_accessor desired_change: ("INCREASE" | "DECREASE")
- attr_accessor metric_definition: Types::MetricDefinitionConfig
- SENSITIVE: []
- end
-
- class MetricMonitor
- attr_accessor metric_definition: Types::MetricDefinition
- SENSITIVE: []
- end
-
- class MetricMonitorConfig
- attr_accessor metric_definition: Types::MetricDefinitionConfig
- SENSITIVE: []
- end
-
- class OnlineAbConfig
- attr_accessor control_treatment_name: ::String
- attr_accessor treatment_weights: ::Hash[::String, ::Integer]
- SENSITIVE: []
- end
-
- class OnlineAbDefinition
- attr_accessor control_treatment_name: ::String
- attr_accessor treatment_weights: ::Hash[::String, ::Integer]
- SENSITIVE: []
- end
-
- class Project
- attr_accessor active_experiment_count: ::Integer
- attr_accessor active_launch_count: ::Integer
- attr_accessor app_config_resource: Types::ProjectAppConfigResource
- attr_accessor arn: ::String
- attr_accessor created_time: ::Time
- attr_accessor data_delivery: Types::ProjectDataDelivery
- attr_accessor description: ::String
- attr_accessor experiment_count: ::Integer
- attr_accessor feature_count: ::Integer
- attr_accessor last_updated_time: ::Time
- attr_accessor launch_count: ::Integer
- attr_accessor name: ::String
- attr_accessor status: ("AVAILABLE" | "UPDATING")
- attr_accessor tags: ::Hash[::String, ::String]
- SENSITIVE: []
- end
-
- class ProjectAppConfigResource
- attr_accessor application_id: ::String
- attr_accessor configuration_profile_id: ::String
- attr_accessor environment_id: ::String
- SENSITIVE: []
- end
-
- class ProjectAppConfigResourceConfig
- attr_accessor application_id: ::String
- attr_accessor environment_id: ::String
- SENSITIVE: []
- end
-
- class ProjectDataDelivery
- attr_accessor cloud_watch_logs: Types::CloudWatchLogsDestination
- attr_accessor s3_destination: Types::S3Destination
- SENSITIVE: []
- end
-
- class ProjectDataDeliveryConfig
- attr_accessor cloud_watch_logs: Types::CloudWatchLogsDestinationConfig
- attr_accessor s3_destination: Types::S3DestinationConfig
- SENSITIVE: []
- end
-
- class ProjectSummary
- attr_accessor active_experiment_count: ::Integer
- attr_accessor active_launch_count: ::Integer
- attr_accessor arn: ::String
- attr_accessor created_time: ::Time
- attr_accessor description: ::String
- attr_accessor experiment_count: ::Integer
- attr_accessor feature_count: ::Integer
- attr_accessor last_updated_time: ::Time
- attr_accessor launch_count: ::Integer
- attr_accessor name: ::String
- attr_accessor status: ("AVAILABLE" | "UPDATING")
- attr_accessor tags: ::Hash[::String, ::String]
- SENSITIVE: []
- end
-
- class PutProjectEventsRequest
- attr_accessor events: ::Array[Types::Event]
- attr_accessor project: ::String
- SENSITIVE: []
- end
-
- class PutProjectEventsResponse
- attr_accessor event_results: ::Array[Types::PutProjectEventsResultEntry]
- attr_accessor failed_event_count: ::Integer
- SENSITIVE: []
- end
-
- class PutProjectEventsResultEntry
- attr_accessor error_code: ::String
- attr_accessor error_message: ::String
- attr_accessor event_id: ::String
- SENSITIVE: []
- end
-
- class RefResource
- attr_accessor arn: ::String
- attr_accessor end_time: ::String
- attr_accessor last_updated_on: ::String
- attr_accessor name: ::String
- attr_accessor start_time: ::String
- attr_accessor status: ::String
- attr_accessor type: ::String
- SENSITIVE: []
- end
-
- class ResourceNotFoundException
- attr_accessor message: ::String
- attr_accessor resource_id: ::String
- attr_accessor resource_type: ::String
- SENSITIVE: []
- end
-
- class S3Destination
- attr_accessor bucket: ::String
- attr_accessor prefix: ::String
- SENSITIVE: []
- end
-
- class S3DestinationConfig
- attr_accessor bucket: ::String
- attr_accessor prefix: ::String
- SENSITIVE: []
- end
-
- class ScheduledSplit
- attr_accessor group_weights: ::Hash[::String, ::Integer]
- attr_accessor segment_overrides: ::Array[Types::SegmentOverride]
- attr_accessor start_time: ::Time
- SENSITIVE: []
- end
-
- class ScheduledSplitConfig
- attr_accessor group_weights: ::Hash[::String, ::Integer]
- attr_accessor segment_overrides: ::Array[Types::SegmentOverride]
- attr_accessor start_time: ::Time
- SENSITIVE: []
- end
-
- class ScheduledSplitsLaunchConfig
- attr_accessor steps: ::Array[Types::ScheduledSplitConfig]
- SENSITIVE: []
- end
-
- class ScheduledSplitsLaunchDefinition
- attr_accessor steps: ::Array[Types::ScheduledSplit]
- SENSITIVE: []
- end
-
- class Segment
- attr_accessor arn: ::String
- attr_accessor created_time: ::Time
- attr_accessor description: ::String
- attr_accessor experiment_count: ::Integer
- attr_accessor last_updated_time: ::Time
- attr_accessor launch_count: ::Integer
- attr_accessor name: ::String
- attr_accessor pattern: ::String
- attr_accessor tags: ::Hash[::String, ::String]
- SENSITIVE: []
- end
-
- class SegmentOverride
- attr_accessor evaluation_order: ::Integer
- attr_accessor segment: ::String
- attr_accessor weights: ::Hash[::String, ::Integer]
- SENSITIVE: []
- end
-
- class ServiceQuotaExceededException
- attr_accessor message: ::String
- attr_accessor quota_code: ::String
- attr_accessor resource_id: ::String
- attr_accessor resource_type: ::String
- attr_accessor service_code: ::String
- SENSITIVE: []
- end
-
- class ServiceUnavailableException
- attr_accessor message: ::String
- SENSITIVE: []
- end
-
- class StartExperimentRequest
- attr_accessor analysis_complete_time: ::Time
- attr_accessor experiment: ::String
- attr_accessor project: ::String
- SENSITIVE: []
- end
-
- class StartExperimentResponse
- attr_accessor started_time: ::Time
- SENSITIVE: []
- end
-
- class StartLaunchRequest
- attr_accessor launch: ::String
- attr_accessor project: ::String
- SENSITIVE: []
- end
-
- class StartLaunchResponse
- attr_accessor launch: Types::Launch
- SENSITIVE: []
- end
-
- class StopExperimentRequest
- attr_accessor desired_state: ("COMPLETED" | "CANCELLED")
- attr_accessor experiment: ::String
- attr_accessor project: ::String
- attr_accessor reason: ::String
- SENSITIVE: []
- end
-
- class StopExperimentResponse
- attr_accessor ended_time: ::Time
- SENSITIVE: []
- end
-
- class StopLaunchRequest
- attr_accessor desired_state: ("COMPLETED" | "CANCELLED")
- attr_accessor launch: ::String
- attr_accessor project: ::String
- attr_accessor reason: ::String
- SENSITIVE: []
- end
-
- class StopLaunchResponse
- attr_accessor ended_time: ::Time
- SENSITIVE: []
- end
-
- class TagResourceRequest
- attr_accessor resource_arn: ::String
- attr_accessor tags: ::Hash[::String, ::String]
- SENSITIVE: []
- end
-
- class TagResourceResponse < Aws::EmptyStructure
- end
-
- class TestSegmentPatternRequest
- attr_accessor pattern: ::String
- attr_accessor payload: ::String
- SENSITIVE: []
- end
-
- class TestSegmentPatternResponse
- attr_accessor match: bool
- SENSITIVE: []
- end
-
- class ThrottlingException
- attr_accessor message: ::String
- attr_accessor quota_code: ::String
- attr_accessor service_code: ::String
- SENSITIVE: []
- end
-
- class Treatment
- attr_accessor description: ::String
- attr_accessor feature_variations: ::Hash[::String, ::String]
- attr_accessor name: ::String
- SENSITIVE: []
- end
-
- class TreatmentConfig
- attr_accessor description: ::String
- attr_accessor feature: ::String
- attr_accessor name: ::String
- attr_accessor variation: ::String
- SENSITIVE: []
- end
-
- class UntagResourceRequest
- attr_accessor resource_arn: ::String
- attr_accessor tag_keys: ::Array[::String]
- SENSITIVE: []
- end
-
- class UntagResourceResponse < Aws::EmptyStructure
- end
-
- class UpdateExperimentRequest
- attr_accessor description: ::String
- attr_accessor experiment: ::String
- attr_accessor metric_goals: ::Array[Types::MetricGoalConfig]
- attr_accessor online_ab_config: Types::OnlineAbConfig
- attr_accessor project: ::String
- attr_accessor randomization_salt: ::String
- attr_accessor remove_segment: bool
- attr_accessor sampling_rate: ::Integer
- attr_accessor segment: ::String
- attr_accessor treatments: ::Array[Types::TreatmentConfig]
- SENSITIVE: []
- end
-
- class UpdateExperimentResponse
- attr_accessor experiment: Types::Experiment
- SENSITIVE: []
- end
-
- class UpdateFeatureRequest
- attr_accessor add_or_update_variations: ::Array[Types::VariationConfig]
- attr_accessor default_variation: ::String
- attr_accessor description: ::String
- attr_accessor entity_overrides: ::Hash[::String, ::String]
- attr_accessor evaluation_strategy: ("ALL_RULES" | "DEFAULT_VARIATION")
- attr_accessor feature: ::String
- attr_accessor project: ::String
- attr_accessor remove_variations: ::Array[::String]
- SENSITIVE: []
- end
-
- class UpdateFeatureResponse
- attr_accessor feature: Types::Feature
- SENSITIVE: []
- end
-
- class UpdateLaunchRequest
- attr_accessor description: ::String
- attr_accessor groups: ::Array[Types::LaunchGroupConfig]
- attr_accessor launch: ::String
- attr_accessor metric_monitors: ::Array[Types::MetricMonitorConfig]
- attr_accessor project: ::String
- attr_accessor randomization_salt: ::String
- attr_accessor scheduled_splits_config: Types::ScheduledSplitsLaunchConfig
- SENSITIVE: []
- end
-
- class UpdateLaunchResponse
- attr_accessor launch: Types::Launch
- SENSITIVE: []
- end
-
- class UpdateProjectDataDeliveryRequest
- attr_accessor cloud_watch_logs: Types::CloudWatchLogsDestinationConfig
- attr_accessor project: ::String
- attr_accessor s3_destination: Types::S3DestinationConfig
- SENSITIVE: []
- end
-
- class UpdateProjectDataDeliveryResponse
- attr_accessor project: Types::Project
- SENSITIVE: []
- end
-
- class UpdateProjectRequest
- attr_accessor app_config_resource: Types::ProjectAppConfigResourceConfig
- attr_accessor description: ::String
- attr_accessor project: ::String
- SENSITIVE: []
- end
-
- class UpdateProjectResponse
- attr_accessor project: Types::Project
- SENSITIVE: []
- end
-
- class ValidationException
- attr_accessor field_list: ::Array[Types::ValidationExceptionField]
- attr_accessor message: ::String
- attr_accessor reason: ("unknownOperation" | "cannotParse" | "fieldValidationFailed" | "other")
- SENSITIVE: []
- end
-
- class ValidationExceptionField
- attr_accessor message: ::String
- attr_accessor name: ::String
- SENSITIVE: []
- end
-
- class VariableValue
- attr_accessor bool_value: bool
- attr_accessor double_value: ::Float
- attr_accessor long_value: ::Integer
- attr_accessor string_value: ::String
- attr_accessor unknown: untyped
- SENSITIVE: []
-
- class BoolValue < VariableValue
- end
- class DoubleValue < VariableValue
- end
- class LongValue < VariableValue
- end
- class StringValue < VariableValue
- end
- class Unknown < VariableValue
- end
- end
-
- class Variation
- attr_accessor name: ::String
- attr_accessor value: Types::VariableValue
- SENSITIVE: []
- end
-
- class VariationConfig
- attr_accessor name: ::String
- attr_accessor value: Types::VariableValue
- SENSITIVE: []
- end
- end
-end
diff --git a/gems/aws-sdk-cloudwatchevidently/sig/waiters.rbs b/gems/aws-sdk-cloudwatchevidently/sig/waiters.rbs
deleted file mode 100644
index 81c7771933b..00000000000
--- a/gems/aws-sdk-cloudwatchevidently/sig/waiters.rbs
+++ /dev/null
@@ -1,13 +0,0 @@
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-module Aws
- module CloudWatchEvidently
- module Waiters
- end
- end
-end
diff --git a/gems/aws-sdk-cloudwatchevidently/spec/endpoint_provider_spec.rb b/gems/aws-sdk-cloudwatchevidently/spec/endpoint_provider_spec.rb
deleted file mode 100644
index b4aabe87b3a..00000000000
--- a/gems/aws-sdk-cloudwatchevidently/spec/endpoint_provider_spec.rb
+++ /dev/null
@@ -1,421 +0,0 @@
-# frozen_string_literal: true
-
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-
-require_relative 'spec_helper'
-
-module Aws::CloudWatchEvidently
- describe EndpointProvider do
- subject { Aws::CloudWatchEvidently::EndpointProvider.new }
-
- context "For region ap-northeast-1 with FIPS disabled and DualStack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://evidently.ap-northeast-1.amazonaws.com"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "ap-northeast-1", use_fips: false, use_dual_stack: false})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region ap-southeast-1 with FIPS disabled and DualStack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://evidently.ap-southeast-1.amazonaws.com"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "ap-southeast-1", use_fips: false, use_dual_stack: false})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region ap-southeast-2 with FIPS disabled and DualStack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://evidently.ap-southeast-2.amazonaws.com"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "ap-southeast-2", use_fips: false, use_dual_stack: false})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region eu-central-1 with FIPS disabled and DualStack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://evidently.eu-central-1.amazonaws.com"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "eu-central-1", use_fips: false, use_dual_stack: false})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region eu-north-1 with FIPS disabled and DualStack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://evidently.eu-north-1.amazonaws.com"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "eu-north-1", use_fips: false, use_dual_stack: false})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region eu-west-1 with FIPS disabled and DualStack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://evidently.eu-west-1.amazonaws.com"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "eu-west-1", use_fips: false, use_dual_stack: false})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region us-east-1 with FIPS disabled and DualStack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://evidently.us-east-1.amazonaws.com"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "us-east-1", use_fips: false, use_dual_stack: false})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region us-east-2 with FIPS disabled and DualStack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://evidently.us-east-2.amazonaws.com"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "us-east-2", use_fips: false, use_dual_stack: false})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region us-west-2 with FIPS disabled and DualStack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://evidently.us-west-2.amazonaws.com"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "us-west-2", use_fips: false, use_dual_stack: false})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region us-east-1 with FIPS enabled and DualStack enabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://evidently-fips.us-east-1.api.aws"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "us-east-1", use_fips: true, use_dual_stack: true})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region us-east-1 with FIPS enabled and DualStack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://evidently-fips.us-east-1.amazonaws.com"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "us-east-1", use_fips: true, use_dual_stack: false})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region us-east-1 with FIPS disabled and DualStack enabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://evidently.us-east-1.api.aws"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "us-east-1", use_fips: false, use_dual_stack: true})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region cn-north-1 with FIPS enabled and DualStack enabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://evidently-fips.cn-north-1.api.amazonwebservices.com.cn"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "cn-north-1", use_fips: true, use_dual_stack: true})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region cn-north-1 with FIPS enabled and DualStack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://evidently-fips.cn-north-1.amazonaws.com.cn"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "cn-north-1", use_fips: true, use_dual_stack: false})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region cn-north-1 with FIPS disabled and DualStack enabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://evidently.cn-north-1.api.amazonwebservices.com.cn"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "cn-north-1", use_fips: false, use_dual_stack: true})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region cn-north-1 with FIPS disabled and DualStack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://evidently.cn-north-1.amazonaws.com.cn"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "cn-north-1", use_fips: false, use_dual_stack: false})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region us-gov-east-1 with FIPS enabled and DualStack enabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://evidently-fips.us-gov-east-1.api.aws"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "us-gov-east-1", use_fips: true, use_dual_stack: true})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region us-gov-east-1 with FIPS enabled and DualStack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://evidently-fips.us-gov-east-1.amazonaws.com"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "us-gov-east-1", use_fips: true, use_dual_stack: false})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region us-gov-east-1 with FIPS disabled and DualStack enabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://evidently.us-gov-east-1.api.aws"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "us-gov-east-1", use_fips: false, use_dual_stack: true})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region us-gov-east-1 with FIPS disabled and DualStack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://evidently.us-gov-east-1.amazonaws.com"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "us-gov-east-1", use_fips: false, use_dual_stack: false})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region us-iso-east-1 with FIPS enabled and DualStack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://evidently-fips.us-iso-east-1.c2s.ic.gov"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "us-iso-east-1", use_fips: true, use_dual_stack: false})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region us-iso-east-1 with FIPS disabled and DualStack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://evidently.us-iso-east-1.c2s.ic.gov"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "us-iso-east-1", use_fips: false, use_dual_stack: false})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region us-isob-east-1 with FIPS enabled and DualStack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://evidently-fips.us-isob-east-1.sc2s.sgov.gov"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "us-isob-east-1", use_fips: true, use_dual_stack: false})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region us-isob-east-1 with FIPS disabled and DualStack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://evidently.us-isob-east-1.sc2s.sgov.gov"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "us-isob-east-1", use_fips: false, use_dual_stack: false})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For custom endpoint with region set and fips disabled and dualstack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://example.com"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "us-east-1", use_fips: false, use_dual_stack: false, endpoint: "https://example.com"})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For custom endpoint with region not set and fips disabled and dualstack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://example.com"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{use_fips: false, use_dual_stack: false, endpoint: "https://example.com"})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For custom endpoint with fips enabled and dualstack disabled" do
- let(:expected) do
- {"error" => "Invalid Configuration: FIPS and custom endpoint are not supported"}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "us-east-1", use_fips: true, use_dual_stack: false, endpoint: "https://example.com"})
- expect do
- subject.resolve_endpoint(params)
- end.to raise_error(ArgumentError, expected['error'])
- end
- end
-
- context "For custom endpoint with fips disabled and dualstack enabled" do
- let(:expected) do
- {"error" => "Invalid Configuration: Dualstack and custom endpoint are not supported"}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "us-east-1", use_fips: false, use_dual_stack: true, endpoint: "https://example.com"})
- expect do
- subject.resolve_endpoint(params)
- end.to raise_error(ArgumentError, expected['error'])
- end
- end
-
- context "Missing region" do
- let(:expected) do
- {"error" => "Invalid Configuration: Missing Region"}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{})
- expect do
- subject.resolve_endpoint(params)
- end.to raise_error(ArgumentError, expected['error'])
- end
- end
-
- end
-end
diff --git a/gems/aws-sdk-cloudwatchevidently/spec/spec_helper.rb b/gems/aws-sdk-cloudwatchevidently/spec/spec_helper.rb
deleted file mode 100644
index fac2f4c1459..00000000000
--- a/gems/aws-sdk-cloudwatchevidently/spec/spec_helper.rb
+++ /dev/null
@@ -1,18 +0,0 @@
-# frozen_string_literal: true
-
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-require_relative '../../aws-sdk-core/spec/shared_spec_helper'
-
-$:.unshift(File.expand_path('../../lib', __FILE__))
-$:.unshift(File.expand_path('../../../aws-sdk-core/lib', __FILE__))
-$:.unshift(File.expand_path('../../../aws-sigv4/lib', __FILE__))
-
-require 'rspec'
-require 'webmock/rspec'
-require 'aws-sdk-cloudwatchevidently'
diff --git a/gems/aws-sdk-iotanalytics/CHANGELOG.md b/gems/aws-sdk-iotanalytics/CHANGELOG.md
deleted file mode 100644
index e59f0c88d9e..00000000000
--- a/gems/aws-sdk-iotanalytics/CHANGELOG.md
+++ /dev/null
@@ -1,482 +0,0 @@
-Unreleased Changes
-------------------
-
-* Feature - IoT Analytics has been removed from the SDK because it has been discontinued.
-
-1.93.0 (2026-01-16)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.92.0 (2026-01-08)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.91.0 (2026-01-05)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.90.0 (2025-11-21)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.89.0 (2025-10-22)
-------------------
-
-* Feature - Update endpoint ruleset parameters casing
-
-1.88.0 (2025-10-21)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.87.0 (2025-08-26)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.86.0 (2025-08-22)
-------------------
-
-* Feature - Remove incorrect endpoint tests
-
-1.85.0 (2025-08-04)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.84.0 (2025-07-31)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.83.0 (2025-07-21)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.82.0 (2025-06-02)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.81.0 (2025-05-12)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.80.0 (2025-05-01)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.79.0 (2025-02-18)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.78.0 (2025-02-06)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.77.0 (2025-01-15)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.76.0 (2024-11-06)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.75.0 (2024-10-18)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.74.0 (2024-09-24)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.73.0 (2024-09-23)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.72.0 (2024-09-20)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.71.0 (2024-09-11)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.70.0 (2024-09-10)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.69.0 (2024-09-03)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.68.0 (2024-07-02)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.67.0 (2024-06-28)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.66.0 (2024-06-25)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.65.0 (2024-06-24)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.64.0 (2024-06-05)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.63.0 (2024-05-13)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.62.0 (2024-04-25)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.61.0 (2024-01-26)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.60.0 (2023-11-28)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.59.0 (2023-11-22)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.58.0 (2023-09-27)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.57.0 (2023-09-19)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.56.0 (2023-07-11)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.55.0 (2023-07-06)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.54.0 (2023-06-28)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.53.0 (2023-06-15)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.52.0 (2023-05-31)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.51.0 (2023-01-18)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-* Issue - Replace runtime endpoint resolution approach with generated ruby code.
-
-1.50.0 (2022-10-25)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.49.0 (2022-02-24)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.48.0 (2022-02-03)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.47.0 (2021-12-21)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.46.0 (2021-11-30)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.45.0 (2021-11-04)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.44.0 (2021-10-18)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.43.0 (2021-09-01)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.42.0 (2021-07-30)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.41.0 (2021-07-28)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.40.0 (2021-07-27)
-------------------
-
-* Feature - IoT Analytics now supports creating a dataset resource with IoT SiteWise MultiLayerStorage data stores, enabling customers to query industrial data within the service. This release includes adding JOIN functionality for customers to query multiple data sources in a dataset.
-
-1.39.0 (2021-06-14)
-------------------
-
-* Feature - Adds support for data store partitions.
-
-1.38.0 (2021-03-10)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.37.0 (2021-02-02)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.36.0 (2020-12-15)
-------------------
-
-* Feature - FileFormatConfiguration enables data store to save data in JSON or Parquet format. S3Paths enables you to specify the S3 objects that save your channel messages when you reprocess the pipeline.
-
-1.35.0 (2020-11-09)
-------------------
-
-* Feature - AWS IoT Analytics now supports Late Data Notifications for datasets, dataset content creation using previous version IDs, and includes the LastMessageArrivalTime attribute for channels and datastores.
-
-1.34.0 (2020-09-30)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.33.0 (2020-09-15)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.32.0 (2020-08-25)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.31.0 (2020-06-23)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.30.1 (2020-06-11)
-------------------
-
-* Issue - Republish previous version with correct dependency on `aws-sdk-core`.
-
-1.30.0 (2020-06-10)
-------------------
-
-* Issue - This version has been yanked. (#2327).
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.29.0 (2020-05-28)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.28.0 (2020-05-07)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.27.0 (2020-03-09)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.26.0 (2019-10-23)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.25.0 (2019-10-10)
-------------------
-
-* Feature - Add `completionTime` to API call ListDatasetContents.
-
-1.24.0 (2019-07-25)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.23.0 (2019-07-01)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.22.0 (2019-06-17)
-------------------
-
-* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
-
-1.21.0 (2019-05-30)
-------------------
-
-* Feature - IoT Analytics adds the option to use your own S3 bucket to store channel and data store resources. Previously, only service-managed storage was used.
-
-1.20.0 (2019-05-21)
-------------------
-
-* Feature - API update.
-
-1.19.0 (2019-05-15)
-------------------
-
-* Feature - API update.
-
-1.18.0 (2019-05-14)
-------------------
-
-* Feature - API update.
-
-1.17.0 (2019-05-13)
-------------------
-
-* Feature - API update.
-
-1.16.0 (2019-03-25)
-------------------
-
-* Feature - API update.
-
-1.15.0 (2019-03-21)
-------------------
-
-* Feature - API update.
-
-1.14.0 (2019-03-18)
-------------------
-
-* Feature - API update.
-
-1.13.0 (2019-03-14)
-------------------
-
-* Feature - API update.
-
-1.12.0 (2019-01-03)
-------------------
-
-* Feature - API update.
-
-1.11.0 (2018-11-27)
-------------------
-
-* Feature - API update.
-
-1.10.0 (2018-11-20)
-------------------
-
-* Feature - API update.
-
-1.9.0 (2018-10-24)
-------------------
-
-* Feature - API update.
-
-1.8.0 (2018-10-23)
-------------------
-
-* Feature - API update.
-
-1.7.0 (2018-09-06)
-------------------
-
-* Feature - Adds code paths and plugins for future SDK instrumentation and telemetry.
-
-1.6.0 (2018-09-05)
-------------------
-
-* Feature - API update.
-
-1.5.0 (2018-08-27)
-------------------
-
-* Feature - API update.
-
-1.4.0 (2018-08-23)
-------------------
-
-* Feature - API update.
-
-1.3.0 (2018-07-18)
-------------------
-
-* Feature - API update.
-
-1.2.0 (2018-06-26)
-------------------
-
-* Feature - API update.
-
-1.1.0 (2018-06-14)
-------------------
-
-* Feature - API update.
-
-1.0.0 (2018-04-23)
-------------------
-
-* Feature - Initial release of `aws-sdk-iotanalytics`.
diff --git a/gems/aws-sdk-iotanalytics/LICENSE.txt b/gems/aws-sdk-iotanalytics/LICENSE.txt
deleted file mode 100644
index d6456956733..00000000000
--- a/gems/aws-sdk-iotanalytics/LICENSE.txt
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/gems/aws-sdk-iotanalytics/VERSION b/gems/aws-sdk-iotanalytics/VERSION
deleted file mode 100644
index 95784efddbc..00000000000
--- a/gems/aws-sdk-iotanalytics/VERSION
+++ /dev/null
@@ -1 +0,0 @@
-1.93.0
diff --git a/gems/aws-sdk-iotanalytics/aws-sdk-iotanalytics.gemspec b/gems/aws-sdk-iotanalytics/aws-sdk-iotanalytics.gemspec
deleted file mode 100644
index 9ac7e859a5a..00000000000
--- a/gems/aws-sdk-iotanalytics/aws-sdk-iotanalytics.gemspec
+++ /dev/null
@@ -1,32 +0,0 @@
-# frozen_string_literal: true
-
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-Gem::Specification.new do |spec|
-
- spec.name = 'aws-sdk-iotanalytics'
- spec.version = File.read(File.expand_path('../VERSION', __FILE__)).strip
- spec.summary = 'AWS SDK for Ruby - AWS IoT Analytics'
- spec.description = 'Official AWS Ruby gem for AWS IoT Analytics. This gem is part of the AWS SDK for Ruby.'
- spec.author = 'Amazon Web Services'
- spec.homepage = 'https://github.com/aws/aws-sdk-ruby'
- spec.license = 'Apache-2.0'
- spec.email = ['aws-dr-rubygems@amazon.com']
- spec.require_paths = ['lib']
- spec.files = Dir["LICENSE.txt", "CHANGELOG.md", "VERSION", "lib/**/*.rb", "sig/**/*.rbs"]
-
- spec.metadata = {
- 'source_code_uri' => 'https://github.com/aws/aws-sdk-ruby/tree/version-3/gems/aws-sdk-iotanalytics',
- 'changelog_uri' => 'https://github.com/aws/aws-sdk-ruby/tree/version-3/gems/aws-sdk-iotanalytics/CHANGELOG.md'
- }
-
- spec.add_dependency('aws-sdk-core', '~> 3', '>= 3.241.4')
- spec.add_dependency('aws-sigv4', '~> 1.5')
-
- spec.required_ruby_version = '>= 2.7'
-end
diff --git a/gems/aws-sdk-iotanalytics/features/env.rb b/gems/aws-sdk-iotanalytics/features/env.rb
deleted file mode 100644
index 85e7f54ae9d..00000000000
--- a/gems/aws-sdk-iotanalytics/features/env.rb
+++ /dev/null
@@ -1,18 +0,0 @@
-# frozen_string_literal: true
-
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-$:.unshift(File.expand_path('../../lib', __FILE__))
-$:.unshift(File.expand_path('../../../aws-sdk-core/features', __FILE__))
-$:.unshift(File.expand_path('../../../aws-sdk-core/lib', __FILE__))
-$:.unshift(File.expand_path('../../../aws-sigv4/lib', __FILE__))
-
-require 'features_helper'
-require 'aws-sdk-iotanalytics'
-
-Aws::IoTAnalytics::Client.add_plugin(ApiCallTracker)
diff --git a/gems/aws-sdk-iotanalytics/features/step_definitions.rb b/gems/aws-sdk-iotanalytics/features/step_definitions.rb
deleted file mode 100644
index ddc76fefc0d..00000000000
--- a/gems/aws-sdk-iotanalytics/features/step_definitions.rb
+++ /dev/null
@@ -1,10 +0,0 @@
-# frozen_string_literal: true
-
-Before("@iotanalytics") do
- @service = Aws::IoTAnalytics::Resource.new
- @client = @service.client
-end
-
-After("@iotanalytics") do
- # shared cleanup logic
-end
diff --git a/gems/aws-sdk-iotanalytics/lib/aws-sdk-iotanalytics.rb b/gems/aws-sdk-iotanalytics/lib/aws-sdk-iotanalytics.rb
deleted file mode 100644
index fd8924bfa5b..00000000000
--- a/gems/aws-sdk-iotanalytics/lib/aws-sdk-iotanalytics.rb
+++ /dev/null
@@ -1,61 +0,0 @@
-# frozen_string_literal: true
-
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-
-require 'aws-sdk-core'
-require 'aws-sigv4'
-
-Aws::Plugins::GlobalConfiguration.add_identifier(:iotanalytics)
-
-# This module provides support for AWS IoT Analytics. This module is available in the
-# `aws-sdk-iotanalytics` gem.
-#
-# # Client
-#
-# The {Client} class provides one method for each API operation. Operation
-# methods each accept a hash of request parameters and return a response
-# structure.
-#
-# io_t_analytics = Aws::IoTAnalytics::Client.new
-# resp = io_t_analytics.batch_put_message(params)
-#
-# See {Client} for more information.
-#
-# # Errors
-#
-# Errors returned from AWS IoT Analytics are defined in the
-# {Errors} module and all extend {Errors::ServiceError}.
-#
-# begin
-# # do stuff
-# rescue Aws::IoTAnalytics::Errors::ServiceError
-# # rescues all AWS IoT Analytics API errors
-# end
-#
-# See {Errors} for more information.
-#
-# @!group service
-module Aws::IoTAnalytics
- autoload :Types, 'aws-sdk-iotanalytics/types'
- autoload :ClientApi, 'aws-sdk-iotanalytics/client_api'
- module Plugins
- autoload :Endpoints, 'aws-sdk-iotanalytics/plugins/endpoints.rb'
- end
- autoload :Client, 'aws-sdk-iotanalytics/client'
- autoload :Errors, 'aws-sdk-iotanalytics/errors'
- autoload :Resource, 'aws-sdk-iotanalytics/resource'
- autoload :EndpointParameters, 'aws-sdk-iotanalytics/endpoint_parameters'
- autoload :EndpointProvider, 'aws-sdk-iotanalytics/endpoint_provider'
- autoload :Endpoints, 'aws-sdk-iotanalytics/endpoints'
-
- GEM_VERSION = '1.93.0'
-
-end
-
-require_relative 'aws-sdk-iotanalytics/customizations'
diff --git a/gems/aws-sdk-iotanalytics/lib/aws-sdk-iotanalytics/client.rb b/gems/aws-sdk-iotanalytics/lib/aws-sdk-iotanalytics/client.rb
deleted file mode 100644
index f3e8b7c180c..00000000000
--- a/gems/aws-sdk-iotanalytics/lib/aws-sdk-iotanalytics/client.rb
+++ /dev/null
@@ -1,2414 +0,0 @@
-# frozen_string_literal: true
-
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-require 'seahorse/client/plugins/content_length'
-require 'aws-sdk-core/plugins/credentials_configuration'
-require 'aws-sdk-core/plugins/logging'
-require 'aws-sdk-core/plugins/param_converter'
-require 'aws-sdk-core/plugins/param_validator'
-require 'aws-sdk-core/plugins/user_agent'
-require 'aws-sdk-core/plugins/helpful_socket_errors'
-require 'aws-sdk-core/plugins/retry_errors'
-require 'aws-sdk-core/plugins/global_configuration'
-require 'aws-sdk-core/plugins/regional_endpoint'
-require 'aws-sdk-core/plugins/endpoint_discovery'
-require 'aws-sdk-core/plugins/endpoint_pattern'
-require 'aws-sdk-core/plugins/response_paging'
-require 'aws-sdk-core/plugins/stub_responses'
-require 'aws-sdk-core/plugins/idempotency_token'
-require 'aws-sdk-core/plugins/invocation_id'
-require 'aws-sdk-core/plugins/jsonvalue_converter'
-require 'aws-sdk-core/plugins/client_metrics_plugin'
-require 'aws-sdk-core/plugins/client_metrics_send_plugin'
-require 'aws-sdk-core/plugins/transfer_encoding'
-require 'aws-sdk-core/plugins/http_checksum'
-require 'aws-sdk-core/plugins/checksum_algorithm'
-require 'aws-sdk-core/plugins/request_compression'
-require 'aws-sdk-core/plugins/defaults_mode'
-require 'aws-sdk-core/plugins/recursion_detection'
-require 'aws-sdk-core/plugins/telemetry'
-require 'aws-sdk-core/plugins/sign'
-require 'aws-sdk-core/plugins/protocols/rest_json'
-
-module Aws::IoTAnalytics
- # An API client for IoTAnalytics. To construct a client, you need to configure a `:region` and `:credentials`.
- #
- # client = Aws::IoTAnalytics::Client.new(
- # region: region_name,
- # credentials: credentials,
- # # ...
- # )
- #
- # For details on configuring region and credentials see
- # the [developer guide](/sdk-for-ruby/v3/developer-guide/setup-config.html).
- #
- # See {#initialize} for a full list of supported configuration options.
- class Client < Seahorse::Client::Base
-
- include Aws::ClientStubs
-
- @identifier = :iotanalytics
-
- set_api(ClientApi::API)
-
- add_plugin(Seahorse::Client::Plugins::ContentLength)
- add_plugin(Aws::Plugins::CredentialsConfiguration)
- add_plugin(Aws::Plugins::Logging)
- add_plugin(Aws::Plugins::ParamConverter)
- add_plugin(Aws::Plugins::ParamValidator)
- add_plugin(Aws::Plugins::UserAgent)
- add_plugin(Aws::Plugins::HelpfulSocketErrors)
- add_plugin(Aws::Plugins::RetryErrors)
- add_plugin(Aws::Plugins::GlobalConfiguration)
- add_plugin(Aws::Plugins::RegionalEndpoint)
- add_plugin(Aws::Plugins::EndpointDiscovery)
- add_plugin(Aws::Plugins::EndpointPattern)
- add_plugin(Aws::Plugins::ResponsePaging)
- add_plugin(Aws::Plugins::StubResponses)
- add_plugin(Aws::Plugins::IdempotencyToken)
- add_plugin(Aws::Plugins::InvocationId)
- add_plugin(Aws::Plugins::JsonvalueConverter)
- add_plugin(Aws::Plugins::ClientMetricsPlugin)
- add_plugin(Aws::Plugins::ClientMetricsSendPlugin)
- add_plugin(Aws::Plugins::TransferEncoding)
- add_plugin(Aws::Plugins::HttpChecksum)
- add_plugin(Aws::Plugins::ChecksumAlgorithm)
- add_plugin(Aws::Plugins::RequestCompression)
- add_plugin(Aws::Plugins::DefaultsMode)
- add_plugin(Aws::Plugins::RecursionDetection)
- add_plugin(Aws::Plugins::Telemetry)
- add_plugin(Aws::Plugins::Sign)
- add_plugin(Aws::Plugins::Protocols::RestJson)
- add_plugin(Aws::IoTAnalytics::Plugins::Endpoints)
-
- # @overload initialize(options)
- # @param [Hash] options
- #
- # @option options [Array] :plugins ([]])
- # A list of plugins to apply to the client. Each plugin is either a
- # class name or an instance of a plugin class.
- #
- # @option options [required, Aws::CredentialProvider] :credentials
- # Your AWS credentials used for authentication. This can be any class that includes and implements
- # `Aws::CredentialProvider`, or instance of any one of the following classes:
- #
- # * `Aws::Credentials` - Used for configuring static, non-refreshing
- # credentials.
- #
- # * `Aws::SharedCredentials` - Used for loading static credentials from a
- # shared file, such as `~/.aws/config`.
- #
- # * `Aws::AssumeRoleCredentials` - Used when you need to assume a role.
- #
- # * `Aws::AssumeRoleWebIdentityCredentials` - Used when you need to
- # assume a role after providing credentials via the web.
- #
- # * `Aws::SSOCredentials` - Used for loading credentials from AWS SSO using an
- # access token generated from `aws login`.
- #
- # * `Aws::ProcessCredentials` - Used for loading credentials from a
- # process that outputs to stdout.
- #
- # * `Aws::InstanceProfileCredentials` - Used for loading credentials
- # from an EC2 IMDS on an EC2 instance.
- #
- # * `Aws::ECSCredentials` - Used for loading credentials from
- # instances running in ECS.
- #
- # * `Aws::CognitoIdentityCredentials` - Used for loading credentials
- # from the Cognito Identity service.
- #
- # When `:credentials` are not configured directly, the following locations will be searched for credentials:
- #
- # * `Aws.config[:credentials]`
- #
- # * The `:access_key_id`, `:secret_access_key`, `:session_token`, and
- # `:account_id` options.
- #
- # * `ENV['AWS_ACCESS_KEY_ID']`, `ENV['AWS_SECRET_ACCESS_KEY']`,
- # `ENV['AWS_SESSION_TOKEN']`, and `ENV['AWS_ACCOUNT_ID']`.
- #
- # * `~/.aws/credentials`
- #
- # * `~/.aws/config`
- #
- # * EC2/ECS IMDS instance profile - When used by default, the timeouts are very aggressive.
- # Construct and pass an instance of `Aws::InstanceProfileCredentials` or `Aws::ECSCredentials` to
- # enable retries and extended timeouts. Instance profile credential fetching can be disabled by
- # setting `ENV['AWS_EC2_METADATA_DISABLED']` to `true`.
- #
- # @option options [required, String] :region
- # The AWS region to connect to. The configured `:region` is
- # used to determine the service `:endpoint`. When not passed,
- # a default `:region` is searched for in the following locations:
- #
- # * `Aws.config[:region]`
- # * `ENV['AWS_REGION']`
- # * `ENV['AMAZON_REGION']`
- # * `ENV['AWS_DEFAULT_REGION']`
- # * `~/.aws/credentials`
- # * `~/.aws/config`
- #
- # @option options [String] :access_key_id
- #
- # @option options [String] :account_id
- #
- # @option options [Boolean] :active_endpoint_cache (false)
- # When set to `true`, a thread polling for endpoints will be running in
- # the background every 60 secs (default). Defaults to `false`.
- #
- # @option options [Boolean] :adaptive_retry_wait_to_fill (true)
- # Used only in `adaptive` retry mode. When true, the request will sleep
- # until there is sufficent client side capacity to retry the request.
- # When false, the request will raise a `RetryCapacityNotAvailableError` and will
- # not retry instead of sleeping.
- #
- # @option options [Array] :auth_scheme_preference
- # A list of preferred authentication schemes to use when making a request. Supported values are:
- # `sigv4`, `sigv4a`, `httpBearerAuth`, and `noAuth`. When set using `ENV['AWS_AUTH_SCHEME_PREFERENCE']` or in
- # shared config as `auth_scheme_preference`, the value should be a comma-separated list.
- #
- # @option options [Boolean] :client_side_monitoring (false)
- # When `true`, client-side metrics will be collected for all API requests from
- # this client.
- #
- # @option options [String] :client_side_monitoring_client_id ("")
- # Allows you to provide an identifier for this client which will be attached to
- # all generated client side metrics. Defaults to an empty string.
- #
- # @option options [String] :client_side_monitoring_host ("127.0.0.1")
- # Allows you to specify the DNS hostname or IPv4 or IPv6 address that the client
- # side monitoring agent is running on, where client metrics will be published via UDP.
- #
- # @option options [Integer] :client_side_monitoring_port (31000)
- # Required for publishing client metrics. The port that the client side monitoring
- # agent is running on, where client metrics will be published via UDP.
- #
- # @option options [Aws::ClientSideMonitoring::Publisher] :client_side_monitoring_publisher (Aws::ClientSideMonitoring::Publisher)
- # Allows you to provide a custom client-side monitoring publisher class. By default,
- # will use the Client Side Monitoring Agent Publisher.
- #
- # @option options [Boolean] :convert_params (true)
- # When `true`, an attempt is made to coerce request parameters into
- # the required types.
- #
- # @option options [Boolean] :correct_clock_skew (true)
- # Used only in `standard` and adaptive retry modes. Specifies whether to apply
- # a clock skew correction and retry requests with skewed client clocks.
- #
- # @option options [String] :defaults_mode ("legacy")
- # See {Aws::DefaultsModeConfiguration} for a list of the
- # accepted modes and the configuration defaults that are included.
- #
- # @option options [Boolean] :disable_host_prefix_injection (false)
- # When `true`, the SDK will not prepend the modeled host prefix to the endpoint.
- #
- # @option options [Boolean] :disable_request_compression (false)
- # When set to 'true' the request body will not be compressed
- # for supported operations.
- #
- # @option options [String, URI::HTTPS, URI::HTTP] :endpoint
- # Normally you should not configure the `:endpoint` option
- # directly. This is normally constructed from the `:region`
- # option. Configuring `:endpoint` is normally reserved for
- # connecting to test or custom endpoints. The endpoint should
- # be a URI formatted like:
- #
- # 'http://example.com'
- # 'https://example.com'
- # 'http://example.com:123'
- #
- # @option options [Integer] :endpoint_cache_max_entries (1000)
- # Used for the maximum size limit of the LRU cache storing endpoints data
- # for endpoint discovery enabled operations. Defaults to 1000.
- #
- # @option options [Integer] :endpoint_cache_max_threads (10)
- # Used for the maximum threads in use for polling endpoints to be cached, defaults to 10.
- #
- # @option options [Integer] :endpoint_cache_poll_interval (60)
- # When :endpoint_discovery and :active_endpoint_cache is enabled,
- # Use this option to config the time interval in seconds for making
- # requests fetching endpoints information. Defaults to 60 sec.
- #
- # @option options [Boolean] :endpoint_discovery (false)
- # When set to `true`, endpoint discovery will be enabled for operations when available.
- #
- # @option options [Boolean] :ignore_configured_endpoint_urls
- # Setting to true disables use of endpoint URLs provided via environment
- # variables and the shared configuration file.
- #
- # @option options [Aws::Log::Formatter] :log_formatter (Aws::Log::Formatter.default)
- # The log formatter.
- #
- # @option options [Symbol] :log_level (:info)
- # The log level to send messages to the `:logger` at.
- #
- # @option options [Logger] :logger
- # The Logger instance to send log messages to. If this option
- # is not set, logging will be disabled.
- #
- # @option options [Integer] :max_attempts (3)
- # An integer representing the maximum number attempts that will be made for
- # a single request, including the initial attempt. For example,
- # setting this value to 5 will result in a request being retried up to
- # 4 times. Used in `standard` and `adaptive` retry modes.
- #
- # @option options [String] :profile ("default")
- # Used when loading credentials from the shared credentials file at `HOME/.aws/credentials`.
- # When not specified, 'default' is used.
- #
- # @option options [String] :request_checksum_calculation ("when_supported")
- # Determines when a checksum will be calculated for request payloads. Values are:
- #
- # * `when_supported` - (default) When set, a checksum will be
- # calculated for all request payloads of operations modeled with the
- # `httpChecksum` trait where `requestChecksumRequired` is `true` and/or a
- # `requestAlgorithmMember` is modeled.
- # * `when_required` - When set, a checksum will only be calculated for
- # request payloads of operations modeled with the `httpChecksum` trait where
- # `requestChecksumRequired` is `true` or where a `requestAlgorithmMember`
- # is modeled and supplied.
- #
- # @option options [Integer] :request_min_compression_size_bytes (10240)
- # The minimum size in bytes that triggers compression for request
- # bodies. The value must be non-negative integer value between 0
- # and 10485780 bytes inclusive.
- #
- # @option options [String] :response_checksum_validation ("when_supported")
- # Determines when checksum validation will be performed on response payloads. Values are:
- #
- # * `when_supported` - (default) When set, checksum validation is performed on all
- # response payloads of operations modeled with the `httpChecksum` trait where
- # `responseAlgorithms` is modeled, except when no modeled checksum algorithms
- # are supported.
- # * `when_required` - When set, checksum validation is not performed on
- # response payloads of operations unless the checksum algorithm is supported and
- # the `requestValidationModeMember` member is set to `ENABLED`.
- #
- # @option options [Proc] :retry_backoff
- # A proc or lambda used for backoff. Defaults to 2**retries * retry_base_delay.
- # This option is only used in the `legacy` retry mode.
- #
- # @option options [Float] :retry_base_delay (0.3)
- # The base delay in seconds used by the default backoff function. This option
- # is only used in the `legacy` retry mode.
- #
- # @option options [Symbol] :retry_jitter (:none)
- # A delay randomiser function used by the default backoff function.
- # Some predefined functions can be referenced by name - :none, :equal, :full,
- # otherwise a Proc that takes and returns a number. This option is only used
- # in the `legacy` retry mode.
- #
- # @see https://www.awsarchitectureblog.com/2015/03/backoff.html
- #
- # @option options [Integer] :retry_limit (3)
- # The maximum number of times to retry failed requests. Only
- # ~ 500 level server errors and certain ~ 400 level client errors
- # are retried. Generally, these are throttling errors, data
- # checksum errors, networking errors, timeout errors, auth errors,
- # endpoint discovery, and errors from expired credentials.
- # This option is only used in the `legacy` retry mode.
- #
- # @option options [Integer] :retry_max_delay (0)
- # The maximum number of seconds to delay between retries (0 for no limit)
- # used by the default backoff function. This option is only used in the
- # `legacy` retry mode.
- #
- # @option options [String] :retry_mode ("legacy")
- # Specifies which retry algorithm to use. Values are:
- #
- # * `legacy` - The pre-existing retry behavior. This is default value if
- # no retry mode is provided.
- #
- # * `standard` - A standardized set of retry rules across the AWS SDKs.
- # This includes support for retry quotas, which limit the number of
- # unsuccessful retries a client can make.
- #
- # * `adaptive` - An experimental retry mode that includes all the
- # functionality of `standard` mode along with automatic client side
- # throttling. This is a provisional mode that may change behavior
- # in the future.
- #
- # @option options [String] :sdk_ua_app_id
- # A unique and opaque application ID that is appended to the
- # User-Agent header as app/sdk_ua_app_id. It should have a
- # maximum length of 50. This variable is sourced from environment
- # variable AWS_SDK_UA_APP_ID or the shared config profile attribute sdk_ua_app_id.
- #
- # @option options [String] :secret_access_key
- #
- # @option options [String] :session_token
- #
- # @option options [Array] :sigv4a_signing_region_set
- # A list of regions that should be signed with SigV4a signing. When
- # not passed, a default `:sigv4a_signing_region_set` is searched for
- # in the following locations:
- #
- # * `Aws.config[:sigv4a_signing_region_set]`
- # * `ENV['AWS_SIGV4A_SIGNING_REGION_SET']`
- # * `~/.aws/config`
- #
- # @option options [Boolean] :stub_responses (false)
- # Causes the client to return stubbed responses. By default
- # fake responses are generated and returned. You can specify
- # the response data to return or errors to raise by calling
- # {ClientStubs#stub_responses}. See {ClientStubs} for more information.
- #
- # ** Please note ** When response stubbing is enabled, no HTTP
- # requests are made, and retries are disabled.
- #
- # @option options [Aws::Telemetry::TelemetryProviderBase] :telemetry_provider (Aws::Telemetry::NoOpTelemetryProvider)
- # Allows you to provide a telemetry provider, which is used to
- # emit telemetry data. By default, uses `NoOpTelemetryProvider` which
- # will not record or emit any telemetry data. The SDK supports the
- # following telemetry providers:
- #
- # * OpenTelemetry (OTel) - To use the OTel provider, install and require the
- # `opentelemetry-sdk` gem and then, pass in an instance of a
- # `Aws::Telemetry::OTelProvider` for telemetry provider.
- #
- # @option options [Aws::TokenProvider] :token_provider
- # Your Bearer token used for authentication. This can be any class that includes and implements
- # `Aws::TokenProvider`, or instance of any one of the following classes:
- #
- # * `Aws::StaticTokenProvider` - Used for configuring static, non-refreshing
- # tokens.
- #
- # * `Aws::SSOTokenProvider` - Used for loading tokens from AWS SSO using an
- # access token generated from `aws login`.
- #
- # When `:token_provider` is not configured directly, the `Aws::TokenProviderChain`
- # will be used to search for tokens configured for your profile in shared configuration files.
- #
- # @option options [Boolean] :use_dualstack_endpoint
- # When set to `true`, dualstack enabled endpoints (with `.aws` TLD)
- # will be used if available.
- #
- # @option options [Boolean] :use_fips_endpoint
- # When set to `true`, fips compatible endpoints will be used if available.
- # When a `fips` region is used, the region is normalized and this config
- # is set to `true`.
- #
- # @option options [Boolean] :validate_params (true)
- # When `true`, request parameters are validated before
- # sending the request.
- #
- # @option options [Aws::IoTAnalytics::EndpointProvider] :endpoint_provider
- # The endpoint provider used to resolve endpoints. Any object that responds to
- # `#resolve_endpoint(parameters)` where `parameters` is a Struct similar to
- # `Aws::IoTAnalytics::EndpointParameters`.
- #
- # @option options [Float] :http_continue_timeout (1)
- # The number of seconds to wait for a 100-continue response before sending the
- # request body. This option has no effect unless the request has "Expect"
- # header set to "100-continue". Defaults to `nil` which disables this
- # behaviour. This value can safely be set per request on the session.
- #
- # @option options [Float] :http_idle_timeout (5)
- # The number of seconds a connection is allowed to sit idle before it
- # is considered stale. Stale connections are closed and removed from the
- # pool before making a request.
- #
- # @option options [Float] :http_open_timeout (15)
- # The default number of seconds to wait for response data.
- # This value can safely be set per-request on the session.
- #
- # @option options [URI::HTTP,String] :http_proxy
- # A proxy to send requests through. Formatted like 'http://proxy.com:123'.
- #
- # @option options [Float] :http_read_timeout (60)
- # The default number of seconds to wait for response data.
- # This value can safely be set per-request on the session.
- #
- # @option options [Boolean] :http_wire_trace (false)
- # When `true`, HTTP debug output will be sent to the `:logger`.
- #
- # @option options [Proc] :on_chunk_received
- # When a Proc object is provided, it will be used as callback when each chunk
- # of the response body is received. It provides three arguments: the chunk,
- # the number of bytes received, and the total number of
- # bytes in the response (or nil if the server did not send a `content-length`).
- #
- # @option options [Proc] :on_chunk_sent
- # When a Proc object is provided, it will be used as callback when each chunk
- # of the request body is sent. It provides three arguments: the chunk,
- # the number of bytes read from the body, and the total number of
- # bytes in the body.
- #
- # @option options [Boolean] :raise_response_errors (true)
- # When `true`, response errors are raised.
- #
- # @option options [String] :ssl_ca_bundle
- # Full path to the SSL certificate authority bundle file that should be used when
- # verifying peer certificates. If you do not pass `:ssl_ca_bundle` or
- # `:ssl_ca_directory` the the system default will be used if available.
- #
- # @option options [String] :ssl_ca_directory
- # Full path of the directory that contains the unbundled SSL certificate
- # authority files for verifying peer certificates. If you do
- # not pass `:ssl_ca_bundle` or `:ssl_ca_directory` the the system
- # default will be used if available.
- #
- # @option options [String] :ssl_ca_store
- # Sets the X509::Store to verify peer certificate.
- #
- # @option options [OpenSSL::X509::Certificate] :ssl_cert
- # Sets a client certificate when creating http connections.
- #
- # @option options [OpenSSL::PKey] :ssl_key
- # Sets a client key when creating http connections.
- #
- # @option options [Float] :ssl_timeout
- # Sets the SSL timeout in seconds
- #
- # @option options [Boolean] :ssl_verify_peer (true)
- # When `true`, SSL peer certificates are verified when establishing a connection.
- #
- def initialize(*args)
- super
- end
-
- # @!group API Operations
-
- # Sends messages to a channel.
- #
- # @option params [required, String] :channel_name
- # The name of the channel where the messages are sent.
- #
- # @option params [required, Array] :messages
- # The list of messages to be sent. Each message has the format: \{
- # "messageId": "string", "payload": "string"}.
- #
- # The field names of message payloads (data) that you send to IoT
- # Analytics:
- #
- # * Must contain only alphanumeric characters and undescores (\_). No
- # other special characters are allowed.
- #
- # * Must begin with an alphabetic character or single underscore (\_).
- #
- # * Cannot contain hyphens (-).
- #
- # * In regular expression terms:
- # "^\[A-Za-z\_\](\[A-Za-z0-9\]*\|\[A-Za-z0-9\]\[A-Za-z0-9\_\]*)$".
- #
- # * Cannot be more than 255 characters.
- #
- # * Are case insensitive. (Fields named foo and FOO in the same payload
- # are considered duplicates.)
- #
- # For example, \{"temp\_01": 29} or \{"\_temp\_01": 29} are valid,
- # but \{"temp-01": 29}, \{"01\_temp": 29} or \{"\_\_temp\_01": 29}
- # are invalid in message payloads.
- #
- # @return [Types::BatchPutMessageResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::BatchPutMessageResponse#batch_put_message_error_entries #batch_put_message_error_entries} => Array<Types::BatchPutMessageErrorEntry>
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.batch_put_message({
- # channel_name: "ChannelName", # required
- # messages: [ # required
- # {
- # message_id: "MessageId", # required
- # payload: "data", # required
- # },
- # ],
- # })
- #
- # @example Response structure
- #
- # resp.batch_put_message_error_entries #=> Array
- # resp.batch_put_message_error_entries[0].message_id #=> String
- # resp.batch_put_message_error_entries[0].error_code #=> String
- # resp.batch_put_message_error_entries[0].error_message #=> String
- #
- # @overload batch_put_message(params = {})
- # @param [Hash] params ({})
- def batch_put_message(params = {}, options = {})
- req = build_request(:batch_put_message, params)
- req.send_request(options)
- end
-
- # Cancels the reprocessing of data through the pipeline.
- #
- # @option params [required, String] :pipeline_name
- # The name of pipeline for which data reprocessing is canceled.
- #
- # @option params [required, String] :reprocessing_id
- # The ID of the reprocessing task (returned by
- # `StartPipelineReprocessing`).
- #
- # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.cancel_pipeline_reprocessing({
- # pipeline_name: "PipelineName", # required
- # reprocessing_id: "ReprocessingId", # required
- # })
- #
- # @overload cancel_pipeline_reprocessing(params = {})
- # @param [Hash] params ({})
- def cancel_pipeline_reprocessing(params = {}, options = {})
- req = build_request(:cancel_pipeline_reprocessing, params)
- req.send_request(options)
- end
-
- # Used to create a channel. A channel collects data from an MQTT topic
- # and archives the raw, unprocessed messages before publishing the data
- # to a pipeline.
- #
- # @option params [required, String] :channel_name
- # The name of the channel.
- #
- # @option params [Types::ChannelStorage] :channel_storage
- # Where channel data is stored. You can choose one of `serviceManagedS3`
- # or `customerManagedS3` storage. If not specified, the default is
- # `serviceManagedS3`. You can't change this storage option after the
- # channel is created.
- #
- # @option params [Types::RetentionPeriod] :retention_period
- # How long, in days, message data is kept for the channel. When
- # `customerManagedS3` storage is selected, this parameter is ignored.
- #
- # @option params [Array] :tags
- # Metadata which can be used to manage the channel.
- #
- # @return [Types::CreateChannelResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::CreateChannelResponse#channel_name #channel_name} => String
- # * {Types::CreateChannelResponse#channel_arn #channel_arn} => String
- # * {Types::CreateChannelResponse#retention_period #retention_period} => Types::RetentionPeriod
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.create_channel({
- # channel_name: "ChannelName", # required
- # channel_storage: {
- # service_managed_s3: {
- # },
- # customer_managed_s3: {
- # bucket: "BucketName", # required
- # key_prefix: "S3KeyPrefix",
- # role_arn: "RoleArn", # required
- # },
- # },
- # retention_period: {
- # unlimited: false,
- # number_of_days: 1,
- # },
- # tags: [
- # {
- # key: "TagKey", # required
- # value: "TagValue", # required
- # },
- # ],
- # })
- #
- # @example Response structure
- #
- # resp.channel_name #=> String
- # resp.channel_arn #=> String
- # resp.retention_period.unlimited #=> Boolean
- # resp.retention_period.number_of_days #=> Integer
- #
- # @overload create_channel(params = {})
- # @param [Hash] params ({})
- def create_channel(params = {}, options = {})
- req = build_request(:create_channel, params)
- req.send_request(options)
- end
-
- # Used to create a dataset. A dataset stores data retrieved from a data
- # store by applying a `queryAction` (a SQL query) or a `containerAction`
- # (executing a containerized application). This operation creates the
- # skeleton of a dataset. The dataset can be populated manually by
- # calling `CreateDatasetContent` or automatically according to a trigger
- # you specify.
- #
- # @option params [required, String] :dataset_name
- # The name of the dataset.
- #
- # @option params [required, Array] :actions
- # A list of actions that create the dataset contents.
- #
- # @option params [Array] :triggers
- # A list of triggers. A trigger causes dataset contents to be populated
- # at a specified time interval or when another dataset's contents are
- # created. The list of triggers can be empty or contain up to five
- # `DataSetTrigger` objects.
- #
- # @option params [Array] :content_delivery_rules
- # When dataset contents are created, they are delivered to destinations
- # specified here.
- #
- # @option params [Types::RetentionPeriod] :retention_period
- # Optional. How long, in days, versions of dataset contents are kept for
- # the dataset. If not specified or set to `null`, versions of dataset
- # contents are retained for at most 90 days. The number of versions of
- # dataset contents retained is determined by the
- # `versioningConfiguration` parameter. For more information, see [
- # Keeping Multiple Versions of IoT Analytics datasets][1] in the *IoT
- # Analytics User Guide*.
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/iotanalytics/latest/userguide/getting-started.html#aws-iot-analytics-dataset-versions
- #
- # @option params [Types::VersioningConfiguration] :versioning_configuration
- # Optional. How many versions of dataset contents are kept. If not
- # specified or set to null, only the latest version plus the latest
- # succeeded version (if they are different) are kept for the time period
- # specified by the `retentionPeriod` parameter. For more information,
- # see [Keeping Multiple Versions of IoT Analytics datasets][1] in the
- # *IoT Analytics User Guide*.
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/iotanalytics/latest/userguide/getting-started.html#aws-iot-analytics-dataset-versions
- #
- # @option params [Array] :tags
- # Metadata which can be used to manage the dataset.
- #
- # @option params [Array] :late_data_rules
- # A list of data rules that send notifications to CloudWatch, when data
- # arrives late. To specify `lateDataRules`, the dataset must use a
- # [DeltaTimer][1] filter.
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/iotanalytics/latest/APIReference/API_DeltaTime.html
- #
- # @return [Types::CreateDatasetResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::CreateDatasetResponse#dataset_name #dataset_name} => String
- # * {Types::CreateDatasetResponse#dataset_arn #dataset_arn} => String
- # * {Types::CreateDatasetResponse#retention_period #retention_period} => Types::RetentionPeriod
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.create_dataset({
- # dataset_name: "DatasetName", # required
- # actions: [ # required
- # {
- # action_name: "DatasetActionName",
- # query_action: {
- # sql_query: "SqlQuery", # required
- # filters: [
- # {
- # delta_time: {
- # offset_seconds: 1, # required
- # time_expression: "TimeExpression", # required
- # },
- # },
- # ],
- # },
- # container_action: {
- # image: "Image", # required
- # execution_role_arn: "RoleArn", # required
- # resource_configuration: { # required
- # compute_type: "ACU_1", # required, accepts ACU_1, ACU_2
- # volume_size_in_gb: 1, # required
- # },
- # variables: [
- # {
- # name: "VariableName", # required
- # string_value: "StringValue",
- # double_value: 1.0,
- # dataset_content_version_value: {
- # dataset_name: "DatasetName", # required
- # },
- # output_file_uri_value: {
- # file_name: "OutputFileName", # required
- # },
- # },
- # ],
- # },
- # },
- # ],
- # triggers: [
- # {
- # schedule: {
- # expression: "ScheduleExpression",
- # },
- # dataset: {
- # name: "DatasetName", # required
- # },
- # },
- # ],
- # content_delivery_rules: [
- # {
- # entry_name: "EntryName",
- # destination: { # required
- # iot_events_destination_configuration: {
- # input_name: "IotEventsInputName", # required
- # role_arn: "RoleArn", # required
- # },
- # s3_destination_configuration: {
- # bucket: "BucketName", # required
- # key: "BucketKeyExpression", # required
- # glue_configuration: {
- # table_name: "GlueTableName", # required
- # database_name: "GlueDatabaseName", # required
- # },
- # role_arn: "RoleArn", # required
- # },
- # },
- # },
- # ],
- # retention_period: {
- # unlimited: false,
- # number_of_days: 1,
- # },
- # versioning_configuration: {
- # unlimited: false,
- # max_versions: 1,
- # },
- # tags: [
- # {
- # key: "TagKey", # required
- # value: "TagValue", # required
- # },
- # ],
- # late_data_rules: [
- # {
- # rule_name: "LateDataRuleName",
- # rule_configuration: { # required
- # delta_time_session_window_configuration: {
- # timeout_in_minutes: 1, # required
- # },
- # },
- # },
- # ],
- # })
- #
- # @example Response structure
- #
- # resp.dataset_name #=> String
- # resp.dataset_arn #=> String
- # resp.retention_period.unlimited #=> Boolean
- # resp.retention_period.number_of_days #=> Integer
- #
- # @overload create_dataset(params = {})
- # @param [Hash] params ({})
- def create_dataset(params = {}, options = {})
- req = build_request(:create_dataset, params)
- req.send_request(options)
- end
-
- # Creates the content of a dataset by applying a `queryAction` (a SQL
- # query) or a `containerAction` (executing a containerized application).
- #
- # @option params [required, String] :dataset_name
- # The name of the dataset.
- #
- # @option params [String] :version_id
- # The version ID of the dataset content. To specify `versionId` for a
- # dataset content, the dataset must use a [DeltaTimer][1] filter.
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/iotanalytics/latest/APIReference/API_DeltaTime.html
- #
- # @return [Types::CreateDatasetContentResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::CreateDatasetContentResponse#version_id #version_id} => String
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.create_dataset_content({
- # dataset_name: "DatasetName", # required
- # version_id: "DatasetContentVersion",
- # })
- #
- # @example Response structure
- #
- # resp.version_id #=> String
- #
- # @overload create_dataset_content(params = {})
- # @param [Hash] params ({})
- def create_dataset_content(params = {}, options = {})
- req = build_request(:create_dataset_content, params)
- req.send_request(options)
- end
-
- # Creates a data store, which is a repository for messages.
- #
- # @option params [required, String] :datastore_name
- # The name of the data store.
- #
- # @option params [Types::DatastoreStorage] :datastore_storage
- # Where data in a data store is stored.. You can choose
- # `serviceManagedS3` storage, `customerManagedS3` storage, or
- # `iotSiteWiseMultiLayerStorage` storage. The default is
- # `serviceManagedS3`. You can't change the choice of Amazon S3 storage
- # after your data store is created.
- #
- # @option params [Types::RetentionPeriod] :retention_period
- # How long, in days, message data is kept for the data store. When
- # `customerManagedS3` storage is selected, this parameter is ignored.
- #
- # @option params [Array] :tags
- # Metadata which can be used to manage the data store.
- #
- # @option params [Types::FileFormatConfiguration] :file_format_configuration
- # Contains the configuration information of file formats. IoT Analytics
- # data stores support JSON and [Parquet][1].
- #
- # The default file format is JSON. You can specify only one format.
- #
- # You can't change the file format after you create the data store.
- #
- #
- #
- # [1]: https://parquet.apache.org/
- #
- # @option params [Types::DatastorePartitions] :datastore_partitions
- # Contains information about the partition dimensions in a data store.
- #
- # @return [Types::CreateDatastoreResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::CreateDatastoreResponse#datastore_name #datastore_name} => String
- # * {Types::CreateDatastoreResponse#datastore_arn #datastore_arn} => String
- # * {Types::CreateDatastoreResponse#retention_period #retention_period} => Types::RetentionPeriod
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.create_datastore({
- # datastore_name: "DatastoreName", # required
- # datastore_storage: {
- # service_managed_s3: {
- # },
- # customer_managed_s3: {
- # bucket: "BucketName", # required
- # key_prefix: "S3KeyPrefix",
- # role_arn: "RoleArn", # required
- # },
- # iot_site_wise_multi_layer_storage: {
- # customer_managed_s3_storage: { # required
- # bucket: "BucketName", # required
- # key_prefix: "S3KeyPrefix",
- # },
- # },
- # },
- # retention_period: {
- # unlimited: false,
- # number_of_days: 1,
- # },
- # tags: [
- # {
- # key: "TagKey", # required
- # value: "TagValue", # required
- # },
- # ],
- # file_format_configuration: {
- # json_configuration: {
- # },
- # parquet_configuration: {
- # schema_definition: {
- # columns: [
- # {
- # name: "ColumnName", # required
- # type: "ColumnDataType", # required
- # },
- # ],
- # },
- # },
- # },
- # datastore_partitions: {
- # partitions: [
- # {
- # attribute_partition: {
- # attribute_name: "PartitionAttributeName", # required
- # },
- # timestamp_partition: {
- # attribute_name: "PartitionAttributeName", # required
- # timestamp_format: "TimestampFormat",
- # },
- # },
- # ],
- # },
- # })
- #
- # @example Response structure
- #
- # resp.datastore_name #=> String
- # resp.datastore_arn #=> String
- # resp.retention_period.unlimited #=> Boolean
- # resp.retention_period.number_of_days #=> Integer
- #
- # @overload create_datastore(params = {})
- # @param [Hash] params ({})
- def create_datastore(params = {}, options = {})
- req = build_request(:create_datastore, params)
- req.send_request(options)
- end
-
- # Creates a pipeline. A pipeline consumes messages from a channel and
- # allows you to process the messages before storing them in a data
- # store. You must specify both a `channel` and a `datastore` activity
- # and, optionally, as many as 23 additional activities in the
- # `pipelineActivities` array.
- #
- # @option params [required, String] :pipeline_name
- # The name of the pipeline.
- #
- # @option params [required, Array] :pipeline_activities
- # A list of `PipelineActivity` objects. Activities perform
- # transformations on your messages, such as removing, renaming or adding
- # message attributes; filtering messages based on attribute values;
- # invoking your Lambda unctions on messages for advanced processing; or
- # performing mathematical transformations to normalize device data.
- #
- # The list can be 2-25 `PipelineActivity` objects and must contain both
- # a `channel` and a `datastore` activity. Each entry in the list must
- # contain only one activity. For example:
- #
- # `pipelineActivities = [ { "channel": { ... } }, { "lambda": { ... } },
- # ... ]`
- #
- # @option params [Array] :tags
- # Metadata which can be used to manage the pipeline.
- #
- # @return [Types::CreatePipelineResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::CreatePipelineResponse#pipeline_name #pipeline_name} => String
- # * {Types::CreatePipelineResponse#pipeline_arn #pipeline_arn} => String
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.create_pipeline({
- # pipeline_name: "PipelineName", # required
- # pipeline_activities: [ # required
- # {
- # channel: {
- # name: "ActivityName", # required
- # channel_name: "ChannelName", # required
- # next: "ActivityName",
- # },
- # lambda: {
- # name: "ActivityName", # required
- # lambda_name: "LambdaName", # required
- # batch_size: 1, # required
- # next: "ActivityName",
- # },
- # datastore: {
- # name: "ActivityName", # required
- # datastore_name: "DatastoreName", # required
- # },
- # add_attributes: {
- # name: "ActivityName", # required
- # attributes: { # required
- # "AttributeName" => "AttributeName",
- # },
- # next: "ActivityName",
- # },
- # remove_attributes: {
- # name: "ActivityName", # required
- # attributes: ["AttributeName"], # required
- # next: "ActivityName",
- # },
- # select_attributes: {
- # name: "ActivityName", # required
- # attributes: ["AttributeName"], # required
- # next: "ActivityName",
- # },
- # filter: {
- # name: "ActivityName", # required
- # filter: "FilterExpression", # required
- # next: "ActivityName",
- # },
- # math: {
- # name: "ActivityName", # required
- # attribute: "AttributeName", # required
- # math: "MathExpression", # required
- # next: "ActivityName",
- # },
- # device_registry_enrich: {
- # name: "ActivityName", # required
- # attribute: "AttributeName", # required
- # thing_name: "AttributeName", # required
- # role_arn: "RoleArn", # required
- # next: "ActivityName",
- # },
- # device_shadow_enrich: {
- # name: "ActivityName", # required
- # attribute: "AttributeName", # required
- # thing_name: "AttributeName", # required
- # role_arn: "RoleArn", # required
- # next: "ActivityName",
- # },
- # },
- # ],
- # tags: [
- # {
- # key: "TagKey", # required
- # value: "TagValue", # required
- # },
- # ],
- # })
- #
- # @example Response structure
- #
- # resp.pipeline_name #=> String
- # resp.pipeline_arn #=> String
- #
- # @overload create_pipeline(params = {})
- # @param [Hash] params ({})
- def create_pipeline(params = {}, options = {})
- req = build_request(:create_pipeline, params)
- req.send_request(options)
- end
-
- # Deletes the specified channel.
- #
- # @option params [required, String] :channel_name
- # The name of the channel to delete.
- #
- # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.delete_channel({
- # channel_name: "ChannelName", # required
- # })
- #
- # @overload delete_channel(params = {})
- # @param [Hash] params ({})
- def delete_channel(params = {}, options = {})
- req = build_request(:delete_channel, params)
- req.send_request(options)
- end
-
- # Deletes the specified dataset.
- #
- # You do not have to delete the content of the dataset before you
- # perform this operation.
- #
- # @option params [required, String] :dataset_name
- # The name of the dataset to delete.
- #
- # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.delete_dataset({
- # dataset_name: "DatasetName", # required
- # })
- #
- # @overload delete_dataset(params = {})
- # @param [Hash] params ({})
- def delete_dataset(params = {}, options = {})
- req = build_request(:delete_dataset, params)
- req.send_request(options)
- end
-
- # Deletes the content of the specified dataset.
- #
- # @option params [required, String] :dataset_name
- # The name of the dataset whose content is deleted.
- #
- # @option params [String] :version_id
- # The version of the dataset whose content is deleted. You can also use
- # the strings "$LATEST" or "$LATEST\_SUCCEEDED" to delete the latest
- # or latest successfully completed data set. If not specified,
- # "$LATEST\_SUCCEEDED" is the default.
- #
- # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.delete_dataset_content({
- # dataset_name: "DatasetName", # required
- # version_id: "DatasetContentVersion",
- # })
- #
- # @overload delete_dataset_content(params = {})
- # @param [Hash] params ({})
- def delete_dataset_content(params = {}, options = {})
- req = build_request(:delete_dataset_content, params)
- req.send_request(options)
- end
-
- # Deletes the specified data store.
- #
- # @option params [required, String] :datastore_name
- # The name of the data store to delete.
- #
- # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.delete_datastore({
- # datastore_name: "DatastoreName", # required
- # })
- #
- # @overload delete_datastore(params = {})
- # @param [Hash] params ({})
- def delete_datastore(params = {}, options = {})
- req = build_request(:delete_datastore, params)
- req.send_request(options)
- end
-
- # Deletes the specified pipeline.
- #
- # @option params [required, String] :pipeline_name
- # The name of the pipeline to delete.
- #
- # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.delete_pipeline({
- # pipeline_name: "PipelineName", # required
- # })
- #
- # @overload delete_pipeline(params = {})
- # @param [Hash] params ({})
- def delete_pipeline(params = {}, options = {})
- req = build_request(:delete_pipeline, params)
- req.send_request(options)
- end
-
- # Retrieves information about a channel.
- #
- # @option params [required, String] :channel_name
- # The name of the channel whose information is retrieved.
- #
- # @option params [Boolean] :include_statistics
- # If true, additional statistical information about the channel is
- # included in the response. This feature can't be used with a channel
- # whose S3 storage is customer-managed.
- #
- # @return [Types::DescribeChannelResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::DescribeChannelResponse#channel #channel} => Types::Channel
- # * {Types::DescribeChannelResponse#statistics #statistics} => Types::ChannelStatistics
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.describe_channel({
- # channel_name: "ChannelName", # required
- # include_statistics: false,
- # })
- #
- # @example Response structure
- #
- # resp.channel.name #=> String
- # resp.channel.storage.customer_managed_s3.bucket #=> String
- # resp.channel.storage.customer_managed_s3.key_prefix #=> String
- # resp.channel.storage.customer_managed_s3.role_arn #=> String
- # resp.channel.arn #=> String
- # resp.channel.status #=> String, one of "CREATING", "ACTIVE", "DELETING"
- # resp.channel.retention_period.unlimited #=> Boolean
- # resp.channel.retention_period.number_of_days #=> Integer
- # resp.channel.creation_time #=> Time
- # resp.channel.last_update_time #=> Time
- # resp.channel.last_message_arrival_time #=> Time
- # resp.statistics.size.estimated_size_in_bytes #=> Float
- # resp.statistics.size.estimated_on #=> Time
- #
- # @overload describe_channel(params = {})
- # @param [Hash] params ({})
- def describe_channel(params = {}, options = {})
- req = build_request(:describe_channel, params)
- req.send_request(options)
- end
-
- # Retrieves information about a dataset.
- #
- # @option params [required, String] :dataset_name
- # The name of the dataset whose information is retrieved.
- #
- # @return [Types::DescribeDatasetResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::DescribeDatasetResponse#dataset #dataset} => Types::Dataset
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.describe_dataset({
- # dataset_name: "DatasetName", # required
- # })
- #
- # @example Response structure
- #
- # resp.dataset.name #=> String
- # resp.dataset.arn #=> String
- # resp.dataset.actions #=> Array
- # resp.dataset.actions[0].action_name #=> String
- # resp.dataset.actions[0].query_action.sql_query #=> String
- # resp.dataset.actions[0].query_action.filters #=> Array
- # resp.dataset.actions[0].query_action.filters[0].delta_time.offset_seconds #=> Integer
- # resp.dataset.actions[0].query_action.filters[0].delta_time.time_expression #=> String
- # resp.dataset.actions[0].container_action.image #=> String
- # resp.dataset.actions[0].container_action.execution_role_arn #=> String
- # resp.dataset.actions[0].container_action.resource_configuration.compute_type #=> String, one of "ACU_1", "ACU_2"
- # resp.dataset.actions[0].container_action.resource_configuration.volume_size_in_gb #=> Integer
- # resp.dataset.actions[0].container_action.variables #=> Array
- # resp.dataset.actions[0].container_action.variables[0].name #=> String
- # resp.dataset.actions[0].container_action.variables[0].string_value #=> String
- # resp.dataset.actions[0].container_action.variables[0].double_value #=> Float
- # resp.dataset.actions[0].container_action.variables[0].dataset_content_version_value.dataset_name #=> String
- # resp.dataset.actions[0].container_action.variables[0].output_file_uri_value.file_name #=> String
- # resp.dataset.triggers #=> Array
- # resp.dataset.triggers[0].schedule.expression #=> String
- # resp.dataset.triggers[0].dataset.name #=> String
- # resp.dataset.content_delivery_rules #=> Array
- # resp.dataset.content_delivery_rules[0].entry_name #=> String
- # resp.dataset.content_delivery_rules[0].destination.iot_events_destination_configuration.input_name #=> String
- # resp.dataset.content_delivery_rules[0].destination.iot_events_destination_configuration.role_arn #=> String
- # resp.dataset.content_delivery_rules[0].destination.s3_destination_configuration.bucket #=> String
- # resp.dataset.content_delivery_rules[0].destination.s3_destination_configuration.key #=> String
- # resp.dataset.content_delivery_rules[0].destination.s3_destination_configuration.glue_configuration.table_name #=> String
- # resp.dataset.content_delivery_rules[0].destination.s3_destination_configuration.glue_configuration.database_name #=> String
- # resp.dataset.content_delivery_rules[0].destination.s3_destination_configuration.role_arn #=> String
- # resp.dataset.status #=> String, one of "CREATING", "ACTIVE", "DELETING"
- # resp.dataset.creation_time #=> Time
- # resp.dataset.last_update_time #=> Time
- # resp.dataset.retention_period.unlimited #=> Boolean
- # resp.dataset.retention_period.number_of_days #=> Integer
- # resp.dataset.versioning_configuration.unlimited #=> Boolean
- # resp.dataset.versioning_configuration.max_versions #=> Integer
- # resp.dataset.late_data_rules #=> Array
- # resp.dataset.late_data_rules[0].rule_name #=> String
- # resp.dataset.late_data_rules[0].rule_configuration.delta_time_session_window_configuration.timeout_in_minutes #=> Integer
- #
- # @overload describe_dataset(params = {})
- # @param [Hash] params ({})
- def describe_dataset(params = {}, options = {})
- req = build_request(:describe_dataset, params)
- req.send_request(options)
- end
-
- # Retrieves information about a data store.
- #
- # @option params [required, String] :datastore_name
- # The name of the data store
- #
- # @option params [Boolean] :include_statistics
- # If true, additional statistical information about the data store is
- # included in the response. This feature can't be used with a data
- # store whose S3 storage is customer-managed.
- #
- # @return [Types::DescribeDatastoreResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::DescribeDatastoreResponse#datastore #datastore} => Types::Datastore
- # * {Types::DescribeDatastoreResponse#statistics #statistics} => Types::DatastoreStatistics
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.describe_datastore({
- # datastore_name: "DatastoreName", # required
- # include_statistics: false,
- # })
- #
- # @example Response structure
- #
- # resp.datastore.name #=> String
- # resp.datastore.storage.customer_managed_s3.bucket #=> String
- # resp.datastore.storage.customer_managed_s3.key_prefix #=> String
- # resp.datastore.storage.customer_managed_s3.role_arn #=> String
- # resp.datastore.storage.iot_site_wise_multi_layer_storage.customer_managed_s3_storage.bucket #=> String
- # resp.datastore.storage.iot_site_wise_multi_layer_storage.customer_managed_s3_storage.key_prefix #=> String
- # resp.datastore.arn #=> String
- # resp.datastore.status #=> String, one of "CREATING", "ACTIVE", "DELETING"
- # resp.datastore.retention_period.unlimited #=> Boolean
- # resp.datastore.retention_period.number_of_days #=> Integer
- # resp.datastore.creation_time #=> Time
- # resp.datastore.last_update_time #=> Time
- # resp.datastore.last_message_arrival_time #=> Time
- # resp.datastore.file_format_configuration.parquet_configuration.schema_definition.columns #=> Array
- # resp.datastore.file_format_configuration.parquet_configuration.schema_definition.columns[0].name #=> String
- # resp.datastore.file_format_configuration.parquet_configuration.schema_definition.columns[0].type #=> String
- # resp.datastore.datastore_partitions.partitions #=> Array
- # resp.datastore.datastore_partitions.partitions[0].attribute_partition.attribute_name #=> String
- # resp.datastore.datastore_partitions.partitions[0].timestamp_partition.attribute_name #=> String
- # resp.datastore.datastore_partitions.partitions[0].timestamp_partition.timestamp_format #=> String
- # resp.statistics.size.estimated_size_in_bytes #=> Float
- # resp.statistics.size.estimated_on #=> Time
- #
- # @overload describe_datastore(params = {})
- # @param [Hash] params ({})
- def describe_datastore(params = {}, options = {})
- req = build_request(:describe_datastore, params)
- req.send_request(options)
- end
-
- # Retrieves the current settings of the IoT Analytics logging options.
- #
- # @return [Types::DescribeLoggingOptionsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::DescribeLoggingOptionsResponse#logging_options #logging_options} => Types::LoggingOptions
- #
- # @example Response structure
- #
- # resp.logging_options.role_arn #=> String
- # resp.logging_options.level #=> String, one of "ERROR"
- # resp.logging_options.enabled #=> Boolean
- #
- # @overload describe_logging_options(params = {})
- # @param [Hash] params ({})
- def describe_logging_options(params = {}, options = {})
- req = build_request(:describe_logging_options, params)
- req.send_request(options)
- end
-
- # Retrieves information about a pipeline.
- #
- # @option params [required, String] :pipeline_name
- # The name of the pipeline whose information is retrieved.
- #
- # @return [Types::DescribePipelineResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::DescribePipelineResponse#pipeline #pipeline} => Types::Pipeline
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.describe_pipeline({
- # pipeline_name: "PipelineName", # required
- # })
- #
- # @example Response structure
- #
- # resp.pipeline.name #=> String
- # resp.pipeline.arn #=> String
- # resp.pipeline.activities #=> Array
- # resp.pipeline.activities[0].channel.name #=> String
- # resp.pipeline.activities[0].channel.channel_name #=> String
- # resp.pipeline.activities[0].channel.next #=> String
- # resp.pipeline.activities[0].lambda.name #=> String
- # resp.pipeline.activities[0].lambda.lambda_name #=> String
- # resp.pipeline.activities[0].lambda.batch_size #=> Integer
- # resp.pipeline.activities[0].lambda.next #=> String
- # resp.pipeline.activities[0].datastore.name #=> String
- # resp.pipeline.activities[0].datastore.datastore_name #=> String
- # resp.pipeline.activities[0].add_attributes.name #=> String
- # resp.pipeline.activities[0].add_attributes.attributes #=> Hash
- # resp.pipeline.activities[0].add_attributes.attributes["AttributeName"] #=> String
- # resp.pipeline.activities[0].add_attributes.next #=> String
- # resp.pipeline.activities[0].remove_attributes.name #=> String
- # resp.pipeline.activities[0].remove_attributes.attributes #=> Array
- # resp.pipeline.activities[0].remove_attributes.attributes[0] #=> String
- # resp.pipeline.activities[0].remove_attributes.next #=> String
- # resp.pipeline.activities[0].select_attributes.name #=> String
- # resp.pipeline.activities[0].select_attributes.attributes #=> Array
- # resp.pipeline.activities[0].select_attributes.attributes[0] #=> String
- # resp.pipeline.activities[0].select_attributes.next #=> String
- # resp.pipeline.activities[0].filter.name #=> String
- # resp.pipeline.activities[0].filter.filter #=> String
- # resp.pipeline.activities[0].filter.next #=> String
- # resp.pipeline.activities[0].math.name #=> String
- # resp.pipeline.activities[0].math.attribute #=> String
- # resp.pipeline.activities[0].math.math #=> String
- # resp.pipeline.activities[0].math.next #=> String
- # resp.pipeline.activities[0].device_registry_enrich.name #=> String
- # resp.pipeline.activities[0].device_registry_enrich.attribute #=> String
- # resp.pipeline.activities[0].device_registry_enrich.thing_name #=> String
- # resp.pipeline.activities[0].device_registry_enrich.role_arn #=> String
- # resp.pipeline.activities[0].device_registry_enrich.next #=> String
- # resp.pipeline.activities[0].device_shadow_enrich.name #=> String
- # resp.pipeline.activities[0].device_shadow_enrich.attribute #=> String
- # resp.pipeline.activities[0].device_shadow_enrich.thing_name #=> String
- # resp.pipeline.activities[0].device_shadow_enrich.role_arn #=> String
- # resp.pipeline.activities[0].device_shadow_enrich.next #=> String
- # resp.pipeline.reprocessing_summaries #=> Array
- # resp.pipeline.reprocessing_summaries[0].id #=> String
- # resp.pipeline.reprocessing_summaries[0].status #=> String, one of "RUNNING", "SUCCEEDED", "CANCELLED", "FAILED"
- # resp.pipeline.reprocessing_summaries[0].creation_time #=> Time
- # resp.pipeline.creation_time #=> Time
- # resp.pipeline.last_update_time #=> Time
- #
- # @overload describe_pipeline(params = {})
- # @param [Hash] params ({})
- def describe_pipeline(params = {}, options = {})
- req = build_request(:describe_pipeline, params)
- req.send_request(options)
- end
-
- # Retrieves the contents of a dataset as presigned URIs.
- #
- # @option params [required, String] :dataset_name
- # The name of the dataset whose contents are retrieved.
- #
- # @option params [String] :version_id
- # The version of the dataset whose contents are retrieved. You can also
- # use the strings "$LATEST" or "$LATEST\_SUCCEEDED" to retrieve the
- # contents of the latest or latest successfully completed dataset. If
- # not specified, "$LATEST\_SUCCEEDED" is the default.
- #
- # @return [Types::GetDatasetContentResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::GetDatasetContentResponse#entries #data.entries} => Array<Types::DatasetEntry> (This method conflicts with a method on Response, call it through the data member)
- # * {Types::GetDatasetContentResponse#timestamp #timestamp} => Time
- # * {Types::GetDatasetContentResponse#status #status} => Types::DatasetContentStatus
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.get_dataset_content({
- # dataset_name: "DatasetName", # required
- # version_id: "DatasetContentVersion",
- # })
- #
- # @example Response structure
- #
- # resp.data.entries #=> Array
- # resp.data.entries[0].entry_name #=> String
- # resp.data.entries[0].data_uri #=> String
- # resp.timestamp #=> Time
- # resp.status.state #=> String, one of "CREATING", "SUCCEEDED", "FAILED"
- # resp.status.reason #=> String
- #
- # @overload get_dataset_content(params = {})
- # @param [Hash] params ({})
- def get_dataset_content(params = {}, options = {})
- req = build_request(:get_dataset_content, params)
- req.send_request(options)
- end
-
- # Retrieves a list of channels.
- #
- # @option params [String] :next_token
- # The token for the next set of results.
- #
- # @option params [Integer] :max_results
- # The maximum number of results to return in this request.
- #
- # The default value is 100.
- #
- # @return [Types::ListChannelsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::ListChannelsResponse#channel_summaries #channel_summaries} => Array<Types::ChannelSummary>
- # * {Types::ListChannelsResponse#next_token #next_token} => String
- #
- # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.list_channels({
- # next_token: "NextToken",
- # max_results: 1,
- # })
- #
- # @example Response structure
- #
- # resp.channel_summaries #=> Array
- # resp.channel_summaries[0].channel_name #=> String
- # resp.channel_summaries[0].channel_storage.customer_managed_s3.bucket #=> String
- # resp.channel_summaries[0].channel_storage.customer_managed_s3.key_prefix #=> String
- # resp.channel_summaries[0].channel_storage.customer_managed_s3.role_arn #=> String
- # resp.channel_summaries[0].status #=> String, one of "CREATING", "ACTIVE", "DELETING"
- # resp.channel_summaries[0].creation_time #=> Time
- # resp.channel_summaries[0].last_update_time #=> Time
- # resp.channel_summaries[0].last_message_arrival_time #=> Time
- # resp.next_token #=> String
- #
- # @overload list_channels(params = {})
- # @param [Hash] params ({})
- def list_channels(params = {}, options = {})
- req = build_request(:list_channels, params)
- req.send_request(options)
- end
-
- # Lists information about dataset contents that have been created.
- #
- # @option params [required, String] :dataset_name
- # The name of the dataset whose contents information you want to list.
- #
- # @option params [String] :next_token
- # The token for the next set of results.
- #
- # @option params [Integer] :max_results
- # The maximum number of results to return in this request.
- #
- # @option params [Time,DateTime,Date,Integer,String] :scheduled_on_or_after
- # A filter to limit results to those dataset contents whose creation is
- # scheduled on or after the given time. See the field
- # `triggers.schedule` in the `CreateDataset` request. (timestamp)
- #
- # @option params [Time,DateTime,Date,Integer,String] :scheduled_before
- # A filter to limit results to those dataset contents whose creation is
- # scheduled before the given time. See the field `triggers.schedule` in
- # the `CreateDataset` request. (timestamp)
- #
- # @return [Types::ListDatasetContentsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::ListDatasetContentsResponse#dataset_content_summaries #dataset_content_summaries} => Array<Types::DatasetContentSummary>
- # * {Types::ListDatasetContentsResponse#next_token #next_token} => String
- #
- # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.list_dataset_contents({
- # dataset_name: "DatasetName", # required
- # next_token: "NextToken",
- # max_results: 1,
- # scheduled_on_or_after: Time.now,
- # scheduled_before: Time.now,
- # })
- #
- # @example Response structure
- #
- # resp.dataset_content_summaries #=> Array
- # resp.dataset_content_summaries[0].version #=> String
- # resp.dataset_content_summaries[0].status.state #=> String, one of "CREATING", "SUCCEEDED", "FAILED"
- # resp.dataset_content_summaries[0].status.reason #=> String
- # resp.dataset_content_summaries[0].creation_time #=> Time
- # resp.dataset_content_summaries[0].schedule_time #=> Time
- # resp.dataset_content_summaries[0].completion_time #=> Time
- # resp.next_token #=> String
- #
- # @overload list_dataset_contents(params = {})
- # @param [Hash] params ({})
- def list_dataset_contents(params = {}, options = {})
- req = build_request(:list_dataset_contents, params)
- req.send_request(options)
- end
-
- # Retrieves information about datasets.
- #
- # @option params [String] :next_token
- # The token for the next set of results.
- #
- # @option params [Integer] :max_results
- # The maximum number of results to return in this request.
- #
- # The default value is 100.
- #
- # @return [Types::ListDatasetsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::ListDatasetsResponse#dataset_summaries #dataset_summaries} => Array<Types::DatasetSummary>
- # * {Types::ListDatasetsResponse#next_token #next_token} => String
- #
- # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.list_datasets({
- # next_token: "NextToken",
- # max_results: 1,
- # })
- #
- # @example Response structure
- #
- # resp.dataset_summaries #=> Array
- # resp.dataset_summaries[0].dataset_name #=> String
- # resp.dataset_summaries[0].status #=> String, one of "CREATING", "ACTIVE", "DELETING"
- # resp.dataset_summaries[0].creation_time #=> Time
- # resp.dataset_summaries[0].last_update_time #=> Time
- # resp.dataset_summaries[0].triggers #=> Array
- # resp.dataset_summaries[0].triggers[0].schedule.expression #=> String
- # resp.dataset_summaries[0].triggers[0].dataset.name #=> String
- # resp.dataset_summaries[0].actions #=> Array
- # resp.dataset_summaries[0].actions[0].action_name #=> String
- # resp.dataset_summaries[0].actions[0].action_type #=> String, one of "QUERY", "CONTAINER"
- # resp.next_token #=> String
- #
- # @overload list_datasets(params = {})
- # @param [Hash] params ({})
- def list_datasets(params = {}, options = {})
- req = build_request(:list_datasets, params)
- req.send_request(options)
- end
-
- # Retrieves a list of data stores.
- #
- # @option params [String] :next_token
- # The token for the next set of results.
- #
- # @option params [Integer] :max_results
- # The maximum number of results to return in this request.
- #
- # The default value is 100.
- #
- # @return [Types::ListDatastoresResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::ListDatastoresResponse#datastore_summaries #datastore_summaries} => Array<Types::DatastoreSummary>
- # * {Types::ListDatastoresResponse#next_token #next_token} => String
- #
- # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.list_datastores({
- # next_token: "NextToken",
- # max_results: 1,
- # })
- #
- # @example Response structure
- #
- # resp.datastore_summaries #=> Array
- # resp.datastore_summaries[0].datastore_name #=> String
- # resp.datastore_summaries[0].datastore_storage.customer_managed_s3.bucket #=> String
- # resp.datastore_summaries[0].datastore_storage.customer_managed_s3.key_prefix #=> String
- # resp.datastore_summaries[0].datastore_storage.customer_managed_s3.role_arn #=> String
- # resp.datastore_summaries[0].datastore_storage.iot_site_wise_multi_layer_storage.customer_managed_s3_storage.bucket #=> String
- # resp.datastore_summaries[0].datastore_storage.iot_site_wise_multi_layer_storage.customer_managed_s3_storage.key_prefix #=> String
- # resp.datastore_summaries[0].status #=> String, one of "CREATING", "ACTIVE", "DELETING"
- # resp.datastore_summaries[0].creation_time #=> Time
- # resp.datastore_summaries[0].last_update_time #=> Time
- # resp.datastore_summaries[0].last_message_arrival_time #=> Time
- # resp.datastore_summaries[0].file_format_type #=> String, one of "JSON", "PARQUET"
- # resp.datastore_summaries[0].datastore_partitions.partitions #=> Array
- # resp.datastore_summaries[0].datastore_partitions.partitions[0].attribute_partition.attribute_name #=> String
- # resp.datastore_summaries[0].datastore_partitions.partitions[0].timestamp_partition.attribute_name #=> String
- # resp.datastore_summaries[0].datastore_partitions.partitions[0].timestamp_partition.timestamp_format #=> String
- # resp.next_token #=> String
- #
- # @overload list_datastores(params = {})
- # @param [Hash] params ({})
- def list_datastores(params = {}, options = {})
- req = build_request(:list_datastores, params)
- req.send_request(options)
- end
-
- # Retrieves a list of pipelines.
- #
- # @option params [String] :next_token
- # The token for the next set of results.
- #
- # @option params [Integer] :max_results
- # The maximum number of results to return in this request.
- #
- # The default value is 100.
- #
- # @return [Types::ListPipelinesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::ListPipelinesResponse#pipeline_summaries #pipeline_summaries} => Array<Types::PipelineSummary>
- # * {Types::ListPipelinesResponse#next_token #next_token} => String
- #
- # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.list_pipelines({
- # next_token: "NextToken",
- # max_results: 1,
- # })
- #
- # @example Response structure
- #
- # resp.pipeline_summaries #=> Array
- # resp.pipeline_summaries[0].pipeline_name #=> String
- # resp.pipeline_summaries[0].reprocessing_summaries #=> Array
- # resp.pipeline_summaries[0].reprocessing_summaries[0].id #=> String
- # resp.pipeline_summaries[0].reprocessing_summaries[0].status #=> String, one of "RUNNING", "SUCCEEDED", "CANCELLED", "FAILED"
- # resp.pipeline_summaries[0].reprocessing_summaries[0].creation_time #=> Time
- # resp.pipeline_summaries[0].creation_time #=> Time
- # resp.pipeline_summaries[0].last_update_time #=> Time
- # resp.next_token #=> String
- #
- # @overload list_pipelines(params = {})
- # @param [Hash] params ({})
- def list_pipelines(params = {}, options = {})
- req = build_request(:list_pipelines, params)
- req.send_request(options)
- end
-
- # Lists the tags (metadata) that you have assigned to the resource.
- #
- # @option params [required, String] :resource_arn
- # The ARN of the resource whose tags you want to list.
- #
- # @return [Types::ListTagsForResourceResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::ListTagsForResourceResponse#tags #tags} => Array<Types::Tag>
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.list_tags_for_resource({
- # resource_arn: "ResourceArn", # required
- # })
- #
- # @example Response structure
- #
- # resp.tags #=> Array
- # resp.tags[0].key #=> String
- # resp.tags[0].value #=> String
- #
- # @overload list_tags_for_resource(params = {})
- # @param [Hash] params ({})
- def list_tags_for_resource(params = {}, options = {})
- req = build_request(:list_tags_for_resource, params)
- req.send_request(options)
- end
-
- # Sets or updates the IoT Analytics logging options.
- #
- # If you update the value of any `loggingOptions` field, it takes up to
- # one minute for the change to take effect. Also, if you change the
- # policy attached to the role you specified in the `roleArn` field (for
- # example, to correct an invalid policy), it takes up to five minutes
- # for that change to take effect.
- #
- # @option params [required, Types::LoggingOptions] :logging_options
- # The new values of the IoT Analytics logging options.
- #
- # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.put_logging_options({
- # logging_options: { # required
- # role_arn: "RoleArn", # required
- # level: "ERROR", # required, accepts ERROR
- # enabled: false, # required
- # },
- # })
- #
- # @overload put_logging_options(params = {})
- # @param [Hash] params ({})
- def put_logging_options(params = {}, options = {})
- req = build_request(:put_logging_options, params)
- req.send_request(options)
- end
-
- # Simulates the results of running a pipeline activity on a message
- # payload.
- #
- # @option params [required, Types::PipelineActivity] :pipeline_activity
- # The pipeline activity that is run. This must not be a channel activity
- # or a data store activity because these activities are used in a
- # pipeline only to load the original message and to store the (possibly)
- # transformed message. If a Lambda activity is specified, only
- # short-running Lambda functions (those with a timeout of less than 30
- # seconds or less) can be used.
- #
- # @option params [required, Array] :payloads
- # The sample message payloads on which the pipeline activity is run.
- #
- # @return [Types::RunPipelineActivityResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::RunPipelineActivityResponse#payloads #payloads} => Array<String>
- # * {Types::RunPipelineActivityResponse#log_result #log_result} => String
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.run_pipeline_activity({
- # pipeline_activity: { # required
- # channel: {
- # name: "ActivityName", # required
- # channel_name: "ChannelName", # required
- # next: "ActivityName",
- # },
- # lambda: {
- # name: "ActivityName", # required
- # lambda_name: "LambdaName", # required
- # batch_size: 1, # required
- # next: "ActivityName",
- # },
- # datastore: {
- # name: "ActivityName", # required
- # datastore_name: "DatastoreName", # required
- # },
- # add_attributes: {
- # name: "ActivityName", # required
- # attributes: { # required
- # "AttributeName" => "AttributeName",
- # },
- # next: "ActivityName",
- # },
- # remove_attributes: {
- # name: "ActivityName", # required
- # attributes: ["AttributeName"], # required
- # next: "ActivityName",
- # },
- # select_attributes: {
- # name: "ActivityName", # required
- # attributes: ["AttributeName"], # required
- # next: "ActivityName",
- # },
- # filter: {
- # name: "ActivityName", # required
- # filter: "FilterExpression", # required
- # next: "ActivityName",
- # },
- # math: {
- # name: "ActivityName", # required
- # attribute: "AttributeName", # required
- # math: "MathExpression", # required
- # next: "ActivityName",
- # },
- # device_registry_enrich: {
- # name: "ActivityName", # required
- # attribute: "AttributeName", # required
- # thing_name: "AttributeName", # required
- # role_arn: "RoleArn", # required
- # next: "ActivityName",
- # },
- # device_shadow_enrich: {
- # name: "ActivityName", # required
- # attribute: "AttributeName", # required
- # thing_name: "AttributeName", # required
- # role_arn: "RoleArn", # required
- # next: "ActivityName",
- # },
- # },
- # payloads: ["data"], # required
- # })
- #
- # @example Response structure
- #
- # resp.payloads #=> Array
- # resp.payloads[0] #=> String
- # resp.log_result #=> String
- #
- # @overload run_pipeline_activity(params = {})
- # @param [Hash] params ({})
- def run_pipeline_activity(params = {}, options = {})
- req = build_request(:run_pipeline_activity, params)
- req.send_request(options)
- end
-
- # Retrieves a sample of messages from the specified channel ingested
- # during the specified timeframe. Up to 10 messages can be retrieved.
- #
- # @option params [required, String] :channel_name
- # The name of the channel whose message samples are retrieved.
- #
- # @option params [Integer] :max_messages
- # The number of sample messages to be retrieved. The limit is 10. The
- # default is also 10.
- #
- # @option params [Time,DateTime,Date,Integer,String] :start_time
- # The start of the time window from which sample messages are retrieved.
- #
- # @option params [Time,DateTime,Date,Integer,String] :end_time
- # The end of the time window from which sample messages are retrieved.
- #
- # @return [Types::SampleChannelDataResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::SampleChannelDataResponse#payloads #payloads} => Array<String>
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.sample_channel_data({
- # channel_name: "ChannelName", # required
- # max_messages: 1,
- # start_time: Time.now,
- # end_time: Time.now,
- # })
- #
- # @example Response structure
- #
- # resp.payloads #=> Array
- # resp.payloads[0] #=> String
- #
- # @overload sample_channel_data(params = {})
- # @param [Hash] params ({})
- def sample_channel_data(params = {}, options = {})
- req = build_request(:sample_channel_data, params)
- req.send_request(options)
- end
-
- # Starts the reprocessing of raw message data through the pipeline.
- #
- # @option params [required, String] :pipeline_name
- # The name of the pipeline on which to start reprocessing.
- #
- # @option params [Time,DateTime,Date,Integer,String] :start_time
- # The start time (inclusive) of raw message data that is reprocessed.
- #
- # If you specify a value for the `startTime` parameter, you must not use
- # the `channelMessages` object.
- #
- # @option params [Time,DateTime,Date,Integer,String] :end_time
- # The end time (exclusive) of raw message data that is reprocessed.
- #
- # If you specify a value for the `endTime` parameter, you must not use
- # the `channelMessages` object.
- #
- # @option params [Types::ChannelMessages] :channel_messages
- # Specifies one or more sets of channel messages that you want to
- # reprocess.
- #
- # If you use the `channelMessages` object, you must not specify a value
- # for `startTime` and `endTime`.
- #
- # @return [Types::StartPipelineReprocessingResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
- #
- # * {Types::StartPipelineReprocessingResponse#reprocessing_id #reprocessing_id} => String
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.start_pipeline_reprocessing({
- # pipeline_name: "PipelineName", # required
- # start_time: Time.now,
- # end_time: Time.now,
- # channel_messages: {
- # s3_paths: ["S3PathChannelMessage"],
- # },
- # })
- #
- # @example Response structure
- #
- # resp.reprocessing_id #=> String
- #
- # @overload start_pipeline_reprocessing(params = {})
- # @param [Hash] params ({})
- def start_pipeline_reprocessing(params = {}, options = {})
- req = build_request(:start_pipeline_reprocessing, params)
- req.send_request(options)
- end
-
- # Adds to or modifies the tags of the given resource. Tags are metadata
- # that can be used to manage a resource.
- #
- # @option params [required, String] :resource_arn
- # The ARN of the resource whose tags you want to modify.
- #
- # @option params [required, Array] :tags
- # The new or modified tags for the resource.
- #
- # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.tag_resource({
- # resource_arn: "ResourceArn", # required
- # tags: [ # required
- # {
- # key: "TagKey", # required
- # value: "TagValue", # required
- # },
- # ],
- # })
- #
- # @overload tag_resource(params = {})
- # @param [Hash] params ({})
- def tag_resource(params = {}, options = {})
- req = build_request(:tag_resource, params)
- req.send_request(options)
- end
-
- # Removes the given tags (metadata) from the resource.
- #
- # @option params [required, String] :resource_arn
- # The ARN of the resource whose tags you want to remove.
- #
- # @option params [required, Array] :tag_keys
- # The keys of those tags which you want to remove.
- #
- # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.untag_resource({
- # resource_arn: "ResourceArn", # required
- # tag_keys: ["TagKey"], # required
- # })
- #
- # @overload untag_resource(params = {})
- # @param [Hash] params ({})
- def untag_resource(params = {}, options = {})
- req = build_request(:untag_resource, params)
- req.send_request(options)
- end
-
- # Used to update the settings of a channel.
- #
- # @option params [required, String] :channel_name
- # The name of the channel to be updated.
- #
- # @option params [Types::ChannelStorage] :channel_storage
- # Where channel data is stored. You can choose one of `serviceManagedS3`
- # or `customerManagedS3` storage. If not specified, the default is
- # `serviceManagedS3`. You can't change this storage option after the
- # channel is created.
- #
- # @option params [Types::RetentionPeriod] :retention_period
- # How long, in days, message data is kept for the channel. The retention
- # period can't be updated if the channel's Amazon S3 storage is
- # customer-managed.
- #
- # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.update_channel({
- # channel_name: "ChannelName", # required
- # channel_storage: {
- # service_managed_s3: {
- # },
- # customer_managed_s3: {
- # bucket: "BucketName", # required
- # key_prefix: "S3KeyPrefix",
- # role_arn: "RoleArn", # required
- # },
- # },
- # retention_period: {
- # unlimited: false,
- # number_of_days: 1,
- # },
- # })
- #
- # @overload update_channel(params = {})
- # @param [Hash] params ({})
- def update_channel(params = {}, options = {})
- req = build_request(:update_channel, params)
- req.send_request(options)
- end
-
- # Updates the settings of a dataset.
- #
- # @option params [required, String] :dataset_name
- # The name of the dataset to update.
- #
- # @option params [required, Array] :actions
- # A list of `DatasetAction` objects.
- #
- # @option params [Array] :triggers
- # A list of `DatasetTrigger` objects. The list can be empty or can
- # contain up to five `DatasetTrigger` objects.
- #
- # @option params [Array] :content_delivery_rules
- # When dataset contents are created, they are delivered to destinations
- # specified here.
- #
- # @option params [Types::RetentionPeriod] :retention_period
- # How long, in days, dataset contents are kept for the dataset.
- #
- # @option params [Types::VersioningConfiguration] :versioning_configuration
- # Optional. How many versions of dataset contents are kept. If not
- # specified or set to null, only the latest version plus the latest
- # succeeded version (if they are different) are kept for the time period
- # specified by the `retentionPeriod` parameter. For more information,
- # see [Keeping Multiple Versions of IoT Analytics datasets][1] in the
- # *IoT Analytics User Guide*.
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/iotanalytics/latest/userguide/getting-started.html#aws-iot-analytics-dataset-versions
- #
- # @option params [Array] :late_data_rules
- # A list of data rules that send notifications to CloudWatch, when data
- # arrives late. To specify `lateDataRules`, the dataset must use a
- # [DeltaTimer][1] filter.
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/iotanalytics/latest/APIReference/API_DeltaTime.html
- #
- # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.update_dataset({
- # dataset_name: "DatasetName", # required
- # actions: [ # required
- # {
- # action_name: "DatasetActionName",
- # query_action: {
- # sql_query: "SqlQuery", # required
- # filters: [
- # {
- # delta_time: {
- # offset_seconds: 1, # required
- # time_expression: "TimeExpression", # required
- # },
- # },
- # ],
- # },
- # container_action: {
- # image: "Image", # required
- # execution_role_arn: "RoleArn", # required
- # resource_configuration: { # required
- # compute_type: "ACU_1", # required, accepts ACU_1, ACU_2
- # volume_size_in_gb: 1, # required
- # },
- # variables: [
- # {
- # name: "VariableName", # required
- # string_value: "StringValue",
- # double_value: 1.0,
- # dataset_content_version_value: {
- # dataset_name: "DatasetName", # required
- # },
- # output_file_uri_value: {
- # file_name: "OutputFileName", # required
- # },
- # },
- # ],
- # },
- # },
- # ],
- # triggers: [
- # {
- # schedule: {
- # expression: "ScheduleExpression",
- # },
- # dataset: {
- # name: "DatasetName", # required
- # },
- # },
- # ],
- # content_delivery_rules: [
- # {
- # entry_name: "EntryName",
- # destination: { # required
- # iot_events_destination_configuration: {
- # input_name: "IotEventsInputName", # required
- # role_arn: "RoleArn", # required
- # },
- # s3_destination_configuration: {
- # bucket: "BucketName", # required
- # key: "BucketKeyExpression", # required
- # glue_configuration: {
- # table_name: "GlueTableName", # required
- # database_name: "GlueDatabaseName", # required
- # },
- # role_arn: "RoleArn", # required
- # },
- # },
- # },
- # ],
- # retention_period: {
- # unlimited: false,
- # number_of_days: 1,
- # },
- # versioning_configuration: {
- # unlimited: false,
- # max_versions: 1,
- # },
- # late_data_rules: [
- # {
- # rule_name: "LateDataRuleName",
- # rule_configuration: { # required
- # delta_time_session_window_configuration: {
- # timeout_in_minutes: 1, # required
- # },
- # },
- # },
- # ],
- # })
- #
- # @overload update_dataset(params = {})
- # @param [Hash] params ({})
- def update_dataset(params = {}, options = {})
- req = build_request(:update_dataset, params)
- req.send_request(options)
- end
-
- # Used to update the settings of a data store.
- #
- # @option params [required, String] :datastore_name
- # The name of the data store to be updated.
- #
- # @option params [Types::RetentionPeriod] :retention_period
- # How long, in days, message data is kept for the data store. The
- # retention period can't be updated if the data store's Amazon S3
- # storage is customer-managed.
- #
- # @option params [Types::DatastoreStorage] :datastore_storage
- # Where data in a data store is stored.. You can choose
- # `serviceManagedS3` storage, `customerManagedS3` storage, or
- # `iotSiteWiseMultiLayerStorage` storage. The default is
- # `serviceManagedS3`. You can't change the choice of Amazon S3 storage
- # after your data store is created.
- #
- # @option params [Types::FileFormatConfiguration] :file_format_configuration
- # Contains the configuration information of file formats. IoT Analytics
- # data stores support JSON and [Parquet][1].
- #
- # The default file format is JSON. You can specify only one format.
- #
- # You can't change the file format after you create the data store.
- #
- #
- #
- # [1]: https://parquet.apache.org/
- #
- # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.update_datastore({
- # datastore_name: "DatastoreName", # required
- # retention_period: {
- # unlimited: false,
- # number_of_days: 1,
- # },
- # datastore_storage: {
- # service_managed_s3: {
- # },
- # customer_managed_s3: {
- # bucket: "BucketName", # required
- # key_prefix: "S3KeyPrefix",
- # role_arn: "RoleArn", # required
- # },
- # iot_site_wise_multi_layer_storage: {
- # customer_managed_s3_storage: { # required
- # bucket: "BucketName", # required
- # key_prefix: "S3KeyPrefix",
- # },
- # },
- # },
- # file_format_configuration: {
- # json_configuration: {
- # },
- # parquet_configuration: {
- # schema_definition: {
- # columns: [
- # {
- # name: "ColumnName", # required
- # type: "ColumnDataType", # required
- # },
- # ],
- # },
- # },
- # },
- # })
- #
- # @overload update_datastore(params = {})
- # @param [Hash] params ({})
- def update_datastore(params = {}, options = {})
- req = build_request(:update_datastore, params)
- req.send_request(options)
- end
-
- # Updates the settings of a pipeline. You must specify both a `channel`
- # and a `datastore` activity and, optionally, as many as 23 additional
- # activities in the `pipelineActivities` array.
- #
- # @option params [required, String] :pipeline_name
- # The name of the pipeline to update.
- #
- # @option params [required, Array] :pipeline_activities
- # A list of `PipelineActivity` objects. Activities perform
- # transformations on your messages, such as removing, renaming or adding
- # message attributes; filtering messages based on attribute values;
- # invoking your Lambda functions on messages for advanced processing; or
- # performing mathematical transformations to normalize device data.
- #
- # The list can be 2-25 `PipelineActivity` objects and must contain both
- # a `channel` and a `datastore` activity. Each entry in the list must
- # contain only one activity. For example:
- #
- # `pipelineActivities = [ { "channel": { ... } }, { "lambda": { ... } },
- # ... ]`
- #
- # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
- #
- # @example Request syntax with placeholder values
- #
- # resp = client.update_pipeline({
- # pipeline_name: "PipelineName", # required
- # pipeline_activities: [ # required
- # {
- # channel: {
- # name: "ActivityName", # required
- # channel_name: "ChannelName", # required
- # next: "ActivityName",
- # },
- # lambda: {
- # name: "ActivityName", # required
- # lambda_name: "LambdaName", # required
- # batch_size: 1, # required
- # next: "ActivityName",
- # },
- # datastore: {
- # name: "ActivityName", # required
- # datastore_name: "DatastoreName", # required
- # },
- # add_attributes: {
- # name: "ActivityName", # required
- # attributes: { # required
- # "AttributeName" => "AttributeName",
- # },
- # next: "ActivityName",
- # },
- # remove_attributes: {
- # name: "ActivityName", # required
- # attributes: ["AttributeName"], # required
- # next: "ActivityName",
- # },
- # select_attributes: {
- # name: "ActivityName", # required
- # attributes: ["AttributeName"], # required
- # next: "ActivityName",
- # },
- # filter: {
- # name: "ActivityName", # required
- # filter: "FilterExpression", # required
- # next: "ActivityName",
- # },
- # math: {
- # name: "ActivityName", # required
- # attribute: "AttributeName", # required
- # math: "MathExpression", # required
- # next: "ActivityName",
- # },
- # device_registry_enrich: {
- # name: "ActivityName", # required
- # attribute: "AttributeName", # required
- # thing_name: "AttributeName", # required
- # role_arn: "RoleArn", # required
- # next: "ActivityName",
- # },
- # device_shadow_enrich: {
- # name: "ActivityName", # required
- # attribute: "AttributeName", # required
- # thing_name: "AttributeName", # required
- # role_arn: "RoleArn", # required
- # next: "ActivityName",
- # },
- # },
- # ],
- # })
- #
- # @overload update_pipeline(params = {})
- # @param [Hash] params ({})
- def update_pipeline(params = {}, options = {})
- req = build_request(:update_pipeline, params)
- req.send_request(options)
- end
-
- # @!endgroup
-
- # @param params ({})
- # @api private
- def build_request(operation_name, params = {})
- handlers = @handlers.for(operation_name)
- tracer = config.telemetry_provider.tracer_provider.tracer(
- Aws::Telemetry.module_to_tracer_name('Aws::IoTAnalytics')
- )
- context = Seahorse::Client::RequestContext.new(
- operation_name: operation_name,
- operation: config.api.operation(operation_name),
- client: self,
- params: params,
- config: config,
- tracer: tracer
- )
- context[:gem_name] = 'aws-sdk-iotanalytics'
- context[:gem_version] = '1.93.0'
- Seahorse::Client::Request.new(handlers, context)
- end
-
- # @api private
- # @deprecated
- def waiter_names
- []
- end
-
- class << self
-
- # @api private
- attr_reader :identifier
-
- # @api private
- def errors_module
- Errors
- end
-
- end
- end
-end
diff --git a/gems/aws-sdk-iotanalytics/lib/aws-sdk-iotanalytics/client_api.rb b/gems/aws-sdk-iotanalytics/lib/aws-sdk-iotanalytics/client_api.rb
deleted file mode 100644
index f35c631f4e7..00000000000
--- a/gems/aws-sdk-iotanalytics/lib/aws-sdk-iotanalytics/client_api.rb
+++ /dev/null
@@ -1,1450 +0,0 @@
-# frozen_string_literal: true
-
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-
-module Aws::IoTAnalytics
- # @api private
- module ClientApi
-
- include Seahorse::Model
-
- ActivityBatchSize = Shapes::IntegerShape.new(name: 'ActivityBatchSize')
- ActivityName = Shapes::StringShape.new(name: 'ActivityName')
- AddAttributesActivity = Shapes::StructureShape.new(name: 'AddAttributesActivity')
- AttributeName = Shapes::StringShape.new(name: 'AttributeName')
- AttributeNameMapping = Shapes::MapShape.new(name: 'AttributeNameMapping')
- AttributeNames = Shapes::ListShape.new(name: 'AttributeNames')
- BatchPutMessageErrorEntries = Shapes::ListShape.new(name: 'BatchPutMessageErrorEntries')
- BatchPutMessageErrorEntry = Shapes::StructureShape.new(name: 'BatchPutMessageErrorEntry')
- BatchPutMessageRequest = Shapes::StructureShape.new(name: 'BatchPutMessageRequest')
- BatchPutMessageResponse = Shapes::StructureShape.new(name: 'BatchPutMessageResponse')
- BucketKeyExpression = Shapes::StringShape.new(name: 'BucketKeyExpression')
- BucketName = Shapes::StringShape.new(name: 'BucketName')
- CancelPipelineReprocessingRequest = Shapes::StructureShape.new(name: 'CancelPipelineReprocessingRequest')
- CancelPipelineReprocessingResponse = Shapes::StructureShape.new(name: 'CancelPipelineReprocessingResponse')
- Channel = Shapes::StructureShape.new(name: 'Channel')
- ChannelActivity = Shapes::StructureShape.new(name: 'ChannelActivity')
- ChannelArn = Shapes::StringShape.new(name: 'ChannelArn')
- ChannelMessages = Shapes::StructureShape.new(name: 'ChannelMessages')
- ChannelName = Shapes::StringShape.new(name: 'ChannelName')
- ChannelStatistics = Shapes::StructureShape.new(name: 'ChannelStatistics')
- ChannelStatus = Shapes::StringShape.new(name: 'ChannelStatus')
- ChannelStorage = Shapes::StructureShape.new(name: 'ChannelStorage')
- ChannelStorageSummary = Shapes::StructureShape.new(name: 'ChannelStorageSummary')
- ChannelSummaries = Shapes::ListShape.new(name: 'ChannelSummaries')
- ChannelSummary = Shapes::StructureShape.new(name: 'ChannelSummary')
- Column = Shapes::StructureShape.new(name: 'Column')
- ColumnDataType = Shapes::StringShape.new(name: 'ColumnDataType')
- ColumnName = Shapes::StringShape.new(name: 'ColumnName')
- Columns = Shapes::ListShape.new(name: 'Columns')
- ComputeType = Shapes::StringShape.new(name: 'ComputeType')
- ContainerDatasetAction = Shapes::StructureShape.new(name: 'ContainerDatasetAction')
- CreateChannelRequest = Shapes::StructureShape.new(name: 'CreateChannelRequest')
- CreateChannelResponse = Shapes::StructureShape.new(name: 'CreateChannelResponse')
- CreateDatasetContentRequest = Shapes::StructureShape.new(name: 'CreateDatasetContentRequest')
- CreateDatasetContentResponse = Shapes::StructureShape.new(name: 'CreateDatasetContentResponse')
- CreateDatasetRequest = Shapes::StructureShape.new(name: 'CreateDatasetRequest')
- CreateDatasetResponse = Shapes::StructureShape.new(name: 'CreateDatasetResponse')
- CreateDatastoreRequest = Shapes::StructureShape.new(name: 'CreateDatastoreRequest')
- CreateDatastoreResponse = Shapes::StructureShape.new(name: 'CreateDatastoreResponse')
- CreatePipelineRequest = Shapes::StructureShape.new(name: 'CreatePipelineRequest')
- CreatePipelineResponse = Shapes::StructureShape.new(name: 'CreatePipelineResponse')
- CustomerManagedChannelS3Storage = Shapes::StructureShape.new(name: 'CustomerManagedChannelS3Storage')
- CustomerManagedChannelS3StorageSummary = Shapes::StructureShape.new(name: 'CustomerManagedChannelS3StorageSummary')
- CustomerManagedDatastoreS3Storage = Shapes::StructureShape.new(name: 'CustomerManagedDatastoreS3Storage')
- CustomerManagedDatastoreS3StorageSummary = Shapes::StructureShape.new(name: 'CustomerManagedDatastoreS3StorageSummary')
- Dataset = Shapes::StructureShape.new(name: 'Dataset')
- DatasetAction = Shapes::StructureShape.new(name: 'DatasetAction')
- DatasetActionName = Shapes::StringShape.new(name: 'DatasetActionName')
- DatasetActionSummaries = Shapes::ListShape.new(name: 'DatasetActionSummaries')
- DatasetActionSummary = Shapes::StructureShape.new(name: 'DatasetActionSummary')
- DatasetActionType = Shapes::StringShape.new(name: 'DatasetActionType')
- DatasetActions = Shapes::ListShape.new(name: 'DatasetActions')
- DatasetArn = Shapes::StringShape.new(name: 'DatasetArn')
- DatasetContentDeliveryDestination = Shapes::StructureShape.new(name: 'DatasetContentDeliveryDestination')
- DatasetContentDeliveryRule = Shapes::StructureShape.new(name: 'DatasetContentDeliveryRule')
- DatasetContentDeliveryRules = Shapes::ListShape.new(name: 'DatasetContentDeliveryRules')
- DatasetContentState = Shapes::StringShape.new(name: 'DatasetContentState')
- DatasetContentStatus = Shapes::StructureShape.new(name: 'DatasetContentStatus')
- DatasetContentSummaries = Shapes::ListShape.new(name: 'DatasetContentSummaries')
- DatasetContentSummary = Shapes::StructureShape.new(name: 'DatasetContentSummary')
- DatasetContentVersion = Shapes::StringShape.new(name: 'DatasetContentVersion')
- DatasetContentVersionValue = Shapes::StructureShape.new(name: 'DatasetContentVersionValue')
- DatasetEntries = Shapes::ListShape.new(name: 'DatasetEntries')
- DatasetEntry = Shapes::StructureShape.new(name: 'DatasetEntry')
- DatasetName = Shapes::StringShape.new(name: 'DatasetName')
- DatasetStatus = Shapes::StringShape.new(name: 'DatasetStatus')
- DatasetSummaries = Shapes::ListShape.new(name: 'DatasetSummaries')
- DatasetSummary = Shapes::StructureShape.new(name: 'DatasetSummary')
- DatasetTrigger = Shapes::StructureShape.new(name: 'DatasetTrigger')
- DatasetTriggers = Shapes::ListShape.new(name: 'DatasetTriggers')
- Datastore = Shapes::StructureShape.new(name: 'Datastore')
- DatastoreActivity = Shapes::StructureShape.new(name: 'DatastoreActivity')
- DatastoreArn = Shapes::StringShape.new(name: 'DatastoreArn')
- DatastoreIotSiteWiseMultiLayerStorage = Shapes::StructureShape.new(name: 'DatastoreIotSiteWiseMultiLayerStorage')
- DatastoreIotSiteWiseMultiLayerStorageSummary = Shapes::StructureShape.new(name: 'DatastoreIotSiteWiseMultiLayerStorageSummary')
- DatastoreName = Shapes::StringShape.new(name: 'DatastoreName')
- DatastorePartition = Shapes::StructureShape.new(name: 'DatastorePartition')
- DatastorePartitions = Shapes::StructureShape.new(name: 'DatastorePartitions')
- DatastoreStatistics = Shapes::StructureShape.new(name: 'DatastoreStatistics')
- DatastoreStatus = Shapes::StringShape.new(name: 'DatastoreStatus')
- DatastoreStorage = Shapes::StructureShape.new(name: 'DatastoreStorage')
- DatastoreStorageSummary = Shapes::StructureShape.new(name: 'DatastoreStorageSummary')
- DatastoreSummaries = Shapes::ListShape.new(name: 'DatastoreSummaries')
- DatastoreSummary = Shapes::StructureShape.new(name: 'DatastoreSummary')
- DeleteChannelRequest = Shapes::StructureShape.new(name: 'DeleteChannelRequest')
- DeleteDatasetContentRequest = Shapes::StructureShape.new(name: 'DeleteDatasetContentRequest')
- DeleteDatasetRequest = Shapes::StructureShape.new(name: 'DeleteDatasetRequest')
- DeleteDatastoreRequest = Shapes::StructureShape.new(name: 'DeleteDatastoreRequest')
- DeletePipelineRequest = Shapes::StructureShape.new(name: 'DeletePipelineRequest')
- DeltaTime = Shapes::StructureShape.new(name: 'DeltaTime')
- DeltaTimeSessionWindowConfiguration = Shapes::StructureShape.new(name: 'DeltaTimeSessionWindowConfiguration')
- DescribeChannelRequest = Shapes::StructureShape.new(name: 'DescribeChannelRequest')
- DescribeChannelResponse = Shapes::StructureShape.new(name: 'DescribeChannelResponse')
- DescribeDatasetRequest = Shapes::StructureShape.new(name: 'DescribeDatasetRequest')
- DescribeDatasetResponse = Shapes::StructureShape.new(name: 'DescribeDatasetResponse')
- DescribeDatastoreRequest = Shapes::StructureShape.new(name: 'DescribeDatastoreRequest')
- DescribeDatastoreResponse = Shapes::StructureShape.new(name: 'DescribeDatastoreResponse')
- DescribeLoggingOptionsRequest = Shapes::StructureShape.new(name: 'DescribeLoggingOptionsRequest')
- DescribeLoggingOptionsResponse = Shapes::StructureShape.new(name: 'DescribeLoggingOptionsResponse')
- DescribePipelineRequest = Shapes::StructureShape.new(name: 'DescribePipelineRequest')
- DescribePipelineResponse = Shapes::StructureShape.new(name: 'DescribePipelineResponse')
- DeviceRegistryEnrichActivity = Shapes::StructureShape.new(name: 'DeviceRegistryEnrichActivity')
- DeviceShadowEnrichActivity = Shapes::StructureShape.new(name: 'DeviceShadowEnrichActivity')
- DoubleValue = Shapes::FloatShape.new(name: 'DoubleValue')
- EndTime = Shapes::TimestampShape.new(name: 'EndTime')
- EntryName = Shapes::StringShape.new(name: 'EntryName')
- ErrorCode = Shapes::StringShape.new(name: 'ErrorCode')
- ErrorMessage = Shapes::StringShape.new(name: 'ErrorMessage')
- EstimatedResourceSize = Shapes::StructureShape.new(name: 'EstimatedResourceSize')
- FileFormatConfiguration = Shapes::StructureShape.new(name: 'FileFormatConfiguration')
- FileFormatType = Shapes::StringShape.new(name: 'FileFormatType')
- FilterActivity = Shapes::StructureShape.new(name: 'FilterActivity')
- FilterExpression = Shapes::StringShape.new(name: 'FilterExpression')
- GetDatasetContentRequest = Shapes::StructureShape.new(name: 'GetDatasetContentRequest')
- GetDatasetContentResponse = Shapes::StructureShape.new(name: 'GetDatasetContentResponse')
- GlueConfiguration = Shapes::StructureShape.new(name: 'GlueConfiguration')
- GlueDatabaseName = Shapes::StringShape.new(name: 'GlueDatabaseName')
- GlueTableName = Shapes::StringShape.new(name: 'GlueTableName')
- Image = Shapes::StringShape.new(name: 'Image')
- IncludeStatisticsFlag = Shapes::BooleanShape.new(name: 'IncludeStatisticsFlag')
- InternalFailureException = Shapes::StructureShape.new(name: 'InternalFailureException')
- InvalidRequestException = Shapes::StructureShape.new(name: 'InvalidRequestException')
- IotEventsDestinationConfiguration = Shapes::StructureShape.new(name: 'IotEventsDestinationConfiguration')
- IotEventsInputName = Shapes::StringShape.new(name: 'IotEventsInputName')
- IotSiteWiseCustomerManagedDatastoreS3Storage = Shapes::StructureShape.new(name: 'IotSiteWiseCustomerManagedDatastoreS3Storage')
- IotSiteWiseCustomerManagedDatastoreS3StorageSummary = Shapes::StructureShape.new(name: 'IotSiteWiseCustomerManagedDatastoreS3StorageSummary')
- JsonConfiguration = Shapes::StructureShape.new(name: 'JsonConfiguration')
- LambdaActivity = Shapes::StructureShape.new(name: 'LambdaActivity')
- LambdaName = Shapes::StringShape.new(name: 'LambdaName')
- LateDataRule = Shapes::StructureShape.new(name: 'LateDataRule')
- LateDataRuleConfiguration = Shapes::StructureShape.new(name: 'LateDataRuleConfiguration')
- LateDataRuleName = Shapes::StringShape.new(name: 'LateDataRuleName')
- LateDataRules = Shapes::ListShape.new(name: 'LateDataRules')
- LimitExceededException = Shapes::StructureShape.new(name: 'LimitExceededException')
- ListChannelsRequest = Shapes::StructureShape.new(name: 'ListChannelsRequest')
- ListChannelsResponse = Shapes::StructureShape.new(name: 'ListChannelsResponse')
- ListDatasetContentsRequest = Shapes::StructureShape.new(name: 'ListDatasetContentsRequest')
- ListDatasetContentsResponse = Shapes::StructureShape.new(name: 'ListDatasetContentsResponse')
- ListDatasetsRequest = Shapes::StructureShape.new(name: 'ListDatasetsRequest')
- ListDatasetsResponse = Shapes::StructureShape.new(name: 'ListDatasetsResponse')
- ListDatastoresRequest = Shapes::StructureShape.new(name: 'ListDatastoresRequest')
- ListDatastoresResponse = Shapes::StructureShape.new(name: 'ListDatastoresResponse')
- ListPipelinesRequest = Shapes::StructureShape.new(name: 'ListPipelinesRequest')
- ListPipelinesResponse = Shapes::StructureShape.new(name: 'ListPipelinesResponse')
- ListTagsForResourceRequest = Shapes::StructureShape.new(name: 'ListTagsForResourceRequest')
- ListTagsForResourceResponse = Shapes::StructureShape.new(name: 'ListTagsForResourceResponse')
- LogResult = Shapes::StringShape.new(name: 'LogResult')
- LoggingEnabled = Shapes::BooleanShape.new(name: 'LoggingEnabled')
- LoggingLevel = Shapes::StringShape.new(name: 'LoggingLevel')
- LoggingOptions = Shapes::StructureShape.new(name: 'LoggingOptions')
- MathActivity = Shapes::StructureShape.new(name: 'MathActivity')
- MathExpression = Shapes::StringShape.new(name: 'MathExpression')
- MaxMessages = Shapes::IntegerShape.new(name: 'MaxMessages')
- MaxResults = Shapes::IntegerShape.new(name: 'MaxResults')
- MaxVersions = Shapes::IntegerShape.new(name: 'MaxVersions')
- Message = Shapes::StructureShape.new(name: 'Message')
- MessageId = Shapes::StringShape.new(name: 'MessageId')
- MessagePayload = Shapes::BlobShape.new(name: 'MessagePayload')
- MessagePayloads = Shapes::ListShape.new(name: 'MessagePayloads')
- Messages = Shapes::ListShape.new(name: 'Messages')
- NextToken = Shapes::StringShape.new(name: 'NextToken')
- OffsetSeconds = Shapes::IntegerShape.new(name: 'OffsetSeconds')
- OutputFileName = Shapes::StringShape.new(name: 'OutputFileName')
- OutputFileUriValue = Shapes::StructureShape.new(name: 'OutputFileUriValue')
- ParquetConfiguration = Shapes::StructureShape.new(name: 'ParquetConfiguration')
- Partition = Shapes::StructureShape.new(name: 'Partition')
- PartitionAttributeName = Shapes::StringShape.new(name: 'PartitionAttributeName')
- Partitions = Shapes::ListShape.new(name: 'Partitions')
- Pipeline = Shapes::StructureShape.new(name: 'Pipeline')
- PipelineActivities = Shapes::ListShape.new(name: 'PipelineActivities')
- PipelineActivity = Shapes::StructureShape.new(name: 'PipelineActivity')
- PipelineArn = Shapes::StringShape.new(name: 'PipelineArn')
- PipelineName = Shapes::StringShape.new(name: 'PipelineName')
- PipelineSummaries = Shapes::ListShape.new(name: 'PipelineSummaries')
- PipelineSummary = Shapes::StructureShape.new(name: 'PipelineSummary')
- PresignedURI = Shapes::StringShape.new(name: 'PresignedURI')
- PutLoggingOptionsRequest = Shapes::StructureShape.new(name: 'PutLoggingOptionsRequest')
- QueryFilter = Shapes::StructureShape.new(name: 'QueryFilter')
- QueryFilters = Shapes::ListShape.new(name: 'QueryFilters')
- Reason = Shapes::StringShape.new(name: 'Reason')
- RemoveAttributesActivity = Shapes::StructureShape.new(name: 'RemoveAttributesActivity')
- ReprocessingId = Shapes::StringShape.new(name: 'ReprocessingId')
- ReprocessingStatus = Shapes::StringShape.new(name: 'ReprocessingStatus')
- ReprocessingSummaries = Shapes::ListShape.new(name: 'ReprocessingSummaries')
- ReprocessingSummary = Shapes::StructureShape.new(name: 'ReprocessingSummary')
- ResourceAlreadyExistsException = Shapes::StructureShape.new(name: 'ResourceAlreadyExistsException')
- ResourceArn = Shapes::StringShape.new(name: 'ResourceArn')
- ResourceConfiguration = Shapes::StructureShape.new(name: 'ResourceConfiguration')
- ResourceNotFoundException = Shapes::StructureShape.new(name: 'ResourceNotFoundException')
- RetentionPeriod = Shapes::StructureShape.new(name: 'RetentionPeriod')
- RetentionPeriodInDays = Shapes::IntegerShape.new(name: 'RetentionPeriodInDays')
- RoleArn = Shapes::StringShape.new(name: 'RoleArn')
- RunPipelineActivityRequest = Shapes::StructureShape.new(name: 'RunPipelineActivityRequest')
- RunPipelineActivityResponse = Shapes::StructureShape.new(name: 'RunPipelineActivityResponse')
- S3DestinationConfiguration = Shapes::StructureShape.new(name: 'S3DestinationConfiguration')
- S3KeyPrefix = Shapes::StringShape.new(name: 'S3KeyPrefix')
- S3PathChannelMessage = Shapes::StringShape.new(name: 'S3PathChannelMessage')
- S3PathChannelMessages = Shapes::ListShape.new(name: 'S3PathChannelMessages')
- SampleChannelDataRequest = Shapes::StructureShape.new(name: 'SampleChannelDataRequest')
- SampleChannelDataResponse = Shapes::StructureShape.new(name: 'SampleChannelDataResponse')
- Schedule = Shapes::StructureShape.new(name: 'Schedule')
- ScheduleExpression = Shapes::StringShape.new(name: 'ScheduleExpression')
- SchemaDefinition = Shapes::StructureShape.new(name: 'SchemaDefinition')
- SelectAttributesActivity = Shapes::StructureShape.new(name: 'SelectAttributesActivity')
- ServiceManagedChannelS3Storage = Shapes::StructureShape.new(name: 'ServiceManagedChannelS3Storage')
- ServiceManagedChannelS3StorageSummary = Shapes::StructureShape.new(name: 'ServiceManagedChannelS3StorageSummary')
- ServiceManagedDatastoreS3Storage = Shapes::StructureShape.new(name: 'ServiceManagedDatastoreS3Storage')
- ServiceManagedDatastoreS3StorageSummary = Shapes::StructureShape.new(name: 'ServiceManagedDatastoreS3StorageSummary')
- ServiceUnavailableException = Shapes::StructureShape.new(name: 'ServiceUnavailableException')
- SessionTimeoutInMinutes = Shapes::IntegerShape.new(name: 'SessionTimeoutInMinutes')
- SizeInBytes = Shapes::FloatShape.new(name: 'SizeInBytes')
- SqlQuery = Shapes::StringShape.new(name: 'SqlQuery')
- SqlQueryDatasetAction = Shapes::StructureShape.new(name: 'SqlQueryDatasetAction')
- StartPipelineReprocessingRequest = Shapes::StructureShape.new(name: 'StartPipelineReprocessingRequest')
- StartPipelineReprocessingResponse = Shapes::StructureShape.new(name: 'StartPipelineReprocessingResponse')
- StartTime = Shapes::TimestampShape.new(name: 'StartTime')
- StringValue = Shapes::StringShape.new(name: 'StringValue')
- Tag = Shapes::StructureShape.new(name: 'Tag')
- TagKey = Shapes::StringShape.new(name: 'TagKey')
- TagKeyList = Shapes::ListShape.new(name: 'TagKeyList')
- TagList = Shapes::ListShape.new(name: 'TagList')
- TagResourceRequest = Shapes::StructureShape.new(name: 'TagResourceRequest')
- TagResourceResponse = Shapes::StructureShape.new(name: 'TagResourceResponse')
- TagValue = Shapes::StringShape.new(name: 'TagValue')
- ThrottlingException = Shapes::StructureShape.new(name: 'ThrottlingException')
- TimeExpression = Shapes::StringShape.new(name: 'TimeExpression')
- Timestamp = Shapes::TimestampShape.new(name: 'Timestamp')
- TimestampFormat = Shapes::StringShape.new(name: 'TimestampFormat')
- TimestampPartition = Shapes::StructureShape.new(name: 'TimestampPartition')
- TriggeringDataset = Shapes::StructureShape.new(name: 'TriggeringDataset')
- UnlimitedRetentionPeriod = Shapes::BooleanShape.new(name: 'UnlimitedRetentionPeriod')
- UnlimitedVersioning = Shapes::BooleanShape.new(name: 'UnlimitedVersioning')
- UntagResourceRequest = Shapes::StructureShape.new(name: 'UntagResourceRequest')
- UntagResourceResponse = Shapes::StructureShape.new(name: 'UntagResourceResponse')
- UpdateChannelRequest = Shapes::StructureShape.new(name: 'UpdateChannelRequest')
- UpdateDatasetRequest = Shapes::StructureShape.new(name: 'UpdateDatasetRequest')
- UpdateDatastoreRequest = Shapes::StructureShape.new(name: 'UpdateDatastoreRequest')
- UpdatePipelineRequest = Shapes::StructureShape.new(name: 'UpdatePipelineRequest')
- Variable = Shapes::StructureShape.new(name: 'Variable')
- VariableName = Shapes::StringShape.new(name: 'VariableName')
- Variables = Shapes::ListShape.new(name: 'Variables')
- VersioningConfiguration = Shapes::StructureShape.new(name: 'VersioningConfiguration')
- VolumeSizeInGB = Shapes::IntegerShape.new(name: 'VolumeSizeInGB')
- errorMessage = Shapes::StringShape.new(name: 'errorMessage')
- resourceArn = Shapes::StringShape.new(name: 'resourceArn')
- resourceId = Shapes::StringShape.new(name: 'resourceId')
-
- AddAttributesActivity.add_member(:name, Shapes::ShapeRef.new(shape: ActivityName, required: true, location_name: "name"))
- AddAttributesActivity.add_member(:attributes, Shapes::ShapeRef.new(shape: AttributeNameMapping, required: true, location_name: "attributes"))
- AddAttributesActivity.add_member(:next, Shapes::ShapeRef.new(shape: ActivityName, location_name: "next"))
- AddAttributesActivity.struct_class = Types::AddAttributesActivity
-
- AttributeNameMapping.key = Shapes::ShapeRef.new(shape: AttributeName)
- AttributeNameMapping.value = Shapes::ShapeRef.new(shape: AttributeName)
-
- AttributeNames.member = Shapes::ShapeRef.new(shape: AttributeName)
-
- BatchPutMessageErrorEntries.member = Shapes::ShapeRef.new(shape: BatchPutMessageErrorEntry)
-
- BatchPutMessageErrorEntry.add_member(:message_id, Shapes::ShapeRef.new(shape: MessageId, location_name: "messageId"))
- BatchPutMessageErrorEntry.add_member(:error_code, Shapes::ShapeRef.new(shape: ErrorCode, location_name: "errorCode"))
- BatchPutMessageErrorEntry.add_member(:error_message, Shapes::ShapeRef.new(shape: ErrorMessage, location_name: "errorMessage"))
- BatchPutMessageErrorEntry.struct_class = Types::BatchPutMessageErrorEntry
-
- BatchPutMessageRequest.add_member(:channel_name, Shapes::ShapeRef.new(shape: ChannelName, required: true, location_name: "channelName"))
- BatchPutMessageRequest.add_member(:messages, Shapes::ShapeRef.new(shape: Messages, required: true, location_name: "messages"))
- BatchPutMessageRequest.struct_class = Types::BatchPutMessageRequest
-
- BatchPutMessageResponse.add_member(:batch_put_message_error_entries, Shapes::ShapeRef.new(shape: BatchPutMessageErrorEntries, location_name: "batchPutMessageErrorEntries"))
- BatchPutMessageResponse.struct_class = Types::BatchPutMessageResponse
-
- CancelPipelineReprocessingRequest.add_member(:pipeline_name, Shapes::ShapeRef.new(shape: PipelineName, required: true, location: "uri", location_name: "pipelineName"))
- CancelPipelineReprocessingRequest.add_member(:reprocessing_id, Shapes::ShapeRef.new(shape: ReprocessingId, required: true, location: "uri", location_name: "reprocessingId"))
- CancelPipelineReprocessingRequest.struct_class = Types::CancelPipelineReprocessingRequest
-
- CancelPipelineReprocessingResponse.struct_class = Types::CancelPipelineReprocessingResponse
-
- Channel.add_member(:name, Shapes::ShapeRef.new(shape: ChannelName, location_name: "name"))
- Channel.add_member(:storage, Shapes::ShapeRef.new(shape: ChannelStorage, location_name: "storage"))
- Channel.add_member(:arn, Shapes::ShapeRef.new(shape: ChannelArn, location_name: "arn"))
- Channel.add_member(:status, Shapes::ShapeRef.new(shape: ChannelStatus, location_name: "status"))
- Channel.add_member(:retention_period, Shapes::ShapeRef.new(shape: RetentionPeriod, location_name: "retentionPeriod"))
- Channel.add_member(:creation_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "creationTime"))
- Channel.add_member(:last_update_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "lastUpdateTime"))
- Channel.add_member(:last_message_arrival_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "lastMessageArrivalTime"))
- Channel.struct_class = Types::Channel
-
- ChannelActivity.add_member(:name, Shapes::ShapeRef.new(shape: ActivityName, required: true, location_name: "name"))
- ChannelActivity.add_member(:channel_name, Shapes::ShapeRef.new(shape: ChannelName, required: true, location_name: "channelName"))
- ChannelActivity.add_member(:next, Shapes::ShapeRef.new(shape: ActivityName, location_name: "next"))
- ChannelActivity.struct_class = Types::ChannelActivity
-
- ChannelMessages.add_member(:s3_paths, Shapes::ShapeRef.new(shape: S3PathChannelMessages, location_name: "s3Paths"))
- ChannelMessages.struct_class = Types::ChannelMessages
-
- ChannelStatistics.add_member(:size, Shapes::ShapeRef.new(shape: EstimatedResourceSize, location_name: "size"))
- ChannelStatistics.struct_class = Types::ChannelStatistics
-
- ChannelStorage.add_member(:service_managed_s3, Shapes::ShapeRef.new(shape: ServiceManagedChannelS3Storage, location_name: "serviceManagedS3"))
- ChannelStorage.add_member(:customer_managed_s3, Shapes::ShapeRef.new(shape: CustomerManagedChannelS3Storage, location_name: "customerManagedS3"))
- ChannelStorage.struct_class = Types::ChannelStorage
-
- ChannelStorageSummary.add_member(:service_managed_s3, Shapes::ShapeRef.new(shape: ServiceManagedChannelS3StorageSummary, location_name: "serviceManagedS3"))
- ChannelStorageSummary.add_member(:customer_managed_s3, Shapes::ShapeRef.new(shape: CustomerManagedChannelS3StorageSummary, location_name: "customerManagedS3"))
- ChannelStorageSummary.struct_class = Types::ChannelStorageSummary
-
- ChannelSummaries.member = Shapes::ShapeRef.new(shape: ChannelSummary)
-
- ChannelSummary.add_member(:channel_name, Shapes::ShapeRef.new(shape: ChannelName, location_name: "channelName"))
- ChannelSummary.add_member(:channel_storage, Shapes::ShapeRef.new(shape: ChannelStorageSummary, location_name: "channelStorage"))
- ChannelSummary.add_member(:status, Shapes::ShapeRef.new(shape: ChannelStatus, location_name: "status"))
- ChannelSummary.add_member(:creation_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "creationTime"))
- ChannelSummary.add_member(:last_update_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "lastUpdateTime"))
- ChannelSummary.add_member(:last_message_arrival_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "lastMessageArrivalTime"))
- ChannelSummary.struct_class = Types::ChannelSummary
-
- Column.add_member(:name, Shapes::ShapeRef.new(shape: ColumnName, required: true, location_name: "name"))
- Column.add_member(:type, Shapes::ShapeRef.new(shape: ColumnDataType, required: true, location_name: "type"))
- Column.struct_class = Types::Column
-
- Columns.member = Shapes::ShapeRef.new(shape: Column)
-
- ContainerDatasetAction.add_member(:image, Shapes::ShapeRef.new(shape: Image, required: true, location_name: "image"))
- ContainerDatasetAction.add_member(:execution_role_arn, Shapes::ShapeRef.new(shape: RoleArn, required: true, location_name: "executionRoleArn"))
- ContainerDatasetAction.add_member(:resource_configuration, Shapes::ShapeRef.new(shape: ResourceConfiguration, required: true, location_name: "resourceConfiguration"))
- ContainerDatasetAction.add_member(:variables, Shapes::ShapeRef.new(shape: Variables, location_name: "variables"))
- ContainerDatasetAction.struct_class = Types::ContainerDatasetAction
-
- CreateChannelRequest.add_member(:channel_name, Shapes::ShapeRef.new(shape: ChannelName, required: true, location_name: "channelName"))
- CreateChannelRequest.add_member(:channel_storage, Shapes::ShapeRef.new(shape: ChannelStorage, location_name: "channelStorage"))
- CreateChannelRequest.add_member(:retention_period, Shapes::ShapeRef.new(shape: RetentionPeriod, location_name: "retentionPeriod"))
- CreateChannelRequest.add_member(:tags, Shapes::ShapeRef.new(shape: TagList, location_name: "tags"))
- CreateChannelRequest.struct_class = Types::CreateChannelRequest
-
- CreateChannelResponse.add_member(:channel_name, Shapes::ShapeRef.new(shape: ChannelName, location_name: "channelName"))
- CreateChannelResponse.add_member(:channel_arn, Shapes::ShapeRef.new(shape: ChannelArn, location_name: "channelArn"))
- CreateChannelResponse.add_member(:retention_period, Shapes::ShapeRef.new(shape: RetentionPeriod, location_name: "retentionPeriod"))
- CreateChannelResponse.struct_class = Types::CreateChannelResponse
-
- CreateDatasetContentRequest.add_member(:dataset_name, Shapes::ShapeRef.new(shape: DatasetName, required: true, location: "uri", location_name: "datasetName"))
- CreateDatasetContentRequest.add_member(:version_id, Shapes::ShapeRef.new(shape: DatasetContentVersion, location_name: "versionId"))
- CreateDatasetContentRequest.struct_class = Types::CreateDatasetContentRequest
-
- CreateDatasetContentResponse.add_member(:version_id, Shapes::ShapeRef.new(shape: DatasetContentVersion, location_name: "versionId"))
- CreateDatasetContentResponse.struct_class = Types::CreateDatasetContentResponse
-
- CreateDatasetRequest.add_member(:dataset_name, Shapes::ShapeRef.new(shape: DatasetName, required: true, location_name: "datasetName"))
- CreateDatasetRequest.add_member(:actions, Shapes::ShapeRef.new(shape: DatasetActions, required: true, location_name: "actions"))
- CreateDatasetRequest.add_member(:triggers, Shapes::ShapeRef.new(shape: DatasetTriggers, location_name: "triggers"))
- CreateDatasetRequest.add_member(:content_delivery_rules, Shapes::ShapeRef.new(shape: DatasetContentDeliveryRules, location_name: "contentDeliveryRules"))
- CreateDatasetRequest.add_member(:retention_period, Shapes::ShapeRef.new(shape: RetentionPeriod, location_name: "retentionPeriod"))
- CreateDatasetRequest.add_member(:versioning_configuration, Shapes::ShapeRef.new(shape: VersioningConfiguration, location_name: "versioningConfiguration"))
- CreateDatasetRequest.add_member(:tags, Shapes::ShapeRef.new(shape: TagList, location_name: "tags"))
- CreateDatasetRequest.add_member(:late_data_rules, Shapes::ShapeRef.new(shape: LateDataRules, location_name: "lateDataRules"))
- CreateDatasetRequest.struct_class = Types::CreateDatasetRequest
-
- CreateDatasetResponse.add_member(:dataset_name, Shapes::ShapeRef.new(shape: DatasetName, location_name: "datasetName"))
- CreateDatasetResponse.add_member(:dataset_arn, Shapes::ShapeRef.new(shape: DatasetArn, location_name: "datasetArn"))
- CreateDatasetResponse.add_member(:retention_period, Shapes::ShapeRef.new(shape: RetentionPeriod, location_name: "retentionPeriod"))
- CreateDatasetResponse.struct_class = Types::CreateDatasetResponse
-
- CreateDatastoreRequest.add_member(:datastore_name, Shapes::ShapeRef.new(shape: DatastoreName, required: true, location_name: "datastoreName"))
- CreateDatastoreRequest.add_member(:datastore_storage, Shapes::ShapeRef.new(shape: DatastoreStorage, location_name: "datastoreStorage"))
- CreateDatastoreRequest.add_member(:retention_period, Shapes::ShapeRef.new(shape: RetentionPeriod, location_name: "retentionPeriod"))
- CreateDatastoreRequest.add_member(:tags, Shapes::ShapeRef.new(shape: TagList, location_name: "tags"))
- CreateDatastoreRequest.add_member(:file_format_configuration, Shapes::ShapeRef.new(shape: FileFormatConfiguration, location_name: "fileFormatConfiguration"))
- CreateDatastoreRequest.add_member(:datastore_partitions, Shapes::ShapeRef.new(shape: DatastorePartitions, location_name: "datastorePartitions"))
- CreateDatastoreRequest.struct_class = Types::CreateDatastoreRequest
-
- CreateDatastoreResponse.add_member(:datastore_name, Shapes::ShapeRef.new(shape: DatastoreName, location_name: "datastoreName"))
- CreateDatastoreResponse.add_member(:datastore_arn, Shapes::ShapeRef.new(shape: DatastoreArn, location_name: "datastoreArn"))
- CreateDatastoreResponse.add_member(:retention_period, Shapes::ShapeRef.new(shape: RetentionPeriod, location_name: "retentionPeriod"))
- CreateDatastoreResponse.struct_class = Types::CreateDatastoreResponse
-
- CreatePipelineRequest.add_member(:pipeline_name, Shapes::ShapeRef.new(shape: PipelineName, required: true, location_name: "pipelineName"))
- CreatePipelineRequest.add_member(:pipeline_activities, Shapes::ShapeRef.new(shape: PipelineActivities, required: true, location_name: "pipelineActivities"))
- CreatePipelineRequest.add_member(:tags, Shapes::ShapeRef.new(shape: TagList, location_name: "tags"))
- CreatePipelineRequest.struct_class = Types::CreatePipelineRequest
-
- CreatePipelineResponse.add_member(:pipeline_name, Shapes::ShapeRef.new(shape: PipelineName, location_name: "pipelineName"))
- CreatePipelineResponse.add_member(:pipeline_arn, Shapes::ShapeRef.new(shape: PipelineArn, location_name: "pipelineArn"))
- CreatePipelineResponse.struct_class = Types::CreatePipelineResponse
-
- CustomerManagedChannelS3Storage.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location_name: "bucket"))
- CustomerManagedChannelS3Storage.add_member(:key_prefix, Shapes::ShapeRef.new(shape: S3KeyPrefix, location_name: "keyPrefix"))
- CustomerManagedChannelS3Storage.add_member(:role_arn, Shapes::ShapeRef.new(shape: RoleArn, required: true, location_name: "roleArn"))
- CustomerManagedChannelS3Storage.struct_class = Types::CustomerManagedChannelS3Storage
-
- CustomerManagedChannelS3StorageSummary.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, location_name: "bucket"))
- CustomerManagedChannelS3StorageSummary.add_member(:key_prefix, Shapes::ShapeRef.new(shape: S3KeyPrefix, location_name: "keyPrefix"))
- CustomerManagedChannelS3StorageSummary.add_member(:role_arn, Shapes::ShapeRef.new(shape: RoleArn, location_name: "roleArn"))
- CustomerManagedChannelS3StorageSummary.struct_class = Types::CustomerManagedChannelS3StorageSummary
-
- CustomerManagedDatastoreS3Storage.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location_name: "bucket"))
- CustomerManagedDatastoreS3Storage.add_member(:key_prefix, Shapes::ShapeRef.new(shape: S3KeyPrefix, location_name: "keyPrefix"))
- CustomerManagedDatastoreS3Storage.add_member(:role_arn, Shapes::ShapeRef.new(shape: RoleArn, required: true, location_name: "roleArn"))
- CustomerManagedDatastoreS3Storage.struct_class = Types::CustomerManagedDatastoreS3Storage
-
- CustomerManagedDatastoreS3StorageSummary.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, location_name: "bucket"))
- CustomerManagedDatastoreS3StorageSummary.add_member(:key_prefix, Shapes::ShapeRef.new(shape: S3KeyPrefix, location_name: "keyPrefix"))
- CustomerManagedDatastoreS3StorageSummary.add_member(:role_arn, Shapes::ShapeRef.new(shape: RoleArn, location_name: "roleArn"))
- CustomerManagedDatastoreS3StorageSummary.struct_class = Types::CustomerManagedDatastoreS3StorageSummary
-
- Dataset.add_member(:name, Shapes::ShapeRef.new(shape: DatasetName, location_name: "name"))
- Dataset.add_member(:arn, Shapes::ShapeRef.new(shape: DatasetArn, location_name: "arn"))
- Dataset.add_member(:actions, Shapes::ShapeRef.new(shape: DatasetActions, location_name: "actions"))
- Dataset.add_member(:triggers, Shapes::ShapeRef.new(shape: DatasetTriggers, location_name: "triggers"))
- Dataset.add_member(:content_delivery_rules, Shapes::ShapeRef.new(shape: DatasetContentDeliveryRules, location_name: "contentDeliveryRules"))
- Dataset.add_member(:status, Shapes::ShapeRef.new(shape: DatasetStatus, location_name: "status"))
- Dataset.add_member(:creation_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "creationTime"))
- Dataset.add_member(:last_update_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "lastUpdateTime"))
- Dataset.add_member(:retention_period, Shapes::ShapeRef.new(shape: RetentionPeriod, location_name: "retentionPeriod"))
- Dataset.add_member(:versioning_configuration, Shapes::ShapeRef.new(shape: VersioningConfiguration, location_name: "versioningConfiguration"))
- Dataset.add_member(:late_data_rules, Shapes::ShapeRef.new(shape: LateDataRules, location_name: "lateDataRules"))
- Dataset.struct_class = Types::Dataset
-
- DatasetAction.add_member(:action_name, Shapes::ShapeRef.new(shape: DatasetActionName, location_name: "actionName"))
- DatasetAction.add_member(:query_action, Shapes::ShapeRef.new(shape: SqlQueryDatasetAction, location_name: "queryAction"))
- DatasetAction.add_member(:container_action, Shapes::ShapeRef.new(shape: ContainerDatasetAction, location_name: "containerAction"))
- DatasetAction.struct_class = Types::DatasetAction
-
- DatasetActionSummaries.member = Shapes::ShapeRef.new(shape: DatasetActionSummary)
-
- DatasetActionSummary.add_member(:action_name, Shapes::ShapeRef.new(shape: DatasetActionName, location_name: "actionName"))
- DatasetActionSummary.add_member(:action_type, Shapes::ShapeRef.new(shape: DatasetActionType, location_name: "actionType"))
- DatasetActionSummary.struct_class = Types::DatasetActionSummary
-
- DatasetActions.member = Shapes::ShapeRef.new(shape: DatasetAction)
-
- DatasetContentDeliveryDestination.add_member(:iot_events_destination_configuration, Shapes::ShapeRef.new(shape: IotEventsDestinationConfiguration, location_name: "iotEventsDestinationConfiguration"))
- DatasetContentDeliveryDestination.add_member(:s3_destination_configuration, Shapes::ShapeRef.new(shape: S3DestinationConfiguration, location_name: "s3DestinationConfiguration"))
- DatasetContentDeliveryDestination.struct_class = Types::DatasetContentDeliveryDestination
-
- DatasetContentDeliveryRule.add_member(:entry_name, Shapes::ShapeRef.new(shape: EntryName, location_name: "entryName"))
- DatasetContentDeliveryRule.add_member(:destination, Shapes::ShapeRef.new(shape: DatasetContentDeliveryDestination, required: true, location_name: "destination"))
- DatasetContentDeliveryRule.struct_class = Types::DatasetContentDeliveryRule
-
- DatasetContentDeliveryRules.member = Shapes::ShapeRef.new(shape: DatasetContentDeliveryRule)
-
- DatasetContentStatus.add_member(:state, Shapes::ShapeRef.new(shape: DatasetContentState, location_name: "state"))
- DatasetContentStatus.add_member(:reason, Shapes::ShapeRef.new(shape: Reason, location_name: "reason"))
- DatasetContentStatus.struct_class = Types::DatasetContentStatus
-
- DatasetContentSummaries.member = Shapes::ShapeRef.new(shape: DatasetContentSummary)
-
- DatasetContentSummary.add_member(:version, Shapes::ShapeRef.new(shape: DatasetContentVersion, location_name: "version"))
- DatasetContentSummary.add_member(:status, Shapes::ShapeRef.new(shape: DatasetContentStatus, location_name: "status"))
- DatasetContentSummary.add_member(:creation_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "creationTime"))
- DatasetContentSummary.add_member(:schedule_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "scheduleTime"))
- DatasetContentSummary.add_member(:completion_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "completionTime"))
- DatasetContentSummary.struct_class = Types::DatasetContentSummary
-
- DatasetContentVersionValue.add_member(:dataset_name, Shapes::ShapeRef.new(shape: DatasetName, required: true, location_name: "datasetName"))
- DatasetContentVersionValue.struct_class = Types::DatasetContentVersionValue
-
- DatasetEntries.member = Shapes::ShapeRef.new(shape: DatasetEntry)
-
- DatasetEntry.add_member(:entry_name, Shapes::ShapeRef.new(shape: EntryName, location_name: "entryName"))
- DatasetEntry.add_member(:data_uri, Shapes::ShapeRef.new(shape: PresignedURI, location_name: "dataURI"))
- DatasetEntry.struct_class = Types::DatasetEntry
-
- DatasetSummaries.member = Shapes::ShapeRef.new(shape: DatasetSummary)
-
- DatasetSummary.add_member(:dataset_name, Shapes::ShapeRef.new(shape: DatasetName, location_name: "datasetName"))
- DatasetSummary.add_member(:status, Shapes::ShapeRef.new(shape: DatasetStatus, location_name: "status"))
- DatasetSummary.add_member(:creation_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "creationTime"))
- DatasetSummary.add_member(:last_update_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "lastUpdateTime"))
- DatasetSummary.add_member(:triggers, Shapes::ShapeRef.new(shape: DatasetTriggers, location_name: "triggers"))
- DatasetSummary.add_member(:actions, Shapes::ShapeRef.new(shape: DatasetActionSummaries, location_name: "actions"))
- DatasetSummary.struct_class = Types::DatasetSummary
-
- DatasetTrigger.add_member(:schedule, Shapes::ShapeRef.new(shape: Schedule, location_name: "schedule"))
- DatasetTrigger.add_member(:dataset, Shapes::ShapeRef.new(shape: TriggeringDataset, location_name: "dataset"))
- DatasetTrigger.struct_class = Types::DatasetTrigger
-
- DatasetTriggers.member = Shapes::ShapeRef.new(shape: DatasetTrigger)
-
- Datastore.add_member(:name, Shapes::ShapeRef.new(shape: DatastoreName, location_name: "name"))
- Datastore.add_member(:storage, Shapes::ShapeRef.new(shape: DatastoreStorage, location_name: "storage"))
- Datastore.add_member(:arn, Shapes::ShapeRef.new(shape: DatastoreArn, location_name: "arn"))
- Datastore.add_member(:status, Shapes::ShapeRef.new(shape: DatastoreStatus, location_name: "status"))
- Datastore.add_member(:retention_period, Shapes::ShapeRef.new(shape: RetentionPeriod, location_name: "retentionPeriod"))
- Datastore.add_member(:creation_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "creationTime"))
- Datastore.add_member(:last_update_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "lastUpdateTime"))
- Datastore.add_member(:last_message_arrival_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "lastMessageArrivalTime"))
- Datastore.add_member(:file_format_configuration, Shapes::ShapeRef.new(shape: FileFormatConfiguration, location_name: "fileFormatConfiguration"))
- Datastore.add_member(:datastore_partitions, Shapes::ShapeRef.new(shape: DatastorePartitions, location_name: "datastorePartitions"))
- Datastore.struct_class = Types::Datastore
-
- DatastoreActivity.add_member(:name, Shapes::ShapeRef.new(shape: ActivityName, required: true, location_name: "name"))
- DatastoreActivity.add_member(:datastore_name, Shapes::ShapeRef.new(shape: DatastoreName, required: true, location_name: "datastoreName"))
- DatastoreActivity.struct_class = Types::DatastoreActivity
-
- DatastoreIotSiteWiseMultiLayerStorage.add_member(:customer_managed_s3_storage, Shapes::ShapeRef.new(shape: IotSiteWiseCustomerManagedDatastoreS3Storage, required: true, location_name: "customerManagedS3Storage"))
- DatastoreIotSiteWiseMultiLayerStorage.struct_class = Types::DatastoreIotSiteWiseMultiLayerStorage
-
- DatastoreIotSiteWiseMultiLayerStorageSummary.add_member(:customer_managed_s3_storage, Shapes::ShapeRef.new(shape: IotSiteWiseCustomerManagedDatastoreS3StorageSummary, location_name: "customerManagedS3Storage"))
- DatastoreIotSiteWiseMultiLayerStorageSummary.struct_class = Types::DatastoreIotSiteWiseMultiLayerStorageSummary
-
- DatastorePartition.add_member(:attribute_partition, Shapes::ShapeRef.new(shape: Partition, location_name: "attributePartition"))
- DatastorePartition.add_member(:timestamp_partition, Shapes::ShapeRef.new(shape: TimestampPartition, location_name: "timestampPartition"))
- DatastorePartition.struct_class = Types::DatastorePartition
-
- DatastorePartitions.add_member(:partitions, Shapes::ShapeRef.new(shape: Partitions, location_name: "partitions"))
- DatastorePartitions.struct_class = Types::DatastorePartitions
-
- DatastoreStatistics.add_member(:size, Shapes::ShapeRef.new(shape: EstimatedResourceSize, location_name: "size"))
- DatastoreStatistics.struct_class = Types::DatastoreStatistics
-
- DatastoreStorage.add_member(:service_managed_s3, Shapes::ShapeRef.new(shape: ServiceManagedDatastoreS3Storage, location_name: "serviceManagedS3"))
- DatastoreStorage.add_member(:customer_managed_s3, Shapes::ShapeRef.new(shape: CustomerManagedDatastoreS3Storage, location_name: "customerManagedS3"))
- DatastoreStorage.add_member(:iot_site_wise_multi_layer_storage, Shapes::ShapeRef.new(shape: DatastoreIotSiteWiseMultiLayerStorage, location_name: "iotSiteWiseMultiLayerStorage"))
- DatastoreStorage.struct_class = Types::DatastoreStorage
-
- DatastoreStorageSummary.add_member(:service_managed_s3, Shapes::ShapeRef.new(shape: ServiceManagedDatastoreS3StorageSummary, location_name: "serviceManagedS3"))
- DatastoreStorageSummary.add_member(:customer_managed_s3, Shapes::ShapeRef.new(shape: CustomerManagedDatastoreS3StorageSummary, location_name: "customerManagedS3"))
- DatastoreStorageSummary.add_member(:iot_site_wise_multi_layer_storage, Shapes::ShapeRef.new(shape: DatastoreIotSiteWiseMultiLayerStorageSummary, location_name: "iotSiteWiseMultiLayerStorage"))
- DatastoreStorageSummary.struct_class = Types::DatastoreStorageSummary
-
- DatastoreSummaries.member = Shapes::ShapeRef.new(shape: DatastoreSummary)
-
- DatastoreSummary.add_member(:datastore_name, Shapes::ShapeRef.new(shape: DatastoreName, location_name: "datastoreName"))
- DatastoreSummary.add_member(:datastore_storage, Shapes::ShapeRef.new(shape: DatastoreStorageSummary, location_name: "datastoreStorage"))
- DatastoreSummary.add_member(:status, Shapes::ShapeRef.new(shape: DatastoreStatus, location_name: "status"))
- DatastoreSummary.add_member(:creation_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "creationTime"))
- DatastoreSummary.add_member(:last_update_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "lastUpdateTime"))
- DatastoreSummary.add_member(:last_message_arrival_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "lastMessageArrivalTime"))
- DatastoreSummary.add_member(:file_format_type, Shapes::ShapeRef.new(shape: FileFormatType, location_name: "fileFormatType"))
- DatastoreSummary.add_member(:datastore_partitions, Shapes::ShapeRef.new(shape: DatastorePartitions, location_name: "datastorePartitions"))
- DatastoreSummary.struct_class = Types::DatastoreSummary
-
- DeleteChannelRequest.add_member(:channel_name, Shapes::ShapeRef.new(shape: ChannelName, required: true, location: "uri", location_name: "channelName"))
- DeleteChannelRequest.struct_class = Types::DeleteChannelRequest
-
- DeleteDatasetContentRequest.add_member(:dataset_name, Shapes::ShapeRef.new(shape: DatasetName, required: true, location: "uri", location_name: "datasetName"))
- DeleteDatasetContentRequest.add_member(:version_id, Shapes::ShapeRef.new(shape: DatasetContentVersion, location: "querystring", location_name: "versionId"))
- DeleteDatasetContentRequest.struct_class = Types::DeleteDatasetContentRequest
-
- DeleteDatasetRequest.add_member(:dataset_name, Shapes::ShapeRef.new(shape: DatasetName, required: true, location: "uri", location_name: "datasetName"))
- DeleteDatasetRequest.struct_class = Types::DeleteDatasetRequest
-
- DeleteDatastoreRequest.add_member(:datastore_name, Shapes::ShapeRef.new(shape: DatastoreName, required: true, location: "uri", location_name: "datastoreName"))
- DeleteDatastoreRequest.struct_class = Types::DeleteDatastoreRequest
-
- DeletePipelineRequest.add_member(:pipeline_name, Shapes::ShapeRef.new(shape: PipelineName, required: true, location: "uri", location_name: "pipelineName"))
- DeletePipelineRequest.struct_class = Types::DeletePipelineRequest
-
- DeltaTime.add_member(:offset_seconds, Shapes::ShapeRef.new(shape: OffsetSeconds, required: true, location_name: "offsetSeconds"))
- DeltaTime.add_member(:time_expression, Shapes::ShapeRef.new(shape: TimeExpression, required: true, location_name: "timeExpression"))
- DeltaTime.struct_class = Types::DeltaTime
-
- DeltaTimeSessionWindowConfiguration.add_member(:timeout_in_minutes, Shapes::ShapeRef.new(shape: SessionTimeoutInMinutes, required: true, location_name: "timeoutInMinutes"))
- DeltaTimeSessionWindowConfiguration.struct_class = Types::DeltaTimeSessionWindowConfiguration
-
- DescribeChannelRequest.add_member(:channel_name, Shapes::ShapeRef.new(shape: ChannelName, required: true, location: "uri", location_name: "channelName"))
- DescribeChannelRequest.add_member(:include_statistics, Shapes::ShapeRef.new(shape: IncludeStatisticsFlag, location: "querystring", location_name: "includeStatistics"))
- DescribeChannelRequest.struct_class = Types::DescribeChannelRequest
-
- DescribeChannelResponse.add_member(:channel, Shapes::ShapeRef.new(shape: Channel, location_name: "channel"))
- DescribeChannelResponse.add_member(:statistics, Shapes::ShapeRef.new(shape: ChannelStatistics, location_name: "statistics"))
- DescribeChannelResponse.struct_class = Types::DescribeChannelResponse
-
- DescribeDatasetRequest.add_member(:dataset_name, Shapes::ShapeRef.new(shape: DatasetName, required: true, location: "uri", location_name: "datasetName"))
- DescribeDatasetRequest.struct_class = Types::DescribeDatasetRequest
-
- DescribeDatasetResponse.add_member(:dataset, Shapes::ShapeRef.new(shape: Dataset, location_name: "dataset"))
- DescribeDatasetResponse.struct_class = Types::DescribeDatasetResponse
-
- DescribeDatastoreRequest.add_member(:datastore_name, Shapes::ShapeRef.new(shape: DatastoreName, required: true, location: "uri", location_name: "datastoreName"))
- DescribeDatastoreRequest.add_member(:include_statistics, Shapes::ShapeRef.new(shape: IncludeStatisticsFlag, location: "querystring", location_name: "includeStatistics"))
- DescribeDatastoreRequest.struct_class = Types::DescribeDatastoreRequest
-
- DescribeDatastoreResponse.add_member(:datastore, Shapes::ShapeRef.new(shape: Datastore, location_name: "datastore"))
- DescribeDatastoreResponse.add_member(:statistics, Shapes::ShapeRef.new(shape: DatastoreStatistics, location_name: "statistics"))
- DescribeDatastoreResponse.struct_class = Types::DescribeDatastoreResponse
-
- DescribeLoggingOptionsRequest.struct_class = Types::DescribeLoggingOptionsRequest
-
- DescribeLoggingOptionsResponse.add_member(:logging_options, Shapes::ShapeRef.new(shape: LoggingOptions, location_name: "loggingOptions"))
- DescribeLoggingOptionsResponse.struct_class = Types::DescribeLoggingOptionsResponse
-
- DescribePipelineRequest.add_member(:pipeline_name, Shapes::ShapeRef.new(shape: PipelineName, required: true, location: "uri", location_name: "pipelineName"))
- DescribePipelineRequest.struct_class = Types::DescribePipelineRequest
-
- DescribePipelineResponse.add_member(:pipeline, Shapes::ShapeRef.new(shape: Pipeline, location_name: "pipeline"))
- DescribePipelineResponse.struct_class = Types::DescribePipelineResponse
-
- DeviceRegistryEnrichActivity.add_member(:name, Shapes::ShapeRef.new(shape: ActivityName, required: true, location_name: "name"))
- DeviceRegistryEnrichActivity.add_member(:attribute, Shapes::ShapeRef.new(shape: AttributeName, required: true, location_name: "attribute"))
- DeviceRegistryEnrichActivity.add_member(:thing_name, Shapes::ShapeRef.new(shape: AttributeName, required: true, location_name: "thingName"))
- DeviceRegistryEnrichActivity.add_member(:role_arn, Shapes::ShapeRef.new(shape: RoleArn, required: true, location_name: "roleArn"))
- DeviceRegistryEnrichActivity.add_member(:next, Shapes::ShapeRef.new(shape: ActivityName, location_name: "next"))
- DeviceRegistryEnrichActivity.struct_class = Types::DeviceRegistryEnrichActivity
-
- DeviceShadowEnrichActivity.add_member(:name, Shapes::ShapeRef.new(shape: ActivityName, required: true, location_name: "name"))
- DeviceShadowEnrichActivity.add_member(:attribute, Shapes::ShapeRef.new(shape: AttributeName, required: true, location_name: "attribute"))
- DeviceShadowEnrichActivity.add_member(:thing_name, Shapes::ShapeRef.new(shape: AttributeName, required: true, location_name: "thingName"))
- DeviceShadowEnrichActivity.add_member(:role_arn, Shapes::ShapeRef.new(shape: RoleArn, required: true, location_name: "roleArn"))
- DeviceShadowEnrichActivity.add_member(:next, Shapes::ShapeRef.new(shape: ActivityName, location_name: "next"))
- DeviceShadowEnrichActivity.struct_class = Types::DeviceShadowEnrichActivity
-
- EstimatedResourceSize.add_member(:estimated_size_in_bytes, Shapes::ShapeRef.new(shape: SizeInBytes, location_name: "estimatedSizeInBytes"))
- EstimatedResourceSize.add_member(:estimated_on, Shapes::ShapeRef.new(shape: Timestamp, location_name: "estimatedOn"))
- EstimatedResourceSize.struct_class = Types::EstimatedResourceSize
-
- FileFormatConfiguration.add_member(:json_configuration, Shapes::ShapeRef.new(shape: JsonConfiguration, location_name: "jsonConfiguration"))
- FileFormatConfiguration.add_member(:parquet_configuration, Shapes::ShapeRef.new(shape: ParquetConfiguration, location_name: "parquetConfiguration"))
- FileFormatConfiguration.struct_class = Types::FileFormatConfiguration
-
- FilterActivity.add_member(:name, Shapes::ShapeRef.new(shape: ActivityName, required: true, location_name: "name"))
- FilterActivity.add_member(:filter, Shapes::ShapeRef.new(shape: FilterExpression, required: true, location_name: "filter"))
- FilterActivity.add_member(:next, Shapes::ShapeRef.new(shape: ActivityName, location_name: "next"))
- FilterActivity.struct_class = Types::FilterActivity
-
- GetDatasetContentRequest.add_member(:dataset_name, Shapes::ShapeRef.new(shape: DatasetName, required: true, location: "uri", location_name: "datasetName"))
- GetDatasetContentRequest.add_member(:version_id, Shapes::ShapeRef.new(shape: DatasetContentVersion, location: "querystring", location_name: "versionId"))
- GetDatasetContentRequest.struct_class = Types::GetDatasetContentRequest
-
- GetDatasetContentResponse.add_member(:entries, Shapes::ShapeRef.new(shape: DatasetEntries, location_name: "entries"))
- GetDatasetContentResponse.add_member(:timestamp, Shapes::ShapeRef.new(shape: Timestamp, location_name: "timestamp"))
- GetDatasetContentResponse.add_member(:status, Shapes::ShapeRef.new(shape: DatasetContentStatus, location_name: "status"))
- GetDatasetContentResponse.struct_class = Types::GetDatasetContentResponse
-
- GlueConfiguration.add_member(:table_name, Shapes::ShapeRef.new(shape: GlueTableName, required: true, location_name: "tableName"))
- GlueConfiguration.add_member(:database_name, Shapes::ShapeRef.new(shape: GlueDatabaseName, required: true, location_name: "databaseName"))
- GlueConfiguration.struct_class = Types::GlueConfiguration
-
- InternalFailureException.add_member(:message, Shapes::ShapeRef.new(shape: errorMessage, location_name: "message"))
- InternalFailureException.struct_class = Types::InternalFailureException
-
- InvalidRequestException.add_member(:message, Shapes::ShapeRef.new(shape: errorMessage, location_name: "message"))
- InvalidRequestException.struct_class = Types::InvalidRequestException
-
- IotEventsDestinationConfiguration.add_member(:input_name, Shapes::ShapeRef.new(shape: IotEventsInputName, required: true, location_name: "inputName"))
- IotEventsDestinationConfiguration.add_member(:role_arn, Shapes::ShapeRef.new(shape: RoleArn, required: true, location_name: "roleArn"))
- IotEventsDestinationConfiguration.struct_class = Types::IotEventsDestinationConfiguration
-
- IotSiteWiseCustomerManagedDatastoreS3Storage.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location_name: "bucket"))
- IotSiteWiseCustomerManagedDatastoreS3Storage.add_member(:key_prefix, Shapes::ShapeRef.new(shape: S3KeyPrefix, location_name: "keyPrefix"))
- IotSiteWiseCustomerManagedDatastoreS3Storage.struct_class = Types::IotSiteWiseCustomerManagedDatastoreS3Storage
-
- IotSiteWiseCustomerManagedDatastoreS3StorageSummary.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, location_name: "bucket"))
- IotSiteWiseCustomerManagedDatastoreS3StorageSummary.add_member(:key_prefix, Shapes::ShapeRef.new(shape: S3KeyPrefix, location_name: "keyPrefix"))
- IotSiteWiseCustomerManagedDatastoreS3StorageSummary.struct_class = Types::IotSiteWiseCustomerManagedDatastoreS3StorageSummary
-
- JsonConfiguration.struct_class = Types::JsonConfiguration
-
- LambdaActivity.add_member(:name, Shapes::ShapeRef.new(shape: ActivityName, required: true, location_name: "name"))
- LambdaActivity.add_member(:lambda_name, Shapes::ShapeRef.new(shape: LambdaName, required: true, location_name: "lambdaName"))
- LambdaActivity.add_member(:batch_size, Shapes::ShapeRef.new(shape: ActivityBatchSize, required: true, location_name: "batchSize"))
- LambdaActivity.add_member(:next, Shapes::ShapeRef.new(shape: ActivityName, location_name: "next"))
- LambdaActivity.struct_class = Types::LambdaActivity
-
- LateDataRule.add_member(:rule_name, Shapes::ShapeRef.new(shape: LateDataRuleName, location_name: "ruleName"))
- LateDataRule.add_member(:rule_configuration, Shapes::ShapeRef.new(shape: LateDataRuleConfiguration, required: true, location_name: "ruleConfiguration"))
- LateDataRule.struct_class = Types::LateDataRule
-
- LateDataRuleConfiguration.add_member(:delta_time_session_window_configuration, Shapes::ShapeRef.new(shape: DeltaTimeSessionWindowConfiguration, location_name: "deltaTimeSessionWindowConfiguration"))
- LateDataRuleConfiguration.struct_class = Types::LateDataRuleConfiguration
-
- LateDataRules.member = Shapes::ShapeRef.new(shape: LateDataRule)
-
- LimitExceededException.add_member(:message, Shapes::ShapeRef.new(shape: errorMessage, location_name: "message"))
- LimitExceededException.struct_class = Types::LimitExceededException
-
- ListChannelsRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location: "querystring", location_name: "nextToken"))
- ListChannelsRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxResults, location: "querystring", location_name: "maxResults"))
- ListChannelsRequest.struct_class = Types::ListChannelsRequest
-
- ListChannelsResponse.add_member(:channel_summaries, Shapes::ShapeRef.new(shape: ChannelSummaries, location_name: "channelSummaries"))
- ListChannelsResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "nextToken"))
- ListChannelsResponse.struct_class = Types::ListChannelsResponse
-
- ListDatasetContentsRequest.add_member(:dataset_name, Shapes::ShapeRef.new(shape: DatasetName, required: true, location: "uri", location_name: "datasetName"))
- ListDatasetContentsRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location: "querystring", location_name: "nextToken"))
- ListDatasetContentsRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxResults, location: "querystring", location_name: "maxResults"))
- ListDatasetContentsRequest.add_member(:scheduled_on_or_after, Shapes::ShapeRef.new(shape: Timestamp, location: "querystring", location_name: "scheduledOnOrAfter"))
- ListDatasetContentsRequest.add_member(:scheduled_before, Shapes::ShapeRef.new(shape: Timestamp, location: "querystring", location_name: "scheduledBefore"))
- ListDatasetContentsRequest.struct_class = Types::ListDatasetContentsRequest
-
- ListDatasetContentsResponse.add_member(:dataset_content_summaries, Shapes::ShapeRef.new(shape: DatasetContentSummaries, location_name: "datasetContentSummaries"))
- ListDatasetContentsResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "nextToken"))
- ListDatasetContentsResponse.struct_class = Types::ListDatasetContentsResponse
-
- ListDatasetsRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location: "querystring", location_name: "nextToken"))
- ListDatasetsRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxResults, location: "querystring", location_name: "maxResults"))
- ListDatasetsRequest.struct_class = Types::ListDatasetsRequest
-
- ListDatasetsResponse.add_member(:dataset_summaries, Shapes::ShapeRef.new(shape: DatasetSummaries, location_name: "datasetSummaries"))
- ListDatasetsResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "nextToken"))
- ListDatasetsResponse.struct_class = Types::ListDatasetsResponse
-
- ListDatastoresRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location: "querystring", location_name: "nextToken"))
- ListDatastoresRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxResults, location: "querystring", location_name: "maxResults"))
- ListDatastoresRequest.struct_class = Types::ListDatastoresRequest
-
- ListDatastoresResponse.add_member(:datastore_summaries, Shapes::ShapeRef.new(shape: DatastoreSummaries, location_name: "datastoreSummaries"))
- ListDatastoresResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "nextToken"))
- ListDatastoresResponse.struct_class = Types::ListDatastoresResponse
-
- ListPipelinesRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location: "querystring", location_name: "nextToken"))
- ListPipelinesRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxResults, location: "querystring", location_name: "maxResults"))
- ListPipelinesRequest.struct_class = Types::ListPipelinesRequest
-
- ListPipelinesResponse.add_member(:pipeline_summaries, Shapes::ShapeRef.new(shape: PipelineSummaries, location_name: "pipelineSummaries"))
- ListPipelinesResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "nextToken"))
- ListPipelinesResponse.struct_class = Types::ListPipelinesResponse
-
- ListTagsForResourceRequest.add_member(:resource_arn, Shapes::ShapeRef.new(shape: ResourceArn, required: true, location: "querystring", location_name: "resourceArn"))
- ListTagsForResourceRequest.struct_class = Types::ListTagsForResourceRequest
-
- ListTagsForResourceResponse.add_member(:tags, Shapes::ShapeRef.new(shape: TagList, location_name: "tags"))
- ListTagsForResourceResponse.struct_class = Types::ListTagsForResourceResponse
-
- LoggingOptions.add_member(:role_arn, Shapes::ShapeRef.new(shape: RoleArn, required: true, location_name: "roleArn"))
- LoggingOptions.add_member(:level, Shapes::ShapeRef.new(shape: LoggingLevel, required: true, location_name: "level"))
- LoggingOptions.add_member(:enabled, Shapes::ShapeRef.new(shape: LoggingEnabled, required: true, location_name: "enabled"))
- LoggingOptions.struct_class = Types::LoggingOptions
-
- MathActivity.add_member(:name, Shapes::ShapeRef.new(shape: ActivityName, required: true, location_name: "name"))
- MathActivity.add_member(:attribute, Shapes::ShapeRef.new(shape: AttributeName, required: true, location_name: "attribute"))
- MathActivity.add_member(:math, Shapes::ShapeRef.new(shape: MathExpression, required: true, location_name: "math"))
- MathActivity.add_member(:next, Shapes::ShapeRef.new(shape: ActivityName, location_name: "next"))
- MathActivity.struct_class = Types::MathActivity
-
- Message.add_member(:message_id, Shapes::ShapeRef.new(shape: MessageId, required: true, location_name: "messageId"))
- Message.add_member(:payload, Shapes::ShapeRef.new(shape: MessagePayload, required: true, location_name: "payload"))
- Message.struct_class = Types::Message
-
- MessagePayloads.member = Shapes::ShapeRef.new(shape: MessagePayload)
-
- Messages.member = Shapes::ShapeRef.new(shape: Message)
-
- OutputFileUriValue.add_member(:file_name, Shapes::ShapeRef.new(shape: OutputFileName, required: true, location_name: "fileName"))
- OutputFileUriValue.struct_class = Types::OutputFileUriValue
-
- ParquetConfiguration.add_member(:schema_definition, Shapes::ShapeRef.new(shape: SchemaDefinition, location_name: "schemaDefinition"))
- ParquetConfiguration.struct_class = Types::ParquetConfiguration
-
- Partition.add_member(:attribute_name, Shapes::ShapeRef.new(shape: PartitionAttributeName, required: true, location_name: "attributeName"))
- Partition.struct_class = Types::Partition
-
- Partitions.member = Shapes::ShapeRef.new(shape: DatastorePartition)
-
- Pipeline.add_member(:name, Shapes::ShapeRef.new(shape: PipelineName, location_name: "name"))
- Pipeline.add_member(:arn, Shapes::ShapeRef.new(shape: PipelineArn, location_name: "arn"))
- Pipeline.add_member(:activities, Shapes::ShapeRef.new(shape: PipelineActivities, location_name: "activities"))
- Pipeline.add_member(:reprocessing_summaries, Shapes::ShapeRef.new(shape: ReprocessingSummaries, location_name: "reprocessingSummaries"))
- Pipeline.add_member(:creation_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "creationTime"))
- Pipeline.add_member(:last_update_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "lastUpdateTime"))
- Pipeline.struct_class = Types::Pipeline
-
- PipelineActivities.member = Shapes::ShapeRef.new(shape: PipelineActivity)
-
- PipelineActivity.add_member(:channel, Shapes::ShapeRef.new(shape: ChannelActivity, location_name: "channel"))
- PipelineActivity.add_member(:lambda, Shapes::ShapeRef.new(shape: LambdaActivity, location_name: "lambda"))
- PipelineActivity.add_member(:datastore, Shapes::ShapeRef.new(shape: DatastoreActivity, location_name: "datastore"))
- PipelineActivity.add_member(:add_attributes, Shapes::ShapeRef.new(shape: AddAttributesActivity, location_name: "addAttributes"))
- PipelineActivity.add_member(:remove_attributes, Shapes::ShapeRef.new(shape: RemoveAttributesActivity, location_name: "removeAttributes"))
- PipelineActivity.add_member(:select_attributes, Shapes::ShapeRef.new(shape: SelectAttributesActivity, location_name: "selectAttributes"))
- PipelineActivity.add_member(:filter, Shapes::ShapeRef.new(shape: FilterActivity, location_name: "filter"))
- PipelineActivity.add_member(:math, Shapes::ShapeRef.new(shape: MathActivity, location_name: "math"))
- PipelineActivity.add_member(:device_registry_enrich, Shapes::ShapeRef.new(shape: DeviceRegistryEnrichActivity, location_name: "deviceRegistryEnrich"))
- PipelineActivity.add_member(:device_shadow_enrich, Shapes::ShapeRef.new(shape: DeviceShadowEnrichActivity, location_name: "deviceShadowEnrich"))
- PipelineActivity.struct_class = Types::PipelineActivity
-
- PipelineSummaries.member = Shapes::ShapeRef.new(shape: PipelineSummary)
-
- PipelineSummary.add_member(:pipeline_name, Shapes::ShapeRef.new(shape: PipelineName, location_name: "pipelineName"))
- PipelineSummary.add_member(:reprocessing_summaries, Shapes::ShapeRef.new(shape: ReprocessingSummaries, location_name: "reprocessingSummaries"))
- PipelineSummary.add_member(:creation_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "creationTime"))
- PipelineSummary.add_member(:last_update_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "lastUpdateTime"))
- PipelineSummary.struct_class = Types::PipelineSummary
-
- PutLoggingOptionsRequest.add_member(:logging_options, Shapes::ShapeRef.new(shape: LoggingOptions, required: true, location_name: "loggingOptions"))
- PutLoggingOptionsRequest.struct_class = Types::PutLoggingOptionsRequest
-
- QueryFilter.add_member(:delta_time, Shapes::ShapeRef.new(shape: DeltaTime, location_name: "deltaTime"))
- QueryFilter.struct_class = Types::QueryFilter
-
- QueryFilters.member = Shapes::ShapeRef.new(shape: QueryFilter)
-
- RemoveAttributesActivity.add_member(:name, Shapes::ShapeRef.new(shape: ActivityName, required: true, location_name: "name"))
- RemoveAttributesActivity.add_member(:attributes, Shapes::ShapeRef.new(shape: AttributeNames, required: true, location_name: "attributes"))
- RemoveAttributesActivity.add_member(:next, Shapes::ShapeRef.new(shape: ActivityName, location_name: "next"))
- RemoveAttributesActivity.struct_class = Types::RemoveAttributesActivity
-
- ReprocessingSummaries.member = Shapes::ShapeRef.new(shape: ReprocessingSummary)
-
- ReprocessingSummary.add_member(:id, Shapes::ShapeRef.new(shape: ReprocessingId, location_name: "id"))
- ReprocessingSummary.add_member(:status, Shapes::ShapeRef.new(shape: ReprocessingStatus, location_name: "status"))
- ReprocessingSummary.add_member(:creation_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "creationTime"))
- ReprocessingSummary.struct_class = Types::ReprocessingSummary
-
- ResourceAlreadyExistsException.add_member(:message, Shapes::ShapeRef.new(shape: errorMessage, location_name: "message"))
- ResourceAlreadyExistsException.add_member(:resource_id, Shapes::ShapeRef.new(shape: resourceId, location_name: "resourceId"))
- ResourceAlreadyExistsException.add_member(:resource_arn, Shapes::ShapeRef.new(shape: resourceArn, location_name: "resourceArn"))
- ResourceAlreadyExistsException.struct_class = Types::ResourceAlreadyExistsException
-
- ResourceConfiguration.add_member(:compute_type, Shapes::ShapeRef.new(shape: ComputeType, required: true, location_name: "computeType"))
- ResourceConfiguration.add_member(:volume_size_in_gb, Shapes::ShapeRef.new(shape: VolumeSizeInGB, required: true, location_name: "volumeSizeInGB"))
- ResourceConfiguration.struct_class = Types::ResourceConfiguration
-
- ResourceNotFoundException.add_member(:message, Shapes::ShapeRef.new(shape: errorMessage, location_name: "message"))
- ResourceNotFoundException.struct_class = Types::ResourceNotFoundException
-
- RetentionPeriod.add_member(:unlimited, Shapes::ShapeRef.new(shape: UnlimitedRetentionPeriod, location_name: "unlimited"))
- RetentionPeriod.add_member(:number_of_days, Shapes::ShapeRef.new(shape: RetentionPeriodInDays, location_name: "numberOfDays"))
- RetentionPeriod.struct_class = Types::RetentionPeriod
-
- RunPipelineActivityRequest.add_member(:pipeline_activity, Shapes::ShapeRef.new(shape: PipelineActivity, required: true, location_name: "pipelineActivity"))
- RunPipelineActivityRequest.add_member(:payloads, Shapes::ShapeRef.new(shape: MessagePayloads, required: true, location_name: "payloads"))
- RunPipelineActivityRequest.struct_class = Types::RunPipelineActivityRequest
-
- RunPipelineActivityResponse.add_member(:payloads, Shapes::ShapeRef.new(shape: MessagePayloads, location_name: "payloads"))
- RunPipelineActivityResponse.add_member(:log_result, Shapes::ShapeRef.new(shape: LogResult, location_name: "logResult"))
- RunPipelineActivityResponse.struct_class = Types::RunPipelineActivityResponse
-
- S3DestinationConfiguration.add_member(:bucket, Shapes::ShapeRef.new(shape: BucketName, required: true, location_name: "bucket"))
- S3DestinationConfiguration.add_member(:key, Shapes::ShapeRef.new(shape: BucketKeyExpression, required: true, location_name: "key"))
- S3DestinationConfiguration.add_member(:glue_configuration, Shapes::ShapeRef.new(shape: GlueConfiguration, location_name: "glueConfiguration"))
- S3DestinationConfiguration.add_member(:role_arn, Shapes::ShapeRef.new(shape: RoleArn, required: true, location_name: "roleArn"))
- S3DestinationConfiguration.struct_class = Types::S3DestinationConfiguration
-
- S3PathChannelMessages.member = Shapes::ShapeRef.new(shape: S3PathChannelMessage)
-
- SampleChannelDataRequest.add_member(:channel_name, Shapes::ShapeRef.new(shape: ChannelName, required: true, location: "uri", location_name: "channelName"))
- SampleChannelDataRequest.add_member(:max_messages, Shapes::ShapeRef.new(shape: MaxMessages, location: "querystring", location_name: "maxMessages"))
- SampleChannelDataRequest.add_member(:start_time, Shapes::ShapeRef.new(shape: StartTime, location: "querystring", location_name: "startTime"))
- SampleChannelDataRequest.add_member(:end_time, Shapes::ShapeRef.new(shape: EndTime, location: "querystring", location_name: "endTime"))
- SampleChannelDataRequest.struct_class = Types::SampleChannelDataRequest
-
- SampleChannelDataResponse.add_member(:payloads, Shapes::ShapeRef.new(shape: MessagePayloads, location_name: "payloads"))
- SampleChannelDataResponse.struct_class = Types::SampleChannelDataResponse
-
- Schedule.add_member(:expression, Shapes::ShapeRef.new(shape: ScheduleExpression, location_name: "expression"))
- Schedule.struct_class = Types::Schedule
-
- SchemaDefinition.add_member(:columns, Shapes::ShapeRef.new(shape: Columns, location_name: "columns"))
- SchemaDefinition.struct_class = Types::SchemaDefinition
-
- SelectAttributesActivity.add_member(:name, Shapes::ShapeRef.new(shape: ActivityName, required: true, location_name: "name"))
- SelectAttributesActivity.add_member(:attributes, Shapes::ShapeRef.new(shape: AttributeNames, required: true, location_name: "attributes"))
- SelectAttributesActivity.add_member(:next, Shapes::ShapeRef.new(shape: ActivityName, location_name: "next"))
- SelectAttributesActivity.struct_class = Types::SelectAttributesActivity
-
- ServiceManagedChannelS3Storage.struct_class = Types::ServiceManagedChannelS3Storage
-
- ServiceManagedChannelS3StorageSummary.struct_class = Types::ServiceManagedChannelS3StorageSummary
-
- ServiceManagedDatastoreS3Storage.struct_class = Types::ServiceManagedDatastoreS3Storage
-
- ServiceManagedDatastoreS3StorageSummary.struct_class = Types::ServiceManagedDatastoreS3StorageSummary
-
- ServiceUnavailableException.add_member(:message, Shapes::ShapeRef.new(shape: errorMessage, location_name: "message"))
- ServiceUnavailableException.struct_class = Types::ServiceUnavailableException
-
- SqlQueryDatasetAction.add_member(:sql_query, Shapes::ShapeRef.new(shape: SqlQuery, required: true, location_name: "sqlQuery"))
- SqlQueryDatasetAction.add_member(:filters, Shapes::ShapeRef.new(shape: QueryFilters, location_name: "filters"))
- SqlQueryDatasetAction.struct_class = Types::SqlQueryDatasetAction
-
- StartPipelineReprocessingRequest.add_member(:pipeline_name, Shapes::ShapeRef.new(shape: PipelineName, required: true, location: "uri", location_name: "pipelineName"))
- StartPipelineReprocessingRequest.add_member(:start_time, Shapes::ShapeRef.new(shape: StartTime, location_name: "startTime"))
- StartPipelineReprocessingRequest.add_member(:end_time, Shapes::ShapeRef.new(shape: EndTime, location_name: "endTime"))
- StartPipelineReprocessingRequest.add_member(:channel_messages, Shapes::ShapeRef.new(shape: ChannelMessages, location_name: "channelMessages"))
- StartPipelineReprocessingRequest.struct_class = Types::StartPipelineReprocessingRequest
-
- StartPipelineReprocessingResponse.add_member(:reprocessing_id, Shapes::ShapeRef.new(shape: ReprocessingId, location_name: "reprocessingId"))
- StartPipelineReprocessingResponse.struct_class = Types::StartPipelineReprocessingResponse
-
- Tag.add_member(:key, Shapes::ShapeRef.new(shape: TagKey, required: true, location_name: "key"))
- Tag.add_member(:value, Shapes::ShapeRef.new(shape: TagValue, required: true, location_name: "value"))
- Tag.struct_class = Types::Tag
-
- TagKeyList.member = Shapes::ShapeRef.new(shape: TagKey)
-
- TagList.member = Shapes::ShapeRef.new(shape: Tag)
-
- TagResourceRequest.add_member(:resource_arn, Shapes::ShapeRef.new(shape: ResourceArn, required: true, location: "querystring", location_name: "resourceArn"))
- TagResourceRequest.add_member(:tags, Shapes::ShapeRef.new(shape: TagList, required: true, location_name: "tags"))
- TagResourceRequest.struct_class = Types::TagResourceRequest
-
- TagResourceResponse.struct_class = Types::TagResourceResponse
-
- ThrottlingException.add_member(:message, Shapes::ShapeRef.new(shape: errorMessage, location_name: "message"))
- ThrottlingException.struct_class = Types::ThrottlingException
-
- TimestampPartition.add_member(:attribute_name, Shapes::ShapeRef.new(shape: PartitionAttributeName, required: true, location_name: "attributeName"))
- TimestampPartition.add_member(:timestamp_format, Shapes::ShapeRef.new(shape: TimestampFormat, location_name: "timestampFormat"))
- TimestampPartition.struct_class = Types::TimestampPartition
-
- TriggeringDataset.add_member(:name, Shapes::ShapeRef.new(shape: DatasetName, required: true, location_name: "name"))
- TriggeringDataset.struct_class = Types::TriggeringDataset
-
- UntagResourceRequest.add_member(:resource_arn, Shapes::ShapeRef.new(shape: ResourceArn, required: true, location: "querystring", location_name: "resourceArn"))
- UntagResourceRequest.add_member(:tag_keys, Shapes::ShapeRef.new(shape: TagKeyList, required: true, location: "querystring", location_name: "tagKeys"))
- UntagResourceRequest.struct_class = Types::UntagResourceRequest
-
- UntagResourceResponse.struct_class = Types::UntagResourceResponse
-
- UpdateChannelRequest.add_member(:channel_name, Shapes::ShapeRef.new(shape: ChannelName, required: true, location: "uri", location_name: "channelName"))
- UpdateChannelRequest.add_member(:channel_storage, Shapes::ShapeRef.new(shape: ChannelStorage, location_name: "channelStorage"))
- UpdateChannelRequest.add_member(:retention_period, Shapes::ShapeRef.new(shape: RetentionPeriod, location_name: "retentionPeriod"))
- UpdateChannelRequest.struct_class = Types::UpdateChannelRequest
-
- UpdateDatasetRequest.add_member(:dataset_name, Shapes::ShapeRef.new(shape: DatasetName, required: true, location: "uri", location_name: "datasetName"))
- UpdateDatasetRequest.add_member(:actions, Shapes::ShapeRef.new(shape: DatasetActions, required: true, location_name: "actions"))
- UpdateDatasetRequest.add_member(:triggers, Shapes::ShapeRef.new(shape: DatasetTriggers, location_name: "triggers"))
- UpdateDatasetRequest.add_member(:content_delivery_rules, Shapes::ShapeRef.new(shape: DatasetContentDeliveryRules, location_name: "contentDeliveryRules"))
- UpdateDatasetRequest.add_member(:retention_period, Shapes::ShapeRef.new(shape: RetentionPeriod, location_name: "retentionPeriod"))
- UpdateDatasetRequest.add_member(:versioning_configuration, Shapes::ShapeRef.new(shape: VersioningConfiguration, location_name: "versioningConfiguration"))
- UpdateDatasetRequest.add_member(:late_data_rules, Shapes::ShapeRef.new(shape: LateDataRules, location_name: "lateDataRules"))
- UpdateDatasetRequest.struct_class = Types::UpdateDatasetRequest
-
- UpdateDatastoreRequest.add_member(:datastore_name, Shapes::ShapeRef.new(shape: DatastoreName, required: true, location: "uri", location_name: "datastoreName"))
- UpdateDatastoreRequest.add_member(:retention_period, Shapes::ShapeRef.new(shape: RetentionPeriod, location_name: "retentionPeriod"))
- UpdateDatastoreRequest.add_member(:datastore_storage, Shapes::ShapeRef.new(shape: DatastoreStorage, location_name: "datastoreStorage"))
- UpdateDatastoreRequest.add_member(:file_format_configuration, Shapes::ShapeRef.new(shape: FileFormatConfiguration, location_name: "fileFormatConfiguration"))
- UpdateDatastoreRequest.struct_class = Types::UpdateDatastoreRequest
-
- UpdatePipelineRequest.add_member(:pipeline_name, Shapes::ShapeRef.new(shape: PipelineName, required: true, location: "uri", location_name: "pipelineName"))
- UpdatePipelineRequest.add_member(:pipeline_activities, Shapes::ShapeRef.new(shape: PipelineActivities, required: true, location_name: "pipelineActivities"))
- UpdatePipelineRequest.struct_class = Types::UpdatePipelineRequest
-
- Variable.add_member(:name, Shapes::ShapeRef.new(shape: VariableName, required: true, location_name: "name"))
- Variable.add_member(:string_value, Shapes::ShapeRef.new(shape: StringValue, location_name: "stringValue"))
- Variable.add_member(:double_value, Shapes::ShapeRef.new(shape: DoubleValue, location_name: "doubleValue", metadata: {"box" => true}))
- Variable.add_member(:dataset_content_version_value, Shapes::ShapeRef.new(shape: DatasetContentVersionValue, location_name: "datasetContentVersionValue"))
- Variable.add_member(:output_file_uri_value, Shapes::ShapeRef.new(shape: OutputFileUriValue, location_name: "outputFileUriValue"))
- Variable.struct_class = Types::Variable
-
- Variables.member = Shapes::ShapeRef.new(shape: Variable)
-
- VersioningConfiguration.add_member(:unlimited, Shapes::ShapeRef.new(shape: UnlimitedVersioning, location_name: "unlimited"))
- VersioningConfiguration.add_member(:max_versions, Shapes::ShapeRef.new(shape: MaxVersions, location_name: "maxVersions"))
- VersioningConfiguration.struct_class = Types::VersioningConfiguration
-
-
- # @api private
- API = Seahorse::Model::Api.new.tap do |api|
-
- api.version = "2017-11-27"
-
- api.metadata = {
- "apiVersion" => "2017-11-27",
- "auth" => ["aws.auth#sigv4"],
- "endpointPrefix" => "iotanalytics",
- "protocol" => "rest-json",
- "protocols" => ["rest-json"],
- "serviceFullName" => "AWS IoT Analytics",
- "serviceId" => "IoTAnalytics",
- "signatureVersion" => "v4",
- "signingName" => "iotanalytics",
- "uid" => "iotanalytics-2017-11-27",
- }
-
- api.add_operation(:batch_put_message, Seahorse::Model::Operation.new.tap do |o|
- o.name = "BatchPutMessage"
- o.http_method = "POST"
- o.http_request_uri = "/messages/batch"
- o.input = Shapes::ShapeRef.new(shape: BatchPutMessageRequest)
- o.output = Shapes::ShapeRef.new(shape: BatchPutMessageResponse)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
- o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- end)
-
- api.add_operation(:cancel_pipeline_reprocessing, Seahorse::Model::Operation.new.tap do |o|
- o.name = "CancelPipelineReprocessing"
- o.http_method = "DELETE"
- o.http_request_uri = "/pipelines/{pipelineName}/reprocessing/{reprocessingId}"
- o.input = Shapes::ShapeRef.new(shape: CancelPipelineReprocessingRequest)
- o.output = Shapes::ShapeRef.new(shape: CancelPipelineReprocessingResponse)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
- o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- end)
-
- api.add_operation(:create_channel, Seahorse::Model::Operation.new.tap do |o|
- o.name = "CreateChannel"
- o.http_method = "POST"
- o.http_request_uri = "/channels"
- o.input = Shapes::ShapeRef.new(shape: CreateChannelRequest)
- o.output = Shapes::ShapeRef.new(shape: CreateChannelResponse)
- o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceAlreadyExistsException)
- o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- o.errors << Shapes::ShapeRef.new(shape: LimitExceededException)
- end)
-
- api.add_operation(:create_dataset, Seahorse::Model::Operation.new.tap do |o|
- o.name = "CreateDataset"
- o.http_method = "POST"
- o.http_request_uri = "/datasets"
- o.input = Shapes::ShapeRef.new(shape: CreateDatasetRequest)
- o.output = Shapes::ShapeRef.new(shape: CreateDatasetResponse)
- o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceAlreadyExistsException)
- o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- o.errors << Shapes::ShapeRef.new(shape: LimitExceededException)
- end)
-
- api.add_operation(:create_dataset_content, Seahorse::Model::Operation.new.tap do |o|
- o.name = "CreateDatasetContent"
- o.http_method = "POST"
- o.http_request_uri = "/datasets/{datasetName}/content"
- o.input = Shapes::ShapeRef.new(shape: CreateDatasetContentRequest)
- o.output = Shapes::ShapeRef.new(shape: CreateDatasetContentResponse)
- o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- end)
-
- api.add_operation(:create_datastore, Seahorse::Model::Operation.new.tap do |o|
- o.name = "CreateDatastore"
- o.http_method = "POST"
- o.http_request_uri = "/datastores"
- o.input = Shapes::ShapeRef.new(shape: CreateDatastoreRequest)
- o.output = Shapes::ShapeRef.new(shape: CreateDatastoreResponse)
- o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceAlreadyExistsException)
- o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- o.errors << Shapes::ShapeRef.new(shape: LimitExceededException)
- end)
-
- api.add_operation(:create_pipeline, Seahorse::Model::Operation.new.tap do |o|
- o.name = "CreatePipeline"
- o.http_method = "POST"
- o.http_request_uri = "/pipelines"
- o.input = Shapes::ShapeRef.new(shape: CreatePipelineRequest)
- o.output = Shapes::ShapeRef.new(shape: CreatePipelineResponse)
- o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceAlreadyExistsException)
- o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- o.errors << Shapes::ShapeRef.new(shape: LimitExceededException)
- end)
-
- api.add_operation(:delete_channel, Seahorse::Model::Operation.new.tap do |o|
- o.name = "DeleteChannel"
- o.http_method = "DELETE"
- o.http_request_uri = "/channels/{channelName}"
- o.input = Shapes::ShapeRef.new(shape: DeleteChannelRequest)
- o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure))
- o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- end)
-
- api.add_operation(:delete_dataset, Seahorse::Model::Operation.new.tap do |o|
- o.name = "DeleteDataset"
- o.http_method = "DELETE"
- o.http_request_uri = "/datasets/{datasetName}"
- o.input = Shapes::ShapeRef.new(shape: DeleteDatasetRequest)
- o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure))
- o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- end)
-
- api.add_operation(:delete_dataset_content, Seahorse::Model::Operation.new.tap do |o|
- o.name = "DeleteDatasetContent"
- o.http_method = "DELETE"
- o.http_request_uri = "/datasets/{datasetName}/content"
- o.input = Shapes::ShapeRef.new(shape: DeleteDatasetContentRequest)
- o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure))
- o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- end)
-
- api.add_operation(:delete_datastore, Seahorse::Model::Operation.new.tap do |o|
- o.name = "DeleteDatastore"
- o.http_method = "DELETE"
- o.http_request_uri = "/datastores/{datastoreName}"
- o.input = Shapes::ShapeRef.new(shape: DeleteDatastoreRequest)
- o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure))
- o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- end)
-
- api.add_operation(:delete_pipeline, Seahorse::Model::Operation.new.tap do |o|
- o.name = "DeletePipeline"
- o.http_method = "DELETE"
- o.http_request_uri = "/pipelines/{pipelineName}"
- o.input = Shapes::ShapeRef.new(shape: DeletePipelineRequest)
- o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure))
- o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- end)
-
- api.add_operation(:describe_channel, Seahorse::Model::Operation.new.tap do |o|
- o.name = "DescribeChannel"
- o.http_method = "GET"
- o.http_request_uri = "/channels/{channelName}"
- o.input = Shapes::ShapeRef.new(shape: DescribeChannelRequest)
- o.output = Shapes::ShapeRef.new(shape: DescribeChannelResponse)
- o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- end)
-
- api.add_operation(:describe_dataset, Seahorse::Model::Operation.new.tap do |o|
- o.name = "DescribeDataset"
- o.http_method = "GET"
- o.http_request_uri = "/datasets/{datasetName}"
- o.input = Shapes::ShapeRef.new(shape: DescribeDatasetRequest)
- o.output = Shapes::ShapeRef.new(shape: DescribeDatasetResponse)
- o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- end)
-
- api.add_operation(:describe_datastore, Seahorse::Model::Operation.new.tap do |o|
- o.name = "DescribeDatastore"
- o.http_method = "GET"
- o.http_request_uri = "/datastores/{datastoreName}"
- o.input = Shapes::ShapeRef.new(shape: DescribeDatastoreRequest)
- o.output = Shapes::ShapeRef.new(shape: DescribeDatastoreResponse)
- o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- end)
-
- api.add_operation(:describe_logging_options, Seahorse::Model::Operation.new.tap do |o|
- o.name = "DescribeLoggingOptions"
- o.http_method = "GET"
- o.http_request_uri = "/logging"
- o.input = Shapes::ShapeRef.new(shape: DescribeLoggingOptionsRequest)
- o.output = Shapes::ShapeRef.new(shape: DescribeLoggingOptionsResponse)
- o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- end)
-
- api.add_operation(:describe_pipeline, Seahorse::Model::Operation.new.tap do |o|
- o.name = "DescribePipeline"
- o.http_method = "GET"
- o.http_request_uri = "/pipelines/{pipelineName}"
- o.input = Shapes::ShapeRef.new(shape: DescribePipelineRequest)
- o.output = Shapes::ShapeRef.new(shape: DescribePipelineResponse)
- o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- end)
-
- api.add_operation(:get_dataset_content, Seahorse::Model::Operation.new.tap do |o|
- o.name = "GetDatasetContent"
- o.http_method = "GET"
- o.http_request_uri = "/datasets/{datasetName}/content"
- o.input = Shapes::ShapeRef.new(shape: GetDatasetContentRequest)
- o.output = Shapes::ShapeRef.new(shape: GetDatasetContentResponse)
- o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- end)
-
- api.add_operation(:list_channels, Seahorse::Model::Operation.new.tap do |o|
- o.name = "ListChannels"
- o.http_method = "GET"
- o.http_request_uri = "/channels"
- o.input = Shapes::ShapeRef.new(shape: ListChannelsRequest)
- o.output = Shapes::ShapeRef.new(shape: ListChannelsResponse)
- o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
- o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- o[:pager] = Aws::Pager.new(
- limit_key: "max_results",
- tokens: {
- "next_token" => "next_token"
- }
- )
- end)
-
- api.add_operation(:list_dataset_contents, Seahorse::Model::Operation.new.tap do |o|
- o.name = "ListDatasetContents"
- o.http_method = "GET"
- o.http_request_uri = "/datasets/{datasetName}/contents"
- o.input = Shapes::ShapeRef.new(shape: ListDatasetContentsRequest)
- o.output = Shapes::ShapeRef.new(shape: ListDatasetContentsResponse)
- o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
- o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o[:pager] = Aws::Pager.new(
- limit_key: "max_results",
- tokens: {
- "next_token" => "next_token"
- }
- )
- end)
-
- api.add_operation(:list_datasets, Seahorse::Model::Operation.new.tap do |o|
- o.name = "ListDatasets"
- o.http_method = "GET"
- o.http_request_uri = "/datasets"
- o.input = Shapes::ShapeRef.new(shape: ListDatasetsRequest)
- o.output = Shapes::ShapeRef.new(shape: ListDatasetsResponse)
- o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
- o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- o[:pager] = Aws::Pager.new(
- limit_key: "max_results",
- tokens: {
- "next_token" => "next_token"
- }
- )
- end)
-
- api.add_operation(:list_datastores, Seahorse::Model::Operation.new.tap do |o|
- o.name = "ListDatastores"
- o.http_method = "GET"
- o.http_request_uri = "/datastores"
- o.input = Shapes::ShapeRef.new(shape: ListDatastoresRequest)
- o.output = Shapes::ShapeRef.new(shape: ListDatastoresResponse)
- o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
- o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- o[:pager] = Aws::Pager.new(
- limit_key: "max_results",
- tokens: {
- "next_token" => "next_token"
- }
- )
- end)
-
- api.add_operation(:list_pipelines, Seahorse::Model::Operation.new.tap do |o|
- o.name = "ListPipelines"
- o.http_method = "GET"
- o.http_request_uri = "/pipelines"
- o.input = Shapes::ShapeRef.new(shape: ListPipelinesRequest)
- o.output = Shapes::ShapeRef.new(shape: ListPipelinesResponse)
- o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
- o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- o[:pager] = Aws::Pager.new(
- limit_key: "max_results",
- tokens: {
- "next_token" => "next_token"
- }
- )
- end)
-
- api.add_operation(:list_tags_for_resource, Seahorse::Model::Operation.new.tap do |o|
- o.name = "ListTagsForResource"
- o.http_method = "GET"
- o.http_request_uri = "/tags"
- o.input = Shapes::ShapeRef.new(shape: ListTagsForResourceRequest)
- o.output = Shapes::ShapeRef.new(shape: ListTagsForResourceResponse)
- o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
- o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- o.errors << Shapes::ShapeRef.new(shape: LimitExceededException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- end)
-
- api.add_operation(:put_logging_options, Seahorse::Model::Operation.new.tap do |o|
- o.name = "PutLoggingOptions"
- o.http_method = "PUT"
- o.http_request_uri = "/logging"
- o.input = Shapes::ShapeRef.new(shape: PutLoggingOptionsRequest)
- o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure))
- o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
- o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- end)
-
- api.add_operation(:run_pipeline_activity, Seahorse::Model::Operation.new.tap do |o|
- o.name = "RunPipelineActivity"
- o.http_method = "POST"
- o.http_request_uri = "/pipelineactivities/run"
- o.input = Shapes::ShapeRef.new(shape: RunPipelineActivityRequest)
- o.output = Shapes::ShapeRef.new(shape: RunPipelineActivityResponse)
- o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
- o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- end)
-
- api.add_operation(:sample_channel_data, Seahorse::Model::Operation.new.tap do |o|
- o.name = "SampleChannelData"
- o.http_method = "GET"
- o.http_request_uri = "/channels/{channelName}/sample"
- o.input = Shapes::ShapeRef.new(shape: SampleChannelDataRequest)
- o.output = Shapes::ShapeRef.new(shape: SampleChannelDataResponse)
- o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- end)
-
- api.add_operation(:start_pipeline_reprocessing, Seahorse::Model::Operation.new.tap do |o|
- o.name = "StartPipelineReprocessing"
- o.http_method = "POST"
- o.http_request_uri = "/pipelines/{pipelineName}/reprocessing"
- o.input = Shapes::ShapeRef.new(shape: StartPipelineReprocessingRequest)
- o.output = Shapes::ShapeRef.new(shape: StartPipelineReprocessingResponse)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceAlreadyExistsException)
- o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
- o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- end)
-
- api.add_operation(:tag_resource, Seahorse::Model::Operation.new.tap do |o|
- o.name = "TagResource"
- o.http_method = "POST"
- o.http_request_uri = "/tags"
- o.input = Shapes::ShapeRef.new(shape: TagResourceRequest)
- o.output = Shapes::ShapeRef.new(shape: TagResourceResponse)
- o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
- o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- o.errors << Shapes::ShapeRef.new(shape: LimitExceededException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- end)
-
- api.add_operation(:untag_resource, Seahorse::Model::Operation.new.tap do |o|
- o.name = "UntagResource"
- o.http_method = "DELETE"
- o.http_request_uri = "/tags"
- o.input = Shapes::ShapeRef.new(shape: UntagResourceRequest)
- o.output = Shapes::ShapeRef.new(shape: UntagResourceResponse)
- o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
- o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- o.errors << Shapes::ShapeRef.new(shape: LimitExceededException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- end)
-
- api.add_operation(:update_channel, Seahorse::Model::Operation.new.tap do |o|
- o.name = "UpdateChannel"
- o.http_method = "PUT"
- o.http_request_uri = "/channels/{channelName}"
- o.input = Shapes::ShapeRef.new(shape: UpdateChannelRequest)
- o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure))
- o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- end)
-
- api.add_operation(:update_dataset, Seahorse::Model::Operation.new.tap do |o|
- o.name = "UpdateDataset"
- o.http_method = "PUT"
- o.http_request_uri = "/datasets/{datasetName}"
- o.input = Shapes::ShapeRef.new(shape: UpdateDatasetRequest)
- o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure))
- o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- end)
-
- api.add_operation(:update_datastore, Seahorse::Model::Operation.new.tap do |o|
- o.name = "UpdateDatastore"
- o.http_method = "PUT"
- o.http_request_uri = "/datastores/{datastoreName}"
- o.input = Shapes::ShapeRef.new(shape: UpdateDatastoreRequest)
- o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure))
- o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- end)
-
- api.add_operation(:update_pipeline, Seahorse::Model::Operation.new.tap do |o|
- o.name = "UpdatePipeline"
- o.http_method = "PUT"
- o.http_request_uri = "/pipelines/{pipelineName}"
- o.input = Shapes::ShapeRef.new(shape: UpdatePipelineRequest)
- o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure))
- o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException)
- o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
- o.errors << Shapes::ShapeRef.new(shape: InternalFailureException)
- o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException)
- o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
- o.errors << Shapes::ShapeRef.new(shape: LimitExceededException)
- end)
- end
-
- end
-end
diff --git a/gems/aws-sdk-iotanalytics/lib/aws-sdk-iotanalytics/customizations.rb b/gems/aws-sdk-iotanalytics/lib/aws-sdk-iotanalytics/customizations.rb
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/gems/aws-sdk-iotanalytics/lib/aws-sdk-iotanalytics/endpoint_parameters.rb b/gems/aws-sdk-iotanalytics/lib/aws-sdk-iotanalytics/endpoint_parameters.rb
deleted file mode 100644
index 563537307b6..00000000000
--- a/gems/aws-sdk-iotanalytics/lib/aws-sdk-iotanalytics/endpoint_parameters.rb
+++ /dev/null
@@ -1,69 +0,0 @@
-# frozen_string_literal: true
-
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-module Aws::IoTAnalytics
- # Endpoint parameters used to influence endpoints per request.
- #
- # @!attribute region
- # The AWS region used to dispatch the request.
- #
- # @return [string]
- #
- # @!attribute use_dual_stack
- # When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.
- #
- # @return [boolean]
- #
- # @!attribute use_fips
- # When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.
- #
- # @return [boolean]
- #
- # @!attribute endpoint
- # Override the endpoint used to send this request
- #
- # @return [string]
- #
- EndpointParameters = Struct.new(
- :region,
- :use_dual_stack,
- :use_fips,
- :endpoint,
- ) do
- include Aws::Structure
-
- # @api private
- class << self
- PARAM_MAP = {
- 'Region' => :region,
- 'UseDualStack' => :use_dual_stack,
- 'UseFIPS' => :use_fips,
- 'Endpoint' => :endpoint,
- }.freeze
- end
-
- def initialize(options = {})
- self[:region] = options[:region]
- self[:use_dual_stack] = options[:use_dual_stack]
- self[:use_dual_stack] = false if self[:use_dual_stack].nil?
- self[:use_fips] = options[:use_fips]
- self[:use_fips] = false if self[:use_fips].nil?
- self[:endpoint] = options[:endpoint]
- end
-
- def self.create(config, options={})
- new({
- region: config.region,
- use_dual_stack: config.use_dualstack_endpoint,
- use_fips: config.use_fips_endpoint,
- endpoint: (config.endpoint.to_s unless config.regional_endpoint),
- }.merge(options))
- end
- end
-end
diff --git a/gems/aws-sdk-iotanalytics/lib/aws-sdk-iotanalytics/endpoint_provider.rb b/gems/aws-sdk-iotanalytics/lib/aws-sdk-iotanalytics/endpoint_provider.rb
deleted file mode 100644
index b7863810451..00000000000
--- a/gems/aws-sdk-iotanalytics/lib/aws-sdk-iotanalytics/endpoint_provider.rb
+++ /dev/null
@@ -1,50 +0,0 @@
-# frozen_string_literal: true
-
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-module Aws::IoTAnalytics
- class EndpointProvider
- def resolve_endpoint(parameters)
- if Aws::Endpoints::Matchers.set?(parameters.endpoint)
- if Aws::Endpoints::Matchers.boolean_equals?(parameters.use_fips, true)
- raise ArgumentError, "Invalid Configuration: FIPS and custom endpoint are not supported"
- end
- if Aws::Endpoints::Matchers.boolean_equals?(parameters.use_dual_stack, true)
- raise ArgumentError, "Invalid Configuration: Dualstack and custom endpoint are not supported"
- end
- return Aws::Endpoints::Endpoint.new(url: parameters.endpoint, headers: {}, properties: {})
- end
- if Aws::Endpoints::Matchers.set?(parameters.region)
- if (partition_result = Aws::Endpoints::Matchers.aws_partition(parameters.region))
- if Aws::Endpoints::Matchers.boolean_equals?(parameters.use_fips, true) && Aws::Endpoints::Matchers.boolean_equals?(parameters.use_dual_stack, true)
- if Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsFIPS")) && Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsDualStack"))
- return Aws::Endpoints::Endpoint.new(url: "https://iotanalytics-fips.#{parameters.region}.#{partition_result['dualStackDnsSuffix']}", headers: {}, properties: {})
- end
- raise ArgumentError, "FIPS and DualStack are enabled, but this partition does not support one or both"
- end
- if Aws::Endpoints::Matchers.boolean_equals?(parameters.use_fips, true)
- if Aws::Endpoints::Matchers.boolean_equals?(Aws::Endpoints::Matchers.attr(partition_result, "supportsFIPS"), true)
- return Aws::Endpoints::Endpoint.new(url: "https://iotanalytics-fips.#{parameters.region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {})
- end
- raise ArgumentError, "FIPS is enabled but this partition does not support FIPS"
- end
- if Aws::Endpoints::Matchers.boolean_equals?(parameters.use_dual_stack, true)
- if Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsDualStack"))
- return Aws::Endpoints::Endpoint.new(url: "https://iotanalytics.#{parameters.region}.#{partition_result['dualStackDnsSuffix']}", headers: {}, properties: {})
- end
- raise ArgumentError, "DualStack is enabled but this partition does not support DualStack"
- end
- return Aws::Endpoints::Endpoint.new(url: "https://iotanalytics.#{parameters.region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {})
- end
- end
- raise ArgumentError, "Invalid Configuration: Missing Region"
- raise ArgumentError, 'No endpoint could be resolved'
-
- end
- end
-end
diff --git a/gems/aws-sdk-iotanalytics/lib/aws-sdk-iotanalytics/endpoints.rb b/gems/aws-sdk-iotanalytics/lib/aws-sdk-iotanalytics/endpoints.rb
deleted file mode 100644
index 572f796b4f1..00000000000
--- a/gems/aws-sdk-iotanalytics/lib/aws-sdk-iotanalytics/endpoints.rb
+++ /dev/null
@@ -1,20 +0,0 @@
-# frozen_string_literal: true
-
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-
-module Aws::IoTAnalytics
- # @api private
- module Endpoints
-
-
- def self.parameters_for_operation(context)
- Aws::IoTAnalytics::EndpointParameters.create(context.config)
- end
- end
-end
diff --git a/gems/aws-sdk-iotanalytics/lib/aws-sdk-iotanalytics/errors.rb b/gems/aws-sdk-iotanalytics/lib/aws-sdk-iotanalytics/errors.rb
deleted file mode 100644
index 5c9d489c475..00000000000
--- a/gems/aws-sdk-iotanalytics/lib/aws-sdk-iotanalytics/errors.rb
+++ /dev/null
@@ -1,160 +0,0 @@
-# frozen_string_literal: true
-
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-module Aws::IoTAnalytics
-
- # When IoTAnalytics returns an error response, the Ruby SDK constructs and raises an error.
- # These errors all extend Aws::IoTAnalytics::Errors::ServiceError < {Aws::Errors::ServiceError}
- #
- # You can rescue all IoTAnalytics errors using ServiceError:
- #
- # begin
- # # do stuff
- # rescue Aws::IoTAnalytics::Errors::ServiceError
- # # rescues all IoTAnalytics API errors
- # end
- #
- #
- # ## Request Context
- # ServiceError objects have a {Aws::Errors::ServiceError#context #context} method that returns
- # information about the request that generated the error.
- # See {Seahorse::Client::RequestContext} for more information.
- #
- # ## Error Classes
- # * {InternalFailureException}
- # * {InvalidRequestException}
- # * {LimitExceededException}
- # * {ResourceAlreadyExistsException}
- # * {ResourceNotFoundException}
- # * {ServiceUnavailableException}
- # * {ThrottlingException}
- #
- # Additionally, error classes are dynamically generated for service errors based on the error code
- # if they are not defined above.
- module Errors
-
- extend Aws::Errors::DynamicErrors
-
- class InternalFailureException < ServiceError
-
- # @param [Seahorse::Client::RequestContext] context
- # @param [String] message
- # @param [Aws::IoTAnalytics::Types::InternalFailureException] data
- def initialize(context, message, data = Aws::EmptyStructure.new)
- super(context, message, data)
- end
-
- # @return [String]
- def message
- @message || @data[:message]
- end
- end
-
- class InvalidRequestException < ServiceError
-
- # @param [Seahorse::Client::RequestContext] context
- # @param [String] message
- # @param [Aws::IoTAnalytics::Types::InvalidRequestException] data
- def initialize(context, message, data = Aws::EmptyStructure.new)
- super(context, message, data)
- end
-
- # @return [String]
- def message
- @message || @data[:message]
- end
- end
-
- class LimitExceededException < ServiceError
-
- # @param [Seahorse::Client::RequestContext] context
- # @param [String] message
- # @param [Aws::IoTAnalytics::Types::LimitExceededException] data
- def initialize(context, message, data = Aws::EmptyStructure.new)
- super(context, message, data)
- end
-
- # @return [String]
- def message
- @message || @data[:message]
- end
- end
-
- class ResourceAlreadyExistsException < ServiceError
-
- # @param [Seahorse::Client::RequestContext] context
- # @param [String] message
- # @param [Aws::IoTAnalytics::Types::ResourceAlreadyExistsException] data
- def initialize(context, message, data = Aws::EmptyStructure.new)
- super(context, message, data)
- end
-
- # @return [String]
- def message
- @message || @data[:message]
- end
-
- # @return [String]
- def resource_id
- @data[:resource_id]
- end
-
- # @return [String]
- def resource_arn
- @data[:resource_arn]
- end
- end
-
- class ResourceNotFoundException < ServiceError
-
- # @param [Seahorse::Client::RequestContext] context
- # @param [String] message
- # @param [Aws::IoTAnalytics::Types::ResourceNotFoundException] data
- def initialize(context, message, data = Aws::EmptyStructure.new)
- super(context, message, data)
- end
-
- # @return [String]
- def message
- @message || @data[:message]
- end
- end
-
- class ServiceUnavailableException < ServiceError
-
- # @param [Seahorse::Client::RequestContext] context
- # @param [String] message
- # @param [Aws::IoTAnalytics::Types::ServiceUnavailableException] data
- def initialize(context, message, data = Aws::EmptyStructure.new)
- super(context, message, data)
- end
-
- # @return [String]
- def message
- @message || @data[:message]
- end
- end
-
- class ThrottlingException < ServiceError
-
- # @param [Seahorse::Client::RequestContext] context
- # @param [String] message
- # @param [Aws::IoTAnalytics::Types::ThrottlingException] data
- def initialize(context, message, data = Aws::EmptyStructure.new)
- super(context, message, data)
- end
-
- # @return [String]
- def message
- @message || @data[:message]
- end
- end
-
- end
-end
diff --git a/gems/aws-sdk-iotanalytics/lib/aws-sdk-iotanalytics/plugins/endpoints.rb b/gems/aws-sdk-iotanalytics/lib/aws-sdk-iotanalytics/plugins/endpoints.rb
deleted file mode 100644
index b8bc1299934..00000000000
--- a/gems/aws-sdk-iotanalytics/lib/aws-sdk-iotanalytics/plugins/endpoints.rb
+++ /dev/null
@@ -1,77 +0,0 @@
-# frozen_string_literal: true
-
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-
-module Aws::IoTAnalytics
- module Plugins
- class Endpoints < Seahorse::Client::Plugin
- option(
- :endpoint_provider,
- doc_type: 'Aws::IoTAnalytics::EndpointProvider',
- rbs_type: 'untyped',
- docstring: <<~DOCS) do |_cfg|
-The endpoint provider used to resolve endpoints. Any object that responds to
-`#resolve_endpoint(parameters)` where `parameters` is a Struct similar to
-`Aws::IoTAnalytics::EndpointParameters`.
- DOCS
- Aws::IoTAnalytics::EndpointProvider.new
- end
-
- # @api private
- class Handler < Seahorse::Client::Handler
- def call(context)
- unless context[:discovered_endpoint]
- params = Aws::IoTAnalytics::Endpoints.parameters_for_operation(context)
- endpoint = context.config.endpoint_provider.resolve_endpoint(params)
-
- context.http_request.endpoint = endpoint.url
- apply_endpoint_headers(context, endpoint.headers)
-
- context[:endpoint_params] = params
- context[:endpoint_properties] = endpoint.properties
- end
-
- context[:auth_scheme] =
- Aws::Endpoints.resolve_auth_scheme(context, endpoint)
-
- with_metrics(context) { @handler.call(context) }
- end
-
- private
-
- def with_metrics(context, &block)
- metrics = []
- metrics << 'ENDPOINT_OVERRIDE' unless context.config.regional_endpoint
- if context[:auth_scheme] && context[:auth_scheme]['name'] == 'sigv4a'
- metrics << 'SIGV4A_SIGNING'
- end
- if context.config.credentials&.credentials&.account_id
- metrics << 'RESOLVED_ACCOUNT_ID'
- end
- Aws::Plugins::UserAgent.metric(*metrics, &block)
- end
-
- def apply_endpoint_headers(context, headers)
- headers.each do |key, values|
- value = values
- .compact
- .map { |s| Seahorse::Util.escape_header_list_string(s.to_s) }
- .join(',')
-
- context.http_request.headers[key] = value
- end
- end
- end
-
- def add_handlers(handlers, _config)
- handlers.add(Handler, step: :build, priority: 75)
- end
- end
- end
-end
diff --git a/gems/aws-sdk-iotanalytics/lib/aws-sdk-iotanalytics/resource.rb b/gems/aws-sdk-iotanalytics/lib/aws-sdk-iotanalytics/resource.rb
deleted file mode 100644
index 993b30db9c5..00000000000
--- a/gems/aws-sdk-iotanalytics/lib/aws-sdk-iotanalytics/resource.rb
+++ /dev/null
@@ -1,26 +0,0 @@
-# frozen_string_literal: true
-
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-module Aws::IoTAnalytics
-
- class Resource
-
- # @param options ({})
- # @option options [Client] :client
- def initialize(options = {})
- @client = options[:client] || Client.new(options)
- end
-
- # @return [Client]
- def client
- @client
- end
-
- end
-end
diff --git a/gems/aws-sdk-iotanalytics/lib/aws-sdk-iotanalytics/types.rb b/gems/aws-sdk-iotanalytics/lib/aws-sdk-iotanalytics/types.rb
deleted file mode 100644
index fe21d51b1d3..00000000000
--- a/gems/aws-sdk-iotanalytics/lib/aws-sdk-iotanalytics/types.rb
+++ /dev/null
@@ -1,3141 +0,0 @@
-# frozen_string_literal: true
-
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-module Aws::IoTAnalytics
- module Types
-
- # An activity that adds other attributes based on existing attributes in
- # the message.
- #
- # @!attribute [rw] name
- # The name of the addAttributes activity.
- # @return [String]
- #
- # @!attribute [rw] attributes
- # A list of 1-50 `AttributeNameMapping` objects that map an existing
- # attribute to a new attribute.
- #
- # The existing attributes remain in the message, so if you want to
- # remove the originals, use `RemoveAttributeActivity`.
- #
- #
- # @return [Hash]
- #
- # @!attribute [rw] next
- # The next activity in the pipeline.
- # @return [String]
- #
- class AddAttributesActivity < Struct.new(
- :name,
- :attributes,
- :next)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Contains informations about errors.
- #
- # @!attribute [rw] message_id
- # The ID of the message that caused the error. See the value
- # corresponding to the `messageId` key in the message object.
- # @return [String]
- #
- # @!attribute [rw] error_code
- # The code associated with the error.
- # @return [String]
- #
- # @!attribute [rw] error_message
- # The message associated with the error.
- # @return [String]
- #
- class BatchPutMessageErrorEntry < Struct.new(
- :message_id,
- :error_code,
- :error_message)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] channel_name
- # The name of the channel where the messages are sent.
- # @return [String]
- #
- # @!attribute [rw] messages
- # The list of messages to be sent. Each message has the format: \{
- # "messageId": "string", "payload": "string"}.
- #
- # The field names of message payloads (data) that you send to IoT
- # Analytics:
- #
- # * Must contain only alphanumeric characters and undescores (\_). No
- # other special characters are allowed.
- #
- # * Must begin with an alphabetic character or single underscore (\_).
- #
- # * Cannot contain hyphens (-).
- #
- # * In regular expression terms:
- # "^\[A-Za-z\_\](\[A-Za-z0-9\]*\|\[A-Za-z0-9\]\[A-Za-z0-9\_\]*)$".
- #
- # * Cannot be more than 255 characters.
- #
- # * Are case insensitive. (Fields named foo and FOO in the same
- # payload are considered duplicates.)
- #
- # For example, \{"temp\_01": 29} or \{"\_temp\_01": 29} are valid,
- # but \{"temp-01": 29}, \{"01\_temp": 29} or \{"\_\_temp\_01":
- # 29} are invalid in message payloads.
- # @return [Array]
- #
- class BatchPutMessageRequest < Struct.new(
- :channel_name,
- :messages)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] batch_put_message_error_entries
- # A list of any errors encountered when sending the messages to the
- # channel.
- # @return [Array]
- #
- class BatchPutMessageResponse < Struct.new(
- :batch_put_message_error_entries)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] pipeline_name
- # The name of pipeline for which data reprocessing is canceled.
- # @return [String]
- #
- # @!attribute [rw] reprocessing_id
- # The ID of the reprocessing task (returned by
- # `StartPipelineReprocessing`).
- # @return [String]
- #
- class CancelPipelineReprocessingRequest < Struct.new(
- :pipeline_name,
- :reprocessing_id)
- SENSITIVE = []
- include Aws::Structure
- end
-
- class CancelPipelineReprocessingResponse < Aws::EmptyStructure; end
-
- # A collection of data from an MQTT topic. Channels archive the raw,
- # unprocessed messages before publishing the data to a pipeline.
- #
- # @!attribute [rw] name
- # The name of the channel.
- # @return [String]
- #
- # @!attribute [rw] storage
- # Where channel data is stored. You can choose one of
- # `serviceManagedS3` or `customerManagedS3` storage. If not specified,
- # the default is `serviceManagedS3`. You can't change this storage
- # option after the channel is created.
- # @return [Types::ChannelStorage]
- #
- # @!attribute [rw] arn
- # The ARN of the channel.
- # @return [String]
- #
- # @!attribute [rw] status
- # The status of the channel.
- # @return [String]
- #
- # @!attribute [rw] retention_period
- # How long, in days, message data is kept for the channel.
- # @return [Types::RetentionPeriod]
- #
- # @!attribute [rw] creation_time
- # When the channel was created.
- # @return [Time]
- #
- # @!attribute [rw] last_update_time
- # When the channel was last updated.
- # @return [Time]
- #
- # @!attribute [rw] last_message_arrival_time
- # The last time when a new message arrived in the channel.
- #
- # IoT Analytics updates this value at most once per minute for one
- # channel. Hence, the `lastMessageArrivalTime` value is an
- # approximation.
- #
- # This feature only applies to messages that arrived in the data store
- # after October 23, 2020.
- # @return [Time]
- #
- class Channel < Struct.new(
- :name,
- :storage,
- :arn,
- :status,
- :retention_period,
- :creation_time,
- :last_update_time,
- :last_message_arrival_time)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # The activity that determines the source of the messages to be
- # processed.
- #
- # @!attribute [rw] name
- # The name of the channel activity.
- # @return [String]
- #
- # @!attribute [rw] channel_name
- # The name of the channel from which the messages are processed.
- # @return [String]
- #
- # @!attribute [rw] next
- # The next activity in the pipeline.
- # @return [String]
- #
- class ChannelActivity < Struct.new(
- :name,
- :channel_name,
- :next)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Specifies one or more sets of channel messages.
- #
- # @!attribute [rw] s3_paths
- # Specifies one or more keys that identify the Amazon Simple Storage
- # Service (Amazon S3) objects that save your channel messages.
- #
- # You must use the full path for the key.
- #
- # Example path: `channel/mychannel/__dt=2020-02-29
- # 00:00:00/1582940490000_1582940520000_123456789012_mychannel_0_2118.0.json.gz`
- # @return [Array]
- #
- class ChannelMessages < Struct.new(
- :s3_paths)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Statistics information about the channel.
- #
- # @!attribute [rw] size
- # The estimated size of the channel.
- # @return [Types::EstimatedResourceSize]
- #
- class ChannelStatistics < Struct.new(
- :size)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Where channel data is stored. You may choose one of
- # `serviceManagedS3`, `customerManagedS3` storage. If not specified, the
- # default is `serviceManagedS3`. This can't be changed after creation
- # of the channel.
- #
- # @!attribute [rw] service_managed_s3
- # Used to store channel data in an S3 bucket managed by IoT Analytics.
- # You can't change the choice of S3 storage after the data store is
- # created.
- # @return [Types::ServiceManagedChannelS3Storage]
- #
- # @!attribute [rw] customer_managed_s3
- # Used to store channel data in an S3 bucket that you manage. If
- # customer managed storage is selected, the `retentionPeriod`
- # parameter is ignored. You can't change the choice of S3 storage
- # after the data store is created.
- # @return [Types::CustomerManagedChannelS3Storage]
- #
- class ChannelStorage < Struct.new(
- :service_managed_s3,
- :customer_managed_s3)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Where channel data is stored.
- #
- # @!attribute [rw] service_managed_s3
- # Used to store channel data in an S3 bucket managed by IoT Analytics.
- # @return [Types::ServiceManagedChannelS3StorageSummary]
- #
- # @!attribute [rw] customer_managed_s3
- # Used to store channel data in an S3 bucket that you manage.
- # @return [Types::CustomerManagedChannelS3StorageSummary]
- #
- class ChannelStorageSummary < Struct.new(
- :service_managed_s3,
- :customer_managed_s3)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # A summary of information about a channel.
- #
- # @!attribute [rw] channel_name
- # The name of the channel.
- # @return [String]
- #
- # @!attribute [rw] channel_storage
- # Where channel data is stored.
- # @return [Types::ChannelStorageSummary]
- #
- # @!attribute [rw] status
- # The status of the channel.
- # @return [String]
- #
- # @!attribute [rw] creation_time
- # When the channel was created.
- # @return [Time]
- #
- # @!attribute [rw] last_update_time
- # The last time the channel was updated.
- # @return [Time]
- #
- # @!attribute [rw] last_message_arrival_time
- # The last time when a new message arrived in the channel.
- #
- # IoT Analytics updates this value at most once per minute for one
- # channel. Hence, the `lastMessageArrivalTime` value is an
- # approximation.
- #
- # This feature only applies to messages that arrived in the data store
- # after October 23, 2020.
- # @return [Time]
- #
- class ChannelSummary < Struct.new(
- :channel_name,
- :channel_storage,
- :status,
- :creation_time,
- :last_update_time,
- :last_message_arrival_time)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Contains information about a column that stores your data.
- #
- # @!attribute [rw] name
- # The name of the column.
- # @return [String]
- #
- # @!attribute [rw] type
- # The type of data. For more information about the supported data
- # types, see [Common data types][1] in the *Glue Developer Guide*.
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-common.html
- # @return [String]
- #
- class Column < Struct.new(
- :name,
- :type)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Information required to run the `containerAction` to produce dataset
- # contents.
- #
- # @!attribute [rw] image
- # The ARN of the Docker container stored in your account. The Docker
- # container contains an application and required support libraries and
- # is used to generate dataset contents.
- # @return [String]
- #
- # @!attribute [rw] execution_role_arn
- # The ARN of the role that gives permission to the system to access
- # required resources to run the `containerAction`. This includes, at
- # minimum, permission to retrieve the dataset contents that are the
- # input to the containerized application.
- # @return [String]
- #
- # @!attribute [rw] resource_configuration
- # Configuration of the resource that executes the `containerAction`.
- # @return [Types::ResourceConfiguration]
- #
- # @!attribute [rw] variables
- # The values of variables used in the context of the execution of the
- # containerized application (basically, parameters passed to the
- # application). Each variable must have a name and a value given by
- # one of `stringValue`, `datasetContentVersionValue`, or
- # `outputFileUriValue`.
- # @return [Array]
- #
- class ContainerDatasetAction < Struct.new(
- :image,
- :execution_role_arn,
- :resource_configuration,
- :variables)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] channel_name
- # The name of the channel.
- # @return [String]
- #
- # @!attribute [rw] channel_storage
- # Where channel data is stored. You can choose one of
- # `serviceManagedS3` or `customerManagedS3` storage. If not specified,
- # the default is `serviceManagedS3`. You can't change this storage
- # option after the channel is created.
- # @return [Types::ChannelStorage]
- #
- # @!attribute [rw] retention_period
- # How long, in days, message data is kept for the channel. When
- # `customerManagedS3` storage is selected, this parameter is ignored.
- # @return [Types::RetentionPeriod]
- #
- # @!attribute [rw] tags
- # Metadata which can be used to manage the channel.
- # @return [Array]
- #
- class CreateChannelRequest < Struct.new(
- :channel_name,
- :channel_storage,
- :retention_period,
- :tags)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] channel_name
- # The name of the channel.
- # @return [String]
- #
- # @!attribute [rw] channel_arn
- # The ARN of the channel.
- # @return [String]
- #
- # @!attribute [rw] retention_period
- # How long, in days, message data is kept for the channel.
- # @return [Types::RetentionPeriod]
- #
- class CreateChannelResponse < Struct.new(
- :channel_name,
- :channel_arn,
- :retention_period)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] dataset_name
- # The name of the dataset.
- # @return [String]
- #
- # @!attribute [rw] version_id
- # The version ID of the dataset content. To specify `versionId` for a
- # dataset content, the dataset must use a [DeltaTimer][1] filter.
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/iotanalytics/latest/APIReference/API_DeltaTime.html
- # @return [String]
- #
- class CreateDatasetContentRequest < Struct.new(
- :dataset_name,
- :version_id)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] version_id
- # The version ID of the dataset contents that are being created.
- # @return [String]
- #
- class CreateDatasetContentResponse < Struct.new(
- :version_id)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] dataset_name
- # The name of the dataset.
- # @return [String]
- #
- # @!attribute [rw] actions
- # A list of actions that create the dataset contents.
- # @return [Array]
- #
- # @!attribute [rw] triggers
- # A list of triggers. A trigger causes dataset contents to be
- # populated at a specified time interval or when another dataset's
- # contents are created. The list of triggers can be empty or contain
- # up to five `DataSetTrigger` objects.
- # @return [Array]
- #
- # @!attribute [rw] content_delivery_rules
- # When dataset contents are created, they are delivered to
- # destinations specified here.
- # @return [Array]
- #
- # @!attribute [rw] retention_period
- # Optional. How long, in days, versions of dataset contents are kept
- # for the dataset. If not specified or set to `null`, versions of
- # dataset contents are retained for at most 90 days. The number of
- # versions of dataset contents retained is determined by the
- # `versioningConfiguration` parameter. For more information, see [
- # Keeping Multiple Versions of IoT Analytics datasets][1] in the *IoT
- # Analytics User Guide*.
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/iotanalytics/latest/userguide/getting-started.html#aws-iot-analytics-dataset-versions
- # @return [Types::RetentionPeriod]
- #
- # @!attribute [rw] versioning_configuration
- # Optional. How many versions of dataset contents are kept. If not
- # specified or set to null, only the latest version plus the latest
- # succeeded version (if they are different) are kept for the time
- # period specified by the `retentionPeriod` parameter. For more
- # information, see [Keeping Multiple Versions of IoT Analytics
- # datasets][1] in the *IoT Analytics User Guide*.
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/iotanalytics/latest/userguide/getting-started.html#aws-iot-analytics-dataset-versions
- # @return [Types::VersioningConfiguration]
- #
- # @!attribute [rw] tags
- # Metadata which can be used to manage the dataset.
- # @return [Array]
- #
- # @!attribute [rw] late_data_rules
- # A list of data rules that send notifications to CloudWatch, when
- # data arrives late. To specify `lateDataRules`, the dataset must use
- # a [DeltaTimer][1] filter.
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/iotanalytics/latest/APIReference/API_DeltaTime.html
- # @return [Array]
- #
- class CreateDatasetRequest < Struct.new(
- :dataset_name,
- :actions,
- :triggers,
- :content_delivery_rules,
- :retention_period,
- :versioning_configuration,
- :tags,
- :late_data_rules)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] dataset_name
- # The name of the dataset.
- # @return [String]
- #
- # @!attribute [rw] dataset_arn
- # The ARN of the dataset.
- # @return [String]
- #
- # @!attribute [rw] retention_period
- # How long, in days, dataset contents are kept for the dataset.
- # @return [Types::RetentionPeriod]
- #
- class CreateDatasetResponse < Struct.new(
- :dataset_name,
- :dataset_arn,
- :retention_period)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] datastore_name
- # The name of the data store.
- # @return [String]
- #
- # @!attribute [rw] datastore_storage
- # Where data in a data store is stored.. You can choose
- # `serviceManagedS3` storage, `customerManagedS3` storage, or
- # `iotSiteWiseMultiLayerStorage` storage. The default is
- # `serviceManagedS3`. You can't change the choice of Amazon S3
- # storage after your data store is created.
- # @return [Types::DatastoreStorage]
- #
- # @!attribute [rw] retention_period
- # How long, in days, message data is kept for the data store. When
- # `customerManagedS3` storage is selected, this parameter is ignored.
- # @return [Types::RetentionPeriod]
- #
- # @!attribute [rw] tags
- # Metadata which can be used to manage the data store.
- # @return [Array]
- #
- # @!attribute [rw] file_format_configuration
- # Contains the configuration information of file formats. IoT
- # Analytics data stores support JSON and [Parquet][1].
- #
- # The default file format is JSON. You can specify only one format.
- #
- # You can't change the file format after you create the data store.
- #
- #
- #
- # [1]: https://parquet.apache.org/
- # @return [Types::FileFormatConfiguration]
- #
- # @!attribute [rw] datastore_partitions
- # Contains information about the partition dimensions in a data store.
- # @return [Types::DatastorePartitions]
- #
- class CreateDatastoreRequest < Struct.new(
- :datastore_name,
- :datastore_storage,
- :retention_period,
- :tags,
- :file_format_configuration,
- :datastore_partitions)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] datastore_name
- # The name of the data store.
- # @return [String]
- #
- # @!attribute [rw] datastore_arn
- # The ARN of the data store.
- # @return [String]
- #
- # @!attribute [rw] retention_period
- # How long, in days, message data is kept for the data store.
- # @return [Types::RetentionPeriod]
- #
- class CreateDatastoreResponse < Struct.new(
- :datastore_name,
- :datastore_arn,
- :retention_period)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] pipeline_name
- # The name of the pipeline.
- # @return [String]
- #
- # @!attribute [rw] pipeline_activities
- # A list of `PipelineActivity` objects. Activities perform
- # transformations on your messages, such as removing, renaming or
- # adding message attributes; filtering messages based on attribute
- # values; invoking your Lambda unctions on messages for advanced
- # processing; or performing mathematical transformations to normalize
- # device data.
- #
- # The list can be 2-25 `PipelineActivity` objects and must contain
- # both a `channel` and a `datastore` activity. Each entry in the list
- # must contain only one activity. For example:
- #
- # `pipelineActivities = [ { "channel": { ... } }, { "lambda": { ... }
- # }, ... ]`
- # @return [Array]
- #
- # @!attribute [rw] tags
- # Metadata which can be used to manage the pipeline.
- # @return [Array]
- #
- class CreatePipelineRequest < Struct.new(
- :pipeline_name,
- :pipeline_activities,
- :tags)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] pipeline_name
- # The name of the pipeline.
- # @return [String]
- #
- # @!attribute [rw] pipeline_arn
- # The ARN of the pipeline.
- # @return [String]
- #
- class CreatePipelineResponse < Struct.new(
- :pipeline_name,
- :pipeline_arn)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Used to store channel data in an S3 bucket that you manage. If
- # customer-managed storage is selected, the `retentionPeriod` parameter
- # is ignored. You can't change the choice of S3 storage after the data
- # store is created.
- #
- # @!attribute [rw] bucket
- # The name of the S3 bucket in which channel data is stored.
- # @return [String]
- #
- # @!attribute [rw] key_prefix
- # (Optional) The prefix used to create the keys of the channel data
- # objects. Each object in an S3 bucket has a key that is its unique
- # identifier in the bucket. Each object in a bucket has exactly one
- # key. The prefix must end with a forward slash (/).
- # @return [String]
- #
- # @!attribute [rw] role_arn
- # The ARN of the role that grants IoT Analytics permission to interact
- # with your Amazon S3 resources.
- # @return [String]
- #
- class CustomerManagedChannelS3Storage < Struct.new(
- :bucket,
- :key_prefix,
- :role_arn)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Used to store channel data in an S3 bucket that you manage.
- #
- # @!attribute [rw] bucket
- # The name of the S3 bucket in which channel data is stored.
- # @return [String]
- #
- # @!attribute [rw] key_prefix
- # (Optional) The prefix used to create the keys of the channel data
- # objects. Each object in an S3 bucket has a key that is its unique
- # identifier within the bucket (each object in a bucket has exactly
- # one key). The prefix must end with a forward slash (/).
- # @return [String]
- #
- # @!attribute [rw] role_arn
- # The ARN of the role that grants IoT Analytics permission to interact
- # with your Amazon S3 resources.
- # @return [String]
- #
- class CustomerManagedChannelS3StorageSummary < Struct.new(
- :bucket,
- :key_prefix,
- :role_arn)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # S3-customer-managed; When you choose customer-managed storage, the
- # `retentionPeriod` parameter is ignored. You can't change the choice
- # of Amazon S3 storage after your data store is created.
- #
- # @!attribute [rw] bucket
- # The name of the Amazon S3 bucket where your data is stored.
- # @return [String]
- #
- # @!attribute [rw] key_prefix
- # (Optional) The prefix used to create the keys of the data store data
- # objects. Each object in an Amazon S3 bucket has a key that is its
- # unique identifier in the bucket. Each object in a bucket has exactly
- # one key. The prefix must end with a forward slash (/).
- # @return [String]
- #
- # @!attribute [rw] role_arn
- # The ARN of the role that grants IoT Analytics permission to interact
- # with your Amazon S3 resources.
- # @return [String]
- #
- class CustomerManagedDatastoreS3Storage < Struct.new(
- :bucket,
- :key_prefix,
- :role_arn)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Contains information about the data store that you manage.
- #
- # @!attribute [rw] bucket
- # The name of the Amazon S3 bucket where your data is stored.
- # @return [String]
- #
- # @!attribute [rw] key_prefix
- # (Optional) The prefix used to create the keys of the data store data
- # objects. Each object in an Amazon S3 bucket has a key that is its
- # unique identifier in the bucket. Each object in a bucket has exactly
- # one key. The prefix must end with a forward slash (/).
- # @return [String]
- #
- # @!attribute [rw] role_arn
- # The ARN of the role that grants IoT Analytics permission to interact
- # with your Amazon S3 resources.
- # @return [String]
- #
- class CustomerManagedDatastoreS3StorageSummary < Struct.new(
- :bucket,
- :key_prefix,
- :role_arn)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Information about a dataset.
- #
- # @!attribute [rw] name
- # The name of the dataset.
- # @return [String]
- #
- # @!attribute [rw] arn
- # The ARN of the dataset.
- # @return [String]
- #
- # @!attribute [rw] actions
- # The `DatasetAction` objects that automatically create the dataset
- # contents.
- # @return [Array]
- #
- # @!attribute [rw] triggers
- # The `DatasetTrigger` objects that specify when the dataset is
- # automatically updated.
- # @return [Array]
- #
- # @!attribute [rw] content_delivery_rules
- # When dataset contents are created they are delivered to destinations
- # specified here.
- # @return [Array]
- #
- # @!attribute [rw] status
- # The status of the dataset.
- # @return [String]
- #
- # @!attribute [rw] creation_time
- # When the dataset was created.
- # @return [Time]
- #
- # @!attribute [rw] last_update_time
- # The last time the dataset was updated.
- # @return [Time]
- #
- # @!attribute [rw] retention_period
- # Optional. How long, in days, message data is kept for the dataset.
- # @return [Types::RetentionPeriod]
- #
- # @!attribute [rw] versioning_configuration
- # Optional. How many versions of dataset contents are kept. If not
- # specified or set to null, only the latest version plus the latest
- # succeeded version (if they are different) are kept for the time
- # period specified by the `retentionPeriod` parameter. For more
- # information, see [ Keeping Multiple Versions of IoT Analytics
- # datasets][1] in the *IoT Analytics User Guide*.
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/iotanalytics/latest/userguide/getting-started.html#aws-iot-analytics-dataset-versions
- # @return [Types::VersioningConfiguration]
- #
- # @!attribute [rw] late_data_rules
- # A list of data rules that send notifications to CloudWatch, when
- # data arrives late. To specify `lateDataRules`, the dataset must use
- # a [DeltaTimer][1] filter.
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/iotanalytics/latest/APIReference/API_DeltaTime.html
- # @return [Array]
- #
- class Dataset < Struct.new(
- :name,
- :arn,
- :actions,
- :triggers,
- :content_delivery_rules,
- :status,
- :creation_time,
- :last_update_time,
- :retention_period,
- :versioning_configuration,
- :late_data_rules)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # A `DatasetAction` object that specifies how dataset contents are
- # automatically created.
- #
- # @!attribute [rw] action_name
- # The name of the dataset action by which dataset contents are
- # automatically created.
- # @return [String]
- #
- # @!attribute [rw] query_action
- # An `SqlQueryDatasetAction` object that uses an SQL query to
- # automatically create dataset contents.
- # @return [Types::SqlQueryDatasetAction]
- #
- # @!attribute [rw] container_action
- # Information that allows the system to run a containerized
- # application to create the dataset contents. The application must be
- # in a Docker container along with any required support libraries.
- # @return [Types::ContainerDatasetAction]
- #
- class DatasetAction < Struct.new(
- :action_name,
- :query_action,
- :container_action)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Information about the action that automatically creates the dataset's
- # contents.
- #
- # @!attribute [rw] action_name
- # The name of the action that automatically creates the dataset's
- # contents.
- # @return [String]
- #
- # @!attribute [rw] action_type
- # The type of action by which the dataset's contents are
- # automatically created.
- # @return [String]
- #
- class DatasetActionSummary < Struct.new(
- :action_name,
- :action_type)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # The destination to which dataset contents are delivered.
- #
- # @!attribute [rw] iot_events_destination_configuration
- # Configuration information for delivery of dataset contents to IoT
- # Events.
- # @return [Types::IotEventsDestinationConfiguration]
- #
- # @!attribute [rw] s3_destination_configuration
- # Configuration information for delivery of dataset contents to Amazon
- # S3.
- # @return [Types::S3DestinationConfiguration]
- #
- class DatasetContentDeliveryDestination < Struct.new(
- :iot_events_destination_configuration,
- :s3_destination_configuration)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # When dataset contents are created, they are delivered to destination
- # specified here.
- #
- # @!attribute [rw] entry_name
- # The name of the dataset content delivery rules entry.
- # @return [String]
- #
- # @!attribute [rw] destination
- # The destination to which dataset contents are delivered.
- # @return [Types::DatasetContentDeliveryDestination]
- #
- class DatasetContentDeliveryRule < Struct.new(
- :entry_name,
- :destination)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # The state of the dataset contents and the reason they are in this
- # state.
- #
- # @!attribute [rw] state
- # The state of the dataset contents. Can be one of READY, CREATING,
- # SUCCEEDED, or FAILED.
- # @return [String]
- #
- # @!attribute [rw] reason
- # The reason the dataset contents are in this state.
- # @return [String]
- #
- class DatasetContentStatus < Struct.new(
- :state,
- :reason)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Summary information about dataset contents.
- #
- # @!attribute [rw] version
- # The version of the dataset contents.
- # @return [String]
- #
- # @!attribute [rw] status
- # The status of the dataset contents.
- # @return [Types::DatasetContentStatus]
- #
- # @!attribute [rw] creation_time
- # The actual time the creation of the dataset contents was started.
- # @return [Time]
- #
- # @!attribute [rw] schedule_time
- # The time the creation of the dataset contents was scheduled to
- # start.
- # @return [Time]
- #
- # @!attribute [rw] completion_time
- # The time the dataset content status was updated to SUCCEEDED or
- # FAILED.
- # @return [Time]
- #
- class DatasetContentSummary < Struct.new(
- :version,
- :status,
- :creation_time,
- :schedule_time,
- :completion_time)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # The dataset whose latest contents are used as input to the notebook or
- # application.
- #
- # @!attribute [rw] dataset_name
- # The name of the dataset whose latest contents are used as input to
- # the notebook or application.
- # @return [String]
- #
- class DatasetContentVersionValue < Struct.new(
- :dataset_name)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # The reference to a dataset entry.
- #
- # @!attribute [rw] entry_name
- # The name of the dataset item.
- # @return [String]
- #
- # @!attribute [rw] data_uri
- # The presigned URI of the dataset item.
- # @return [String]
- #
- class DatasetEntry < Struct.new(
- :entry_name,
- :data_uri)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # A summary of information about a dataset.
- #
- # @!attribute [rw] dataset_name
- # The name of the dataset.
- # @return [String]
- #
- # @!attribute [rw] status
- # The status of the dataset.
- # @return [String]
- #
- # @!attribute [rw] creation_time
- # The time the dataset was created.
- # @return [Time]
- #
- # @!attribute [rw] last_update_time
- # The last time the dataset was updated.
- # @return [Time]
- #
- # @!attribute [rw] triggers
- # A list of triggers. A trigger causes dataset content to be populated
- # at a specified time interval or when another dataset is populated.
- # The list of triggers can be empty or contain up to five
- # `DataSetTrigger` objects
- # @return [Array]
- #
- # @!attribute [rw] actions
- # A list of `DataActionSummary` objects.
- # @return [Array]
- #
- class DatasetSummary < Struct.new(
- :dataset_name,
- :status,
- :creation_time,
- :last_update_time,
- :triggers,
- :actions)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # The `DatasetTrigger` that specifies when the dataset is automatically
- # updated.
- #
- # @!attribute [rw] schedule
- # The Schedule when the trigger is initiated.
- # @return [Types::Schedule]
- #
- # @!attribute [rw] dataset
- # The dataset whose content creation triggers the creation of this
- # dataset's contents.
- # @return [Types::TriggeringDataset]
- #
- class DatasetTrigger < Struct.new(
- :schedule,
- :dataset)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Information about a data store.
- #
- # @!attribute [rw] name
- # The name of the data store.
- # @return [String]
- #
- # @!attribute [rw] storage
- # Where data in a data store is stored.. You can choose
- # `serviceManagedS3` storage, `customerManagedS3` storage, or
- # `iotSiteWiseMultiLayerStorage` storage. The default is
- # `serviceManagedS3`. You can't change the choice of Amazon S3
- # storage after your data store is created.
- # @return [Types::DatastoreStorage]
- #
- # @!attribute [rw] arn
- # The ARN of the data store.
- # @return [String]
- #
- # @!attribute [rw] status
- # The status of a data store:
- #
- # CREATING
- #
- # : The data store is being created.
- #
- # ACTIVE
- #
- # : The data store has been created and can be used.
- #
- # DELETING
- #
- # : The data store is being deleted.
- # @return [String]
- #
- # @!attribute [rw] retention_period
- # How long, in days, message data is kept for the data store. When
- # `customerManagedS3` storage is selected, this parameter is ignored.
- # @return [Types::RetentionPeriod]
- #
- # @!attribute [rw] creation_time
- # When the data store was created.
- # @return [Time]
- #
- # @!attribute [rw] last_update_time
- # The last time the data store was updated.
- # @return [Time]
- #
- # @!attribute [rw] last_message_arrival_time
- # The last time when a new message arrived in the data store.
- #
- # IoT Analytics updates this value at most once per minute for Amazon
- # Simple Storage Service one data store. Hence, the
- # `lastMessageArrivalTime` value is an approximation.
- #
- # This feature only applies to messages that arrived in the data store
- # after October 23, 2020.
- # @return [Time]
- #
- # @!attribute [rw] file_format_configuration
- # Contains the configuration information of file formats. IoT
- # Analytics data stores support JSON and [Parquet][1].
- #
- # The default file format is JSON. You can specify only one format.
- #
- # You can't change the file format after you create the data store.
- #
- #
- #
- # [1]: https://parquet.apache.org/
- # @return [Types::FileFormatConfiguration]
- #
- # @!attribute [rw] datastore_partitions
- # Contains information about the partition dimensions in a data store.
- # @return [Types::DatastorePartitions]
- #
- class Datastore < Struct.new(
- :name,
- :storage,
- :arn,
- :status,
- :retention_period,
- :creation_time,
- :last_update_time,
- :last_message_arrival_time,
- :file_format_configuration,
- :datastore_partitions)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # The datastore activity that specifies where to store the processed
- # data.
- #
- # @!attribute [rw] name
- # The name of the datastore activity.
- # @return [String]
- #
- # @!attribute [rw] datastore_name
- # The name of the data store where processed messages are stored.
- # @return [String]
- #
- class DatastoreActivity < Struct.new(
- :name,
- :datastore_name)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Used to store data used by IoT SiteWise in an Amazon S3 bucket that
- # you manage. You can't change the choice of Amazon S3 storage after
- # your data store is created.
- #
- # @!attribute [rw] customer_managed_s3_storage
- # Used to store data used by IoT SiteWise in an Amazon S3 bucket that
- # you manage.
- # @return [Types::IotSiteWiseCustomerManagedDatastoreS3Storage]
- #
- class DatastoreIotSiteWiseMultiLayerStorage < Struct.new(
- :customer_managed_s3_storage)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Contains information about the data store that you manage, which
- # stores data used by IoT SiteWise.
- #
- # @!attribute [rw] customer_managed_s3_storage
- # Used to store data used by IoT SiteWise in an Amazon S3 bucket that
- # you manage.
- # @return [Types::IotSiteWiseCustomerManagedDatastoreS3StorageSummary]
- #
- class DatastoreIotSiteWiseMultiLayerStorageSummary < Struct.new(
- :customer_managed_s3_storage)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # A single dimension to partition a data store. The dimension must be an
- # `AttributePartition` or a `TimestampPartition`.
- #
- # @!attribute [rw] attribute_partition
- # A partition dimension defined by an `attributeName`.
- # @return [Types::Partition]
- #
- # @!attribute [rw] timestamp_partition
- # A partition dimension defined by a timestamp attribute.
- # @return [Types::TimestampPartition]
- #
- class DatastorePartition < Struct.new(
- :attribute_partition,
- :timestamp_partition)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Contains information about the partition dimensions in a data store.
- #
- # @!attribute [rw] partitions
- # A list of partition dimensions in a data store.
- # @return [Array]
- #
- class DatastorePartitions < Struct.new(
- :partitions)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Statistical information about the data store.
- #
- # @!attribute [rw] size
- # The estimated size of the data store.
- # @return [Types::EstimatedResourceSize]
- #
- class DatastoreStatistics < Struct.new(
- :size)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Where data in a data store is stored.. You can choose
- # `serviceManagedS3` storage, `customerManagedS3` storage, or
- # `iotSiteWiseMultiLayerStorage` storage. The default is
- # `serviceManagedS3`. You can't change the choice of Amazon S3 storage
- # after your data store is created.
- #
- # @!attribute [rw] service_managed_s3
- # Used to store data in an Amazon S3 bucket managed by IoT Analytics.
- # You can't change the choice of Amazon S3 storage after your data
- # store is created.
- # @return [Types::ServiceManagedDatastoreS3Storage]
- #
- # @!attribute [rw] customer_managed_s3
- # S3-customer-managed; When you choose customer-managed storage, the
- # `retentionPeriod` parameter is ignored. You can't change the choice
- # of Amazon S3 storage after your data store is created.
- # @return [Types::CustomerManagedDatastoreS3Storage]
- #
- # @!attribute [rw] iot_site_wise_multi_layer_storage
- # Used to store data used by IoT SiteWise in an Amazon S3 bucket that
- # you manage. You can't change the choice of Amazon S3 storage after
- # your data store is created.
- # @return [Types::DatastoreIotSiteWiseMultiLayerStorage]
- #
- class DatastoreStorage < Struct.new(
- :service_managed_s3,
- :customer_managed_s3,
- :iot_site_wise_multi_layer_storage)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Contains information about your data store.
- #
- # @!attribute [rw] service_managed_s3
- # Used to store data in an Amazon S3 bucket managed by IoT Analytics.
- # @return [Types::ServiceManagedDatastoreS3StorageSummary]
- #
- # @!attribute [rw] customer_managed_s3
- # Used to store data in an Amazon S3 bucket managed by IoT Analytics.
- # @return [Types::CustomerManagedDatastoreS3StorageSummary]
- #
- # @!attribute [rw] iot_site_wise_multi_layer_storage
- # Used to store data used by IoT SiteWise in an Amazon S3 bucket that
- # you manage.
- # @return [Types::DatastoreIotSiteWiseMultiLayerStorageSummary]
- #
- class DatastoreStorageSummary < Struct.new(
- :service_managed_s3,
- :customer_managed_s3,
- :iot_site_wise_multi_layer_storage)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # A summary of information about a data store.
- #
- # @!attribute [rw] datastore_name
- # The name of the data store.
- # @return [String]
- #
- # @!attribute [rw] datastore_storage
- # Where data in a data store is stored.
- # @return [Types::DatastoreStorageSummary]
- #
- # @!attribute [rw] status
- # The status of the data store.
- # @return [String]
- #
- # @!attribute [rw] creation_time
- # When the data store was created.
- # @return [Time]
- #
- # @!attribute [rw] last_update_time
- # The last time the data store was updated.
- # @return [Time]
- #
- # @!attribute [rw] last_message_arrival_time
- # The last time when a new message arrived in the data store.
- #
- # IoT Analytics updates this value at most once per minute for Amazon
- # Simple Storage Service one data store. Hence, the
- # `lastMessageArrivalTime` value is an approximation.
- #
- # This feature only applies to messages that arrived in the data store
- # after October 23, 2020.
- # @return [Time]
- #
- # @!attribute [rw] file_format_type
- # The file format of the data in the data store.
- # @return [String]
- #
- # @!attribute [rw] datastore_partitions
- # Contains information about the partition dimensions in a data store.
- # @return [Types::DatastorePartitions]
- #
- class DatastoreSummary < Struct.new(
- :datastore_name,
- :datastore_storage,
- :status,
- :creation_time,
- :last_update_time,
- :last_message_arrival_time,
- :file_format_type,
- :datastore_partitions)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] channel_name
- # The name of the channel to delete.
- # @return [String]
- #
- class DeleteChannelRequest < Struct.new(
- :channel_name)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] dataset_name
- # The name of the dataset whose content is deleted.
- # @return [String]
- #
- # @!attribute [rw] version_id
- # The version of the dataset whose content is deleted. You can also
- # use the strings "$LATEST" or "$LATEST\_SUCCEEDED" to delete the
- # latest or latest successfully completed data set. If not specified,
- # "$LATEST\_SUCCEEDED" is the default.
- # @return [String]
- #
- class DeleteDatasetContentRequest < Struct.new(
- :dataset_name,
- :version_id)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] dataset_name
- # The name of the dataset to delete.
- # @return [String]
- #
- class DeleteDatasetRequest < Struct.new(
- :dataset_name)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] datastore_name
- # The name of the data store to delete.
- # @return [String]
- #
- class DeleteDatastoreRequest < Struct.new(
- :datastore_name)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] pipeline_name
- # The name of the pipeline to delete.
- # @return [String]
- #
- class DeletePipelineRequest < Struct.new(
- :pipeline_name)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Used to limit data to that which has arrived since the last execution
- # of the action.
- #
- # @!attribute [rw] offset_seconds
- # The number of seconds of estimated in-flight lag time of message
- # data. When you create dataset contents using message data from a
- # specified timeframe, some message data might still be in flight when
- # processing begins, and so do not arrive in time to be processed. Use
- # this field to make allowances for the in flight time of your message
- # data, so that data not processed from a previous timeframe is
- # included with the next timeframe. Otherwise, missed message data
- # would be excluded from processing during the next timeframe too,
- # because its timestamp places it within the previous timeframe.
- # @return [Integer]
- #
- # @!attribute [rw] time_expression
- # An expression by which the time of the message data might be
- # determined. This can be the name of a timestamp field or a SQL
- # expression that is used to derive the time the message data was
- # generated.
- # @return [String]
- #
- class DeltaTime < Struct.new(
- :offset_seconds,
- :time_expression)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # A structure that contains the configuration information of a delta
- # time session window.
- #
- # [ `DeltaTime` ][1] specifies a time interval. You can use `DeltaTime`
- # to create dataset contents with data that has arrived in the data
- # store since the last execution. For an example of `DeltaTime`, see [
- # Creating a SQL dataset with a delta window (CLI)][2] in the *IoT
- # Analytics User Guide*.
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/iotanalytics/latest/APIReference/API_DeltaTime.html
- # [2]: https://docs.aws.amazon.com/iotanalytics/latest/userguide/automate-create-dataset.html#automate-example6
- #
- # @!attribute [rw] timeout_in_minutes
- # A time interval. You can use `timeoutInMinutes` so that IoT
- # Analytics can batch up late data notifications that have been
- # generated since the last execution. IoT Analytics sends one batch of
- # notifications to Amazon CloudWatch Events at one time.
- #
- # For more information about how to write a timestamp expression, see
- # [Date and Time Functions and Operators][1], in the *Presto 0.172
- # Documentation*.
- #
- #
- #
- # [1]: https://prestodb.io/docs/0.172/functions/datetime.html
- # @return [Integer]
- #
- class DeltaTimeSessionWindowConfiguration < Struct.new(
- :timeout_in_minutes)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] channel_name
- # The name of the channel whose information is retrieved.
- # @return [String]
- #
- # @!attribute [rw] include_statistics
- # If true, additional statistical information about the channel is
- # included in the response. This feature can't be used with a channel
- # whose S3 storage is customer-managed.
- # @return [Boolean]
- #
- class DescribeChannelRequest < Struct.new(
- :channel_name,
- :include_statistics)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] channel
- # An object that contains information about the channel.
- # @return [Types::Channel]
- #
- # @!attribute [rw] statistics
- # Statistics about the channel. Included if the `includeStatistics`
- # parameter is set to `true` in the request.
- # @return [Types::ChannelStatistics]
- #
- class DescribeChannelResponse < Struct.new(
- :channel,
- :statistics)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] dataset_name
- # The name of the dataset whose information is retrieved.
- # @return [String]
- #
- class DescribeDatasetRequest < Struct.new(
- :dataset_name)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] dataset
- # An object that contains information about the dataset.
- # @return [Types::Dataset]
- #
- class DescribeDatasetResponse < Struct.new(
- :dataset)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] datastore_name
- # The name of the data store
- # @return [String]
- #
- # @!attribute [rw] include_statistics
- # If true, additional statistical information about the data store is
- # included in the response. This feature can't be used with a data
- # store whose S3 storage is customer-managed.
- # @return [Boolean]
- #
- class DescribeDatastoreRequest < Struct.new(
- :datastore_name,
- :include_statistics)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] datastore
- # Information about the data store.
- # @return [Types::Datastore]
- #
- # @!attribute [rw] statistics
- # Additional statistical information about the data store. Included if
- # the `includeStatistics` parameter is set to `true` in the request.
- # @return [Types::DatastoreStatistics]
- #
- class DescribeDatastoreResponse < Struct.new(
- :datastore,
- :statistics)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @api private
- #
- class DescribeLoggingOptionsRequest < Aws::EmptyStructure; end
-
- # @!attribute [rw] logging_options
- # The current settings of the IoT Analytics logging options.
- # @return [Types::LoggingOptions]
- #
- class DescribeLoggingOptionsResponse < Struct.new(
- :logging_options)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] pipeline_name
- # The name of the pipeline whose information is retrieved.
- # @return [String]
- #
- class DescribePipelineRequest < Struct.new(
- :pipeline_name)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] pipeline
- # A `Pipeline` object that contains information about the pipeline.
- # @return [Types::Pipeline]
- #
- class DescribePipelineResponse < Struct.new(
- :pipeline)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # An activity that adds data from the IoT device registry to your
- # message.
- #
- # @!attribute [rw] name
- # The name of the `deviceRegistryEnrich` activity.
- # @return [String]
- #
- # @!attribute [rw] attribute
- # The name of the attribute that is added to the message.
- # @return [String]
- #
- # @!attribute [rw] thing_name
- # The name of the IoT device whose registry information is added to
- # the message.
- # @return [String]
- #
- # @!attribute [rw] role_arn
- # The ARN of the role that allows access to the device's registry
- # information.
- # @return [String]
- #
- # @!attribute [rw] next
- # The next activity in the pipeline.
- # @return [String]
- #
- class DeviceRegistryEnrichActivity < Struct.new(
- :name,
- :attribute,
- :thing_name,
- :role_arn,
- :next)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # An activity that adds information from the IoT Device Shadow service
- # to a message.
- #
- # @!attribute [rw] name
- # The name of the `deviceShadowEnrich` activity.
- # @return [String]
- #
- # @!attribute [rw] attribute
- # The name of the attribute that is added to the message.
- # @return [String]
- #
- # @!attribute [rw] thing_name
- # The name of the IoT device whose shadow information is added to the
- # message.
- # @return [String]
- #
- # @!attribute [rw] role_arn
- # The ARN of the role that allows access to the device's shadow.
- # @return [String]
- #
- # @!attribute [rw] next
- # The next activity in the pipeline.
- # @return [String]
- #
- class DeviceShadowEnrichActivity < Struct.new(
- :name,
- :attribute,
- :thing_name,
- :role_arn,
- :next)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # The estimated size of the resource.
- #
- # @!attribute [rw] estimated_size_in_bytes
- # The estimated size of the resource, in bytes.
- # @return [Float]
- #
- # @!attribute [rw] estimated_on
- # The time when the estimate of the size of the resource was made.
- # @return [Time]
- #
- class EstimatedResourceSize < Struct.new(
- :estimated_size_in_bytes,
- :estimated_on)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Contains the configuration information of file formats. IoT Analytics
- # data stores support JSON and [Parquet][1].
- #
- # The default file format is JSON. You can specify only one format.
- #
- # You can't change the file format after you create the data store.
- #
- #
- #
- # [1]: https://parquet.apache.org/
- #
- # @!attribute [rw] json_configuration
- # Contains the configuration information of the JSON format.
- # @return [Types::JsonConfiguration]
- #
- # @!attribute [rw] parquet_configuration
- # Contains the configuration information of the Parquet format.
- # @return [Types::ParquetConfiguration]
- #
- class FileFormatConfiguration < Struct.new(
- :json_configuration,
- :parquet_configuration)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # An activity that filters a message based on its attributes.
- #
- # @!attribute [rw] name
- # The name of the filter activity.
- # @return [String]
- #
- # @!attribute [rw] filter
- # An expression that looks like a SQL WHERE clause that must return a
- # Boolean value. Messages that satisfy the condition are passed to the
- # next activity.
- # @return [String]
- #
- # @!attribute [rw] next
- # The next activity in the pipeline.
- # @return [String]
- #
- class FilterActivity < Struct.new(
- :name,
- :filter,
- :next)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] dataset_name
- # The name of the dataset whose contents are retrieved.
- # @return [String]
- #
- # @!attribute [rw] version_id
- # The version of the dataset whose contents are retrieved. You can
- # also use the strings "$LATEST" or "$LATEST\_SUCCEEDED" to
- # retrieve the contents of the latest or latest successfully completed
- # dataset. If not specified, "$LATEST\_SUCCEEDED" is the default.
- # @return [String]
- #
- class GetDatasetContentRequest < Struct.new(
- :dataset_name,
- :version_id)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] entries
- # A list of `DatasetEntry` objects.
- # @return [Array]
- #
- # @!attribute [rw] timestamp
- # The time when the request was made.
- # @return [Time]
- #
- # @!attribute [rw] status
- # The status of the dataset content.
- # @return [Types::DatasetContentStatus]
- #
- class GetDatasetContentResponse < Struct.new(
- :entries,
- :timestamp,
- :status)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Configuration information for coordination with Glue, a fully managed
- # extract, transform and load (ETL) service.
- #
- # @!attribute [rw] table_name
- # The name of the table in your Glue Data Catalog that is used to
- # perform the ETL operations. An Glue Data Catalog table contains
- # partitioned data and descriptions of data sources and targets.
- # @return [String]
- #
- # @!attribute [rw] database_name
- # The name of the database in your Glue Data Catalog in which the
- # table is located. An Glue Data Catalog database contains metadata
- # tables.
- # @return [String]
- #
- class GlueConfiguration < Struct.new(
- :table_name,
- :database_name)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # There was an internal failure.
- #
- # @!attribute [rw] message
- # @return [String]
- #
- class InternalFailureException < Struct.new(
- :message)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # The request was not valid.
- #
- # @!attribute [rw] message
- # @return [String]
- #
- class InvalidRequestException < Struct.new(
- :message)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Configuration information for delivery of dataset contents to IoT
- # Events.
- #
- # @!attribute [rw] input_name
- # The name of the IoT Events input to which dataset contents are
- # delivered.
- # @return [String]
- #
- # @!attribute [rw] role_arn
- # The ARN of the role that grants IoT Analytics permission to deliver
- # dataset contents to an IoT Events input.
- # @return [String]
- #
- class IotEventsDestinationConfiguration < Struct.new(
- :input_name,
- :role_arn)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Used to store data used by IoT SiteWise in an Amazon S3 bucket that
- # you manage. You can't change the choice of Amazon S3 storage after
- # your data store is created.
- #
- # @!attribute [rw] bucket
- # The name of the Amazon S3 bucket where your data is stored.
- # @return [String]
- #
- # @!attribute [rw] key_prefix
- # (Optional) The prefix used to create the keys of the data store data
- # objects. Each object in an Amazon S3 bucket has a key that is its
- # unique identifier in the bucket. Each object in a bucket has exactly
- # one key. The prefix must end with a forward slash (/).
- # @return [String]
- #
- class IotSiteWiseCustomerManagedDatastoreS3Storage < Struct.new(
- :bucket,
- :key_prefix)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Contains information about the data store that you manage, which
- # stores data used by IoT SiteWise.
- #
- # @!attribute [rw] bucket
- # The name of the Amazon S3 bucket where your data is stored.
- # @return [String]
- #
- # @!attribute [rw] key_prefix
- # (Optional) The prefix used to create the keys of the data store data
- # objects. Each object in an Amazon S3 bucket has a key that is its
- # unique identifier in the bucket. Each object in a bucket has exactly
- # one key. The prefix must end with a forward slash (/).
- # @return [String]
- #
- class IotSiteWiseCustomerManagedDatastoreS3StorageSummary < Struct.new(
- :bucket,
- :key_prefix)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Contains the configuration information of the JSON format.
- #
- # @api private
- #
- class JsonConfiguration < Aws::EmptyStructure; end
-
- # An activity that runs a Lambda function to modify the message.
- #
- # @!attribute [rw] name
- # The name of the lambda activity.
- # @return [String]
- #
- # @!attribute [rw] lambda_name
- # The name of the Lambda function that is run on the message.
- # @return [String]
- #
- # @!attribute [rw] batch_size
- # The number of messages passed to the Lambda function for processing.
- #
- # The Lambda function must be able to process all of these messages
- # within five minutes, which is the maximum timeout duration for
- # Lambda functions.
- # @return [Integer]
- #
- # @!attribute [rw] next
- # The next activity in the pipeline.
- # @return [String]
- #
- class LambdaActivity < Struct.new(
- :name,
- :lambda_name,
- :batch_size,
- :next)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # A structure that contains the name and configuration information of a
- # late data rule.
- #
- # @!attribute [rw] rule_name
- # The name of the late data rule.
- # @return [String]
- #
- # @!attribute [rw] rule_configuration
- # The information needed to configure the late data rule.
- # @return [Types::LateDataRuleConfiguration]
- #
- class LateDataRule < Struct.new(
- :rule_name,
- :rule_configuration)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # The information needed to configure a delta time session window.
- #
- # @!attribute [rw] delta_time_session_window_configuration
- # The information needed to configure a delta time session window.
- # @return [Types::DeltaTimeSessionWindowConfiguration]
- #
- class LateDataRuleConfiguration < Struct.new(
- :delta_time_session_window_configuration)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # The command caused an internal limit to be exceeded.
- #
- # @!attribute [rw] message
- # @return [String]
- #
- class LimitExceededException < Struct.new(
- :message)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] next_token
- # The token for the next set of results.
- # @return [String]
- #
- # @!attribute [rw] max_results
- # The maximum number of results to return in this request.
- #
- # The default value is 100.
- # @return [Integer]
- #
- class ListChannelsRequest < Struct.new(
- :next_token,
- :max_results)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] channel_summaries
- # A list of `ChannelSummary` objects.
- # @return [Array]
- #
- # @!attribute [rw] next_token
- # The token to retrieve the next set of results, or `null` if there
- # are no more results.
- # @return [String]
- #
- class ListChannelsResponse < Struct.new(
- :channel_summaries,
- :next_token)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] dataset_name
- # The name of the dataset whose contents information you want to list.
- # @return [String]
- #
- # @!attribute [rw] next_token
- # The token for the next set of results.
- # @return [String]
- #
- # @!attribute [rw] max_results
- # The maximum number of results to return in this request.
- # @return [Integer]
- #
- # @!attribute [rw] scheduled_on_or_after
- # A filter to limit results to those dataset contents whose creation
- # is scheduled on or after the given time. See the field
- # `triggers.schedule` in the `CreateDataset` request. (timestamp)
- # @return [Time]
- #
- # @!attribute [rw] scheduled_before
- # A filter to limit results to those dataset contents whose creation
- # is scheduled before the given time. See the field
- # `triggers.schedule` in the `CreateDataset` request. (timestamp)
- # @return [Time]
- #
- class ListDatasetContentsRequest < Struct.new(
- :dataset_name,
- :next_token,
- :max_results,
- :scheduled_on_or_after,
- :scheduled_before)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] dataset_content_summaries
- # Summary information about dataset contents that have been created.
- # @return [Array]
- #
- # @!attribute [rw] next_token
- # The token to retrieve the next set of results, or `null` if there
- # are no more results.
- # @return [String]
- #
- class ListDatasetContentsResponse < Struct.new(
- :dataset_content_summaries,
- :next_token)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] next_token
- # The token for the next set of results.
- # @return [String]
- #
- # @!attribute [rw] max_results
- # The maximum number of results to return in this request.
- #
- # The default value is 100.
- # @return [Integer]
- #
- class ListDatasetsRequest < Struct.new(
- :next_token,
- :max_results)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] dataset_summaries
- # A list of `DatasetSummary` objects.
- # @return [Array]
- #
- # @!attribute [rw] next_token
- # The token to retrieve the next set of results, or `null` if there
- # are no more results.
- # @return [String]
- #
- class ListDatasetsResponse < Struct.new(
- :dataset_summaries,
- :next_token)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] next_token
- # The token for the next set of results.
- # @return [String]
- #
- # @!attribute [rw] max_results
- # The maximum number of results to return in this request.
- #
- # The default value is 100.
- # @return [Integer]
- #
- class ListDatastoresRequest < Struct.new(
- :next_token,
- :max_results)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] datastore_summaries
- # A list of `DatastoreSummary` objects.
- # @return [Array]
- #
- # @!attribute [rw] next_token
- # The token to retrieve the next set of results, or `null` if there
- # are no more results.
- # @return [String]
- #
- class ListDatastoresResponse < Struct.new(
- :datastore_summaries,
- :next_token)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] next_token
- # The token for the next set of results.
- # @return [String]
- #
- # @!attribute [rw] max_results
- # The maximum number of results to return in this request.
- #
- # The default value is 100.
- # @return [Integer]
- #
- class ListPipelinesRequest < Struct.new(
- :next_token,
- :max_results)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] pipeline_summaries
- # A list of `PipelineSummary` objects.
- # @return [Array]
- #
- # @!attribute [rw] next_token
- # The token to retrieve the next set of results, or `null` if there
- # are no more results.
- # @return [String]
- #
- class ListPipelinesResponse < Struct.new(
- :pipeline_summaries,
- :next_token)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] resource_arn
- # The ARN of the resource whose tags you want to list.
- # @return [String]
- #
- class ListTagsForResourceRequest < Struct.new(
- :resource_arn)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] tags
- # The tags (metadata) that you have assigned to the resource.
- # @return [Array]
- #
- class ListTagsForResourceResponse < Struct.new(
- :tags)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Information about logging options.
- #
- # @!attribute [rw] role_arn
- # The ARN of the role that grants permission to IoT Analytics to
- # perform logging.
- # @return [String]
- #
- # @!attribute [rw] level
- # The logging level. Currently, only ERROR is supported.
- # @return [String]
- #
- # @!attribute [rw] enabled
- # If true, logging is enabled for IoT Analytics.
- # @return [Boolean]
- #
- class LoggingOptions < Struct.new(
- :role_arn,
- :level,
- :enabled)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # An activity that computes an arithmetic expression using the
- # message's attributes.
- #
- # @!attribute [rw] name
- # The name of the math activity.
- # @return [String]
- #
- # @!attribute [rw] attribute
- # The name of the attribute that contains the result of the math
- # operation.
- # @return [String]
- #
- # @!attribute [rw] math
- # An expression that uses one or more existing attributes and must
- # return an integer value.
- # @return [String]
- #
- # @!attribute [rw] next
- # The next activity in the pipeline.
- # @return [String]
- #
- class MathActivity < Struct.new(
- :name,
- :attribute,
- :math,
- :next)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Information about a message.
- #
- # @!attribute [rw] message_id
- # The ID you want to assign to the message. Each `messageId` must be
- # unique within each batch sent.
- # @return [String]
- #
- # @!attribute [rw] payload
- # The payload of the message. This can be a JSON string or a
- # base64-encoded string representing binary data, in which case you
- # must decode it by means of a pipeline activity.
- # @return [String]
- #
- class Message < Struct.new(
- :message_id,
- :payload)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # The value of the variable as a structure that specifies an output file
- # URI.
- #
- # @!attribute [rw] file_name
- # The URI of the location where dataset contents are stored, usually
- # the URI of a file in an S3 bucket.
- # @return [String]
- #
- class OutputFileUriValue < Struct.new(
- :file_name)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Contains the configuration information of the Parquet format.
- #
- # @!attribute [rw] schema_definition
- # Information needed to define a schema.
- # @return [Types::SchemaDefinition]
- #
- class ParquetConfiguration < Struct.new(
- :schema_definition)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # A partition dimension defined by an attribute.
- #
- # @!attribute [rw] attribute_name
- # The name of the attribute that defines a partition dimension.
- # @return [String]
- #
- class Partition < Struct.new(
- :attribute_name)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Contains information about a pipeline.
- #
- # @!attribute [rw] name
- # The name of the pipeline.
- # @return [String]
- #
- # @!attribute [rw] arn
- # The ARN of the pipeline.
- # @return [String]
- #
- # @!attribute [rw] activities
- # The activities that perform transformations on the messages.
- # @return [Array]
- #
- # @!attribute [rw] reprocessing_summaries
- # A summary of information about the pipeline reprocessing.
- # @return [Array]
- #
- # @!attribute [rw] creation_time
- # When the pipeline was created.
- # @return [Time]
- #
- # @!attribute [rw] last_update_time
- # The last time the pipeline was updated.
- # @return [Time]
- #
- class Pipeline < Struct.new(
- :name,
- :arn,
- :activities,
- :reprocessing_summaries,
- :creation_time,
- :last_update_time)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # An activity that performs a transformation on a message.
- #
- # @!attribute [rw] channel
- # Determines the source of the messages to be processed.
- # @return [Types::ChannelActivity]
- #
- # @!attribute [rw] lambda
- # Runs a Lambda function to modify the message.
- # @return [Types::LambdaActivity]
- #
- # @!attribute [rw] datastore
- # Specifies where to store the processed message data.
- # @return [Types::DatastoreActivity]
- #
- # @!attribute [rw] add_attributes
- # Adds other attributes based on existing attributes in the message.
- # @return [Types::AddAttributesActivity]
- #
- # @!attribute [rw] remove_attributes
- # Removes attributes from a message.
- # @return [Types::RemoveAttributesActivity]
- #
- # @!attribute [rw] select_attributes
- # Used to create a new message using only the specified attributes
- # from the original message.
- # @return [Types::SelectAttributesActivity]
- #
- # @!attribute [rw] filter
- # Filters a message based on its attributes.
- # @return [Types::FilterActivity]
- #
- # @!attribute [rw] math
- # Computes an arithmetic expression using the message's attributes
- # and adds it to the message.
- # @return [Types::MathActivity]
- #
- # @!attribute [rw] device_registry_enrich
- # Adds data from the IoT device registry to your message.
- # @return [Types::DeviceRegistryEnrichActivity]
- #
- # @!attribute [rw] device_shadow_enrich
- # Adds information from the IoT Device Shadow service to a message.
- # @return [Types::DeviceShadowEnrichActivity]
- #
- class PipelineActivity < Struct.new(
- :channel,
- :lambda,
- :datastore,
- :add_attributes,
- :remove_attributes,
- :select_attributes,
- :filter,
- :math,
- :device_registry_enrich,
- :device_shadow_enrich)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # A summary of information about a pipeline.
- #
- # @!attribute [rw] pipeline_name
- # The name of the pipeline.
- # @return [String]
- #
- # @!attribute [rw] reprocessing_summaries
- # A summary of information about the pipeline reprocessing.
- # @return [Array]
- #
- # @!attribute [rw] creation_time
- # When the pipeline was created.
- # @return [Time]
- #
- # @!attribute [rw] last_update_time
- # When the pipeline was last updated.
- # @return [Time]
- #
- class PipelineSummary < Struct.new(
- :pipeline_name,
- :reprocessing_summaries,
- :creation_time,
- :last_update_time)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] logging_options
- # The new values of the IoT Analytics logging options.
- # @return [Types::LoggingOptions]
- #
- class PutLoggingOptionsRequest < Struct.new(
- :logging_options)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Information that is used to filter message data, to segregate it
- # according to the timeframe in which it arrives.
- #
- # @!attribute [rw] delta_time
- # Used to limit data to that which has arrived since the last
- # execution of the action.
- # @return [Types::DeltaTime]
- #
- class QueryFilter < Struct.new(
- :delta_time)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # An activity that removes attributes from a message.
- #
- # @!attribute [rw] name
- # The name of the `removeAttributes` activity.
- # @return [String]
- #
- # @!attribute [rw] attributes
- # A list of 1-50 attributes to remove from the message.
- # @return [Array]
- #
- # @!attribute [rw] next
- # The next activity in the pipeline.
- # @return [String]
- #
- class RemoveAttributesActivity < Struct.new(
- :name,
- :attributes,
- :next)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Information about pipeline reprocessing.
- #
- # @!attribute [rw] id
- # The `reprocessingId` returned by `StartPipelineReprocessing`.
- # @return [String]
- #
- # @!attribute [rw] status
- # The status of the pipeline reprocessing.
- # @return [String]
- #
- # @!attribute [rw] creation_time
- # The time the pipeline reprocessing was created.
- # @return [Time]
- #
- class ReprocessingSummary < Struct.new(
- :id,
- :status,
- :creation_time)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # A resource with the same name already exists.
- #
- # @!attribute [rw] message
- # @return [String]
- #
- # @!attribute [rw] resource_id
- # The ID of the resource.
- # @return [String]
- #
- # @!attribute [rw] resource_arn
- # The ARN of the resource.
- # @return [String]
- #
- class ResourceAlreadyExistsException < Struct.new(
- :message,
- :resource_id,
- :resource_arn)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # The configuration of the resource used to execute the
- # `containerAction`.
- #
- # @!attribute [rw] compute_type
- # The type of the compute resource used to execute the
- # `containerAction`. Possible values are: `ACU_1` (vCPU=4, memory=16
- # GiB) or `ACU_2` (vCPU=8, memory=32 GiB).
- # @return [String]
- #
- # @!attribute [rw] volume_size_in_gb
- # The size, in GB, of the persistent storage available to the resource
- # instance used to execute the `containerAction` (min: 1, max: 50).
- # @return [Integer]
- #
- class ResourceConfiguration < Struct.new(
- :compute_type,
- :volume_size_in_gb)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # A resource with the specified name could not be found.
- #
- # @!attribute [rw] message
- # @return [String]
- #
- class ResourceNotFoundException < Struct.new(
- :message)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # How long, in days, message data is kept.
- #
- # @!attribute [rw] unlimited
- # If true, message data is kept indefinitely.
- # @return [Boolean]
- #
- # @!attribute [rw] number_of_days
- # The number of days that message data is kept. The `unlimited`
- # parameter must be false.
- # @return [Integer]
- #
- class RetentionPeriod < Struct.new(
- :unlimited,
- :number_of_days)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] pipeline_activity
- # The pipeline activity that is run. This must not be a channel
- # activity or a data store activity because these activities are used
- # in a pipeline only to load the original message and to store the
- # (possibly) transformed message. If a Lambda activity is specified,
- # only short-running Lambda functions (those with a timeout of less
- # than 30 seconds or less) can be used.
- # @return [Types::PipelineActivity]
- #
- # @!attribute [rw] payloads
- # The sample message payloads on which the pipeline activity is run.
- # @return [Array]
- #
- class RunPipelineActivityRequest < Struct.new(
- :pipeline_activity,
- :payloads)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] payloads
- # The enriched or transformed sample message payloads as
- # base64-encoded strings. (The results of running the pipeline
- # activity on each input sample message payload, encoded in base64.)
- # @return [Array]
- #
- # @!attribute [rw] log_result
- # In case the pipeline activity fails, the log message that is
- # generated.
- # @return [String]
- #
- class RunPipelineActivityResponse < Struct.new(
- :payloads,
- :log_result)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Configuration information for delivery of dataset contents to Amazon
- # Simple Storage Service (Amazon S3).
- #
- # @!attribute [rw] bucket
- # The name of the S3 bucket to which dataset contents are delivered.
- # @return [String]
- #
- # @!attribute [rw] key
- # The key of the dataset contents object in an S3 bucket. Each object
- # has a key that is a unique identifier. Each object has exactly one
- # key.
- #
- # You can create a unique key with the following options:
- #
- # * Use `!{iotanalytics:scheduleTime}` to insert the time of a
- # scheduled SQL query run.
- #
- # * Use `!{iotanalytics:versionId}` to insert a unique hash that
- # identifies a dataset content.
- #
- # * Use `!{iotanalytics:creationTime}` to insert the creation time of
- # a dataset content.
- #
- # The following example creates a unique key for a CSV file:
- # `dataset/mydataset/!{iotanalytics:scheduleTime}/!{iotanalytics:versionId}.csv`
- #
- # If you don't use `!{iotanalytics:versionId}` to specify the key,
- # you might get duplicate keys. For example, you might have two
- # dataset contents with the same `scheduleTime` but different
- # `versionId`s. This means that one dataset content overwrites the
- # other.
- #
- #
- # @return [String]
- #
- # @!attribute [rw] glue_configuration
- # Configuration information for coordination with Glue, a fully
- # managed extract, transform and load (ETL) service.
- # @return [Types::GlueConfiguration]
- #
- # @!attribute [rw] role_arn
- # The ARN of the role that grants IoT Analytics permission to interact
- # with your Amazon S3 and Glue resources.
- # @return [String]
- #
- class S3DestinationConfiguration < Struct.new(
- :bucket,
- :key,
- :glue_configuration,
- :role_arn)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] channel_name
- # The name of the channel whose message samples are retrieved.
- # @return [String]
- #
- # @!attribute [rw] max_messages
- # The number of sample messages to be retrieved. The limit is 10. The
- # default is also 10.
- # @return [Integer]
- #
- # @!attribute [rw] start_time
- # The start of the time window from which sample messages are
- # retrieved.
- # @return [Time]
- #
- # @!attribute [rw] end_time
- # The end of the time window from which sample messages are retrieved.
- # @return [Time]
- #
- class SampleChannelDataRequest < Struct.new(
- :channel_name,
- :max_messages,
- :start_time,
- :end_time)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] payloads
- # The list of message samples. Each sample message is returned as a
- # base64-encoded string.
- # @return [Array]
- #
- class SampleChannelDataResponse < Struct.new(
- :payloads)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # The schedule for when to trigger an update.
- #
- # @!attribute [rw] expression
- # The expression that defines when to trigger an update. For more
- # information, see [Schedule Expressions for Rules][1] in the *Amazon
- # CloudWatch Events User Guide*.
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html
- # @return [String]
- #
- class Schedule < Struct.new(
- :expression)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Information needed to define a schema.
- #
- # @!attribute [rw] columns
- # Specifies one or more columns that store your data.
- #
- # Each schema can have up to 100 columns. Each column can have up to
- # 100 nested types.
- # @return [Array]
- #
- class SchemaDefinition < Struct.new(
- :columns)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Used to create a new message using only the specified attributes from
- # the original message.
- #
- # @!attribute [rw] name
- # The name of the `selectAttributes` activity.
- # @return [String]
- #
- # @!attribute [rw] attributes
- # A list of the attributes to select from the message.
- # @return [Array]
- #
- # @!attribute [rw] next
- # The next activity in the pipeline.
- # @return [String]
- #
- class SelectAttributesActivity < Struct.new(
- :name,
- :attributes,
- :next)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Used to store channel data in an S3 bucket managed by IoT Analytics.
- # You can't change the choice of S3 storage after the data store is
- # created.
- #
- # @api private
- #
- class ServiceManagedChannelS3Storage < Aws::EmptyStructure; end
-
- # Used to store channel data in an S3 bucket managed by IoT Analytics.
- #
- class ServiceManagedChannelS3StorageSummary < Aws::EmptyStructure; end
-
- # Used to store data in an Amazon S3 bucket managed by IoT Analytics.
- # You can't change the choice of Amazon S3 storage after your data
- # store is created.
- #
- # @api private
- #
- class ServiceManagedDatastoreS3Storage < Aws::EmptyStructure; end
-
- # Contains information about the data store that is managed by IoT
- # Analytics.
- #
- class ServiceManagedDatastoreS3StorageSummary < Aws::EmptyStructure; end
-
- # The service is temporarily unavailable.
- #
- # @!attribute [rw] message
- # @return [String]
- #
- class ServiceUnavailableException < Struct.new(
- :message)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # The SQL query to modify the message.
- #
- # @!attribute [rw] sql_query
- # A SQL query string.
- # @return [String]
- #
- # @!attribute [rw] filters
- # Prefilters applied to message data.
- # @return [Array]
- #
- class SqlQueryDatasetAction < Struct.new(
- :sql_query,
- :filters)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] pipeline_name
- # The name of the pipeline on which to start reprocessing.
- # @return [String]
- #
- # @!attribute [rw] start_time
- # The start time (inclusive) of raw message data that is reprocessed.
- #
- # If you specify a value for the `startTime` parameter, you must not
- # use the `channelMessages` object.
- # @return [Time]
- #
- # @!attribute [rw] end_time
- # The end time (exclusive) of raw message data that is reprocessed.
- #
- # If you specify a value for the `endTime` parameter, you must not use
- # the `channelMessages` object.
- # @return [Time]
- #
- # @!attribute [rw] channel_messages
- # Specifies one or more sets of channel messages that you want to
- # reprocess.
- #
- # If you use the `channelMessages` object, you must not specify a
- # value for `startTime` and `endTime`.
- # @return [Types::ChannelMessages]
- #
- class StartPipelineReprocessingRequest < Struct.new(
- :pipeline_name,
- :start_time,
- :end_time,
- :channel_messages)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] reprocessing_id
- # The ID of the pipeline reprocessing activity that was started.
- # @return [String]
- #
- class StartPipelineReprocessingResponse < Struct.new(
- :reprocessing_id)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # A set of key-value pairs that are used to manage the resource.
- #
- # @!attribute [rw] key
- # The tag's key.
- # @return [String]
- #
- # @!attribute [rw] value
- # The tag's value.
- # @return [String]
- #
- class Tag < Struct.new(
- :key,
- :value)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] resource_arn
- # The ARN of the resource whose tags you want to modify.
- # @return [String]
- #
- # @!attribute [rw] tags
- # The new or modified tags for the resource.
- # @return [Array]
- #
- class TagResourceRequest < Struct.new(
- :resource_arn,
- :tags)
- SENSITIVE = []
- include Aws::Structure
- end
-
- class TagResourceResponse < Aws::EmptyStructure; end
-
- # The request was denied due to request throttling.
- #
- # @!attribute [rw] message
- # @return [String]
- #
- class ThrottlingException < Struct.new(
- :message)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # A partition dimension defined by a timestamp attribute.
- #
- # @!attribute [rw] attribute_name
- # The attribute name of the partition defined by a timestamp.
- # @return [String]
- #
- # @!attribute [rw] timestamp_format
- # The timestamp format of a partition defined by a timestamp. The
- # default format is seconds since epoch (January 1, 1970 at midnight
- # UTC time).
- # @return [String]
- #
- class TimestampPartition < Struct.new(
- :attribute_name,
- :timestamp_format)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Information about the dataset whose content generation triggers the
- # new dataset content generation.
- #
- # @!attribute [rw] name
- # The name of the dataset whose content generation triggers the new
- # dataset content generation.
- # @return [String]
- #
- class TriggeringDataset < Struct.new(
- :name)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] resource_arn
- # The ARN of the resource whose tags you want to remove.
- # @return [String]
- #
- # @!attribute [rw] tag_keys
- # The keys of those tags which you want to remove.
- # @return [Array]
- #
- class UntagResourceRequest < Struct.new(
- :resource_arn,
- :tag_keys)
- SENSITIVE = []
- include Aws::Structure
- end
-
- class UntagResourceResponse < Aws::EmptyStructure; end
-
- # @!attribute [rw] channel_name
- # The name of the channel to be updated.
- # @return [String]
- #
- # @!attribute [rw] channel_storage
- # Where channel data is stored. You can choose one of
- # `serviceManagedS3` or `customerManagedS3` storage. If not specified,
- # the default is `serviceManagedS3`. You can't change this storage
- # option after the channel is created.
- # @return [Types::ChannelStorage]
- #
- # @!attribute [rw] retention_period
- # How long, in days, message data is kept for the channel. The
- # retention period can't be updated if the channel's Amazon S3
- # storage is customer-managed.
- # @return [Types::RetentionPeriod]
- #
- class UpdateChannelRequest < Struct.new(
- :channel_name,
- :channel_storage,
- :retention_period)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] dataset_name
- # The name of the dataset to update.
- # @return [String]
- #
- # @!attribute [rw] actions
- # A list of `DatasetAction` objects.
- # @return [Array]
- #
- # @!attribute [rw] triggers
- # A list of `DatasetTrigger` objects. The list can be empty or can
- # contain up to five `DatasetTrigger` objects.
- # @return [Array]
- #
- # @!attribute [rw] content_delivery_rules
- # When dataset contents are created, they are delivered to
- # destinations specified here.
- # @return [Array]
- #
- # @!attribute [rw] retention_period
- # How long, in days, dataset contents are kept for the dataset.
- # @return [Types::RetentionPeriod]
- #
- # @!attribute [rw] versioning_configuration
- # Optional. How many versions of dataset contents are kept. If not
- # specified or set to null, only the latest version plus the latest
- # succeeded version (if they are different) are kept for the time
- # period specified by the `retentionPeriod` parameter. For more
- # information, see [Keeping Multiple Versions of IoT Analytics
- # datasets][1] in the *IoT Analytics User Guide*.
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/iotanalytics/latest/userguide/getting-started.html#aws-iot-analytics-dataset-versions
- # @return [Types::VersioningConfiguration]
- #
- # @!attribute [rw] late_data_rules
- # A list of data rules that send notifications to CloudWatch, when
- # data arrives late. To specify `lateDataRules`, the dataset must use
- # a [DeltaTimer][1] filter.
- #
- #
- #
- # [1]: https://docs.aws.amazon.com/iotanalytics/latest/APIReference/API_DeltaTime.html
- # @return [Array]
- #
- class UpdateDatasetRequest < Struct.new(
- :dataset_name,
- :actions,
- :triggers,
- :content_delivery_rules,
- :retention_period,
- :versioning_configuration,
- :late_data_rules)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] datastore_name
- # The name of the data store to be updated.
- # @return [String]
- #
- # @!attribute [rw] retention_period
- # How long, in days, message data is kept for the data store. The
- # retention period can't be updated if the data store's Amazon S3
- # storage is customer-managed.
- # @return [Types::RetentionPeriod]
- #
- # @!attribute [rw] datastore_storage
- # Where data in a data store is stored.. You can choose
- # `serviceManagedS3` storage, `customerManagedS3` storage, or
- # `iotSiteWiseMultiLayerStorage` storage. The default is
- # `serviceManagedS3`. You can't change the choice of Amazon S3
- # storage after your data store is created.
- # @return [Types::DatastoreStorage]
- #
- # @!attribute [rw] file_format_configuration
- # Contains the configuration information of file formats. IoT
- # Analytics data stores support JSON and [Parquet][1].
- #
- # The default file format is JSON. You can specify only one format.
- #
- # You can't change the file format after you create the data store.
- #
- #
- #
- # [1]: https://parquet.apache.org/
- # @return [Types::FileFormatConfiguration]
- #
- class UpdateDatastoreRequest < Struct.new(
- :datastore_name,
- :retention_period,
- :datastore_storage,
- :file_format_configuration)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] pipeline_name
- # The name of the pipeline to update.
- # @return [String]
- #
- # @!attribute [rw] pipeline_activities
- # A list of `PipelineActivity` objects. Activities perform
- # transformations on your messages, such as removing, renaming or
- # adding message attributes; filtering messages based on attribute
- # values; invoking your Lambda functions on messages for advanced
- # processing; or performing mathematical transformations to normalize
- # device data.
- #
- # The list can be 2-25 `PipelineActivity` objects and must contain
- # both a `channel` and a `datastore` activity. Each entry in the list
- # must contain only one activity. For example:
- #
- # `pipelineActivities = [ { "channel": { ... } }, { "lambda": { ... }
- # }, ... ]`
- # @return [Array]
- #
- class UpdatePipelineRequest < Struct.new(
- :pipeline_name,
- :pipeline_activities)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # An instance of a variable to be passed to the `containerAction`
- # execution. Each variable must have a name and a value given by one of
- # `stringValue`, `datasetContentVersionValue`, or `outputFileUriValue`.
- #
- # @!attribute [rw] name
- # The name of the variable.
- # @return [String]
- #
- # @!attribute [rw] string_value
- # The value of the variable as a string.
- # @return [String]
- #
- # @!attribute [rw] double_value
- # The value of the variable as a double (numeric).
- # @return [Float]
- #
- # @!attribute [rw] dataset_content_version_value
- # The value of the variable as a structure that specifies a dataset
- # content version.
- # @return [Types::DatasetContentVersionValue]
- #
- # @!attribute [rw] output_file_uri_value
- # The value of the variable as a structure that specifies an output
- # file URI.
- # @return [Types::OutputFileUriValue]
- #
- class Variable < Struct.new(
- :name,
- :string_value,
- :double_value,
- :dataset_content_version_value,
- :output_file_uri_value)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # Information about the versioning of dataset contents.
- #
- # @!attribute [rw] unlimited
- # If true, unlimited versions of dataset contents are kept.
- # @return [Boolean]
- #
- # @!attribute [rw] max_versions
- # How many versions of dataset contents are kept. The `unlimited`
- # parameter must be `false`.
- # @return [Integer]
- #
- class VersioningConfiguration < Struct.new(
- :unlimited,
- :max_versions)
- SENSITIVE = []
- include Aws::Structure
- end
-
- end
-end
-
diff --git a/gems/aws-sdk-iotanalytics/sig/client.rbs b/gems/aws-sdk-iotanalytics/sig/client.rbs
deleted file mode 100644
index 985ec7b9d99..00000000000
--- a/gems/aws-sdk-iotanalytics/sig/client.rbs
+++ /dev/null
@@ -1,909 +0,0 @@
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-module Aws
- module IoTAnalytics
- class Client < ::Seahorse::Client::Base
- include ::Aws::ClientStubs
-
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Client.html#initialize-instance_method
- def self.new: (
- ?credentials: untyped,
- ?region: String,
- ?access_key_id: String,
- ?account_id: String,
- ?active_endpoint_cache: bool,
- ?adaptive_retry_wait_to_fill: bool,
- ?auth_scheme_preference: Array[String],
- ?client_side_monitoring: bool,
- ?client_side_monitoring_client_id: String,
- ?client_side_monitoring_host: String,
- ?client_side_monitoring_port: Integer,
- ?client_side_monitoring_publisher: untyped,
- ?convert_params: bool,
- ?correct_clock_skew: bool,
- ?defaults_mode: String,
- ?disable_host_prefix_injection: bool,
- ?disable_request_compression: bool,
- ?endpoint: String,
- ?endpoint_cache_max_entries: Integer,
- ?endpoint_cache_max_threads: Integer,
- ?endpoint_cache_poll_interval: Integer,
- ?endpoint_discovery: bool,
- ?ignore_configured_endpoint_urls: bool,
- ?log_formatter: untyped,
- ?log_level: Symbol,
- ?logger: untyped,
- ?max_attempts: Integer,
- ?profile: String,
- ?request_checksum_calculation: String,
- ?request_min_compression_size_bytes: Integer,
- ?response_checksum_validation: String,
- ?retry_backoff: Proc,
- ?retry_base_delay: Float,
- ?retry_jitter: (:none | :equal | :full | ^(Integer) -> Integer),
- ?retry_limit: Integer,
- ?retry_max_delay: Integer,
- ?retry_mode: ("legacy" | "standard" | "adaptive"),
- ?sdk_ua_app_id: String,
- ?secret_access_key: String,
- ?session_token: String,
- ?sigv4a_signing_region_set: Array[String],
- ?stub_responses: untyped,
- ?telemetry_provider: Aws::Telemetry::TelemetryProviderBase,
- ?token_provider: untyped,
- ?use_dualstack_endpoint: bool,
- ?use_fips_endpoint: bool,
- ?validate_params: bool,
- ?endpoint_provider: untyped,
- ?http_proxy: String,
- ?http_open_timeout: (Float | Integer),
- ?http_read_timeout: (Float | Integer),
- ?http_idle_timeout: (Float | Integer),
- ?http_continue_timeout: (Float | Integer),
- ?ssl_timeout: (Float | Integer | nil),
- ?http_wire_trace: bool,
- ?ssl_verify_peer: bool,
- ?ssl_ca_bundle: String,
- ?ssl_ca_directory: String,
- ?ssl_ca_store: String,
- ?on_chunk_received: Proc,
- ?on_chunk_sent: Proc,
- ?raise_response_errors: bool
- ) -> instance
- | (?Hash[Symbol, untyped]) -> instance
-
-
- interface _BatchPutMessageResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::BatchPutMessageResponse]
- def batch_put_message_error_entries: () -> ::Array[Types::BatchPutMessageErrorEntry]
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Client.html#batch_put_message-instance_method
- def batch_put_message: (
- channel_name: ::String,
- messages: Array[
- {
- message_id: ::String,
- payload: ::String
- },
- ]
- ) -> _BatchPutMessageResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _BatchPutMessageResponseSuccess
-
- interface _CancelPipelineReprocessingResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::CancelPipelineReprocessingResponse]
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Client.html#cancel_pipeline_reprocessing-instance_method
- def cancel_pipeline_reprocessing: (
- pipeline_name: ::String,
- reprocessing_id: ::String
- ) -> _CancelPipelineReprocessingResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _CancelPipelineReprocessingResponseSuccess
-
- interface _CreateChannelResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::CreateChannelResponse]
- def channel_name: () -> ::String
- def channel_arn: () -> ::String
- def retention_period: () -> Types::RetentionPeriod
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Client.html#create_channel-instance_method
- def create_channel: (
- channel_name: ::String,
- ?channel_storage: {
- service_managed_s3: {
- }?,
- customer_managed_s3: {
- bucket: ::String,
- key_prefix: ::String?,
- role_arn: ::String
- }?
- },
- ?retention_period: {
- unlimited: bool?,
- number_of_days: ::Integer?
- },
- ?tags: Array[
- {
- key: ::String,
- value: ::String
- },
- ]
- ) -> _CreateChannelResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _CreateChannelResponseSuccess
-
- interface _CreateDatasetResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::CreateDatasetResponse]
- def dataset_name: () -> ::String
- def dataset_arn: () -> ::String
- def retention_period: () -> Types::RetentionPeriod
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Client.html#create_dataset-instance_method
- def create_dataset: (
- dataset_name: ::String,
- actions: Array[
- {
- action_name: ::String?,
- query_action: {
- sql_query: ::String,
- filters: Array[
- {
- delta_time: {
- offset_seconds: ::Integer,
- time_expression: ::String
- }?
- },
- ]?
- }?,
- container_action: {
- image: ::String,
- execution_role_arn: ::String,
- resource_configuration: {
- compute_type: ("ACU_1" | "ACU_2"),
- volume_size_in_gb: ::Integer
- },
- variables: Array[
- {
- name: ::String,
- string_value: ::String?,
- double_value: ::Float?,
- dataset_content_version_value: {
- dataset_name: ::String
- }?,
- output_file_uri_value: {
- file_name: ::String
- }?
- },
- ]?
- }?
- },
- ],
- ?triggers: Array[
- {
- schedule: {
- expression: ::String?
- }?,
- dataset: {
- name: ::String
- }?
- },
- ],
- ?content_delivery_rules: Array[
- {
- entry_name: ::String?,
- destination: {
- iot_events_destination_configuration: {
- input_name: ::String,
- role_arn: ::String
- }?,
- s3_destination_configuration: {
- bucket: ::String,
- key: ::String,
- glue_configuration: {
- table_name: ::String,
- database_name: ::String
- }?,
- role_arn: ::String
- }?
- }
- },
- ],
- ?retention_period: {
- unlimited: bool?,
- number_of_days: ::Integer?
- },
- ?versioning_configuration: {
- unlimited: bool?,
- max_versions: ::Integer?
- },
- ?tags: Array[
- {
- key: ::String,
- value: ::String
- },
- ],
- ?late_data_rules: Array[
- {
- rule_name: ::String?,
- rule_configuration: {
- delta_time_session_window_configuration: {
- timeout_in_minutes: ::Integer
- }?
- }
- },
- ]
- ) -> _CreateDatasetResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _CreateDatasetResponseSuccess
-
- interface _CreateDatasetContentResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::CreateDatasetContentResponse]
- def version_id: () -> ::String
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Client.html#create_dataset_content-instance_method
- def create_dataset_content: (
- dataset_name: ::String,
- ?version_id: ::String
- ) -> _CreateDatasetContentResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _CreateDatasetContentResponseSuccess
-
- interface _CreateDatastoreResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::CreateDatastoreResponse]
- def datastore_name: () -> ::String
- def datastore_arn: () -> ::String
- def retention_period: () -> Types::RetentionPeriod
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Client.html#create_datastore-instance_method
- def create_datastore: (
- datastore_name: ::String,
- ?datastore_storage: {
- service_managed_s3: {
- }?,
- customer_managed_s3: {
- bucket: ::String,
- key_prefix: ::String?,
- role_arn: ::String
- }?,
- iot_site_wise_multi_layer_storage: {
- customer_managed_s3_storage: {
- bucket: ::String,
- key_prefix: ::String?
- }
- }?
- },
- ?retention_period: {
- unlimited: bool?,
- number_of_days: ::Integer?
- },
- ?tags: Array[
- {
- key: ::String,
- value: ::String
- },
- ],
- ?file_format_configuration: {
- json_configuration: {
- }?,
- parquet_configuration: {
- schema_definition: {
- columns: Array[
- {
- name: ::String,
- type: ::String
- },
- ]?
- }?
- }?
- },
- ?datastore_partitions: {
- partitions: Array[
- {
- attribute_partition: {
- attribute_name: ::String
- }?,
- timestamp_partition: {
- attribute_name: ::String,
- timestamp_format: ::String?
- }?
- },
- ]?
- }
- ) -> _CreateDatastoreResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _CreateDatastoreResponseSuccess
-
- interface _CreatePipelineResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::CreatePipelineResponse]
- def pipeline_name: () -> ::String
- def pipeline_arn: () -> ::String
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Client.html#create_pipeline-instance_method
- def create_pipeline: (
- pipeline_name: ::String,
- pipeline_activities: Array[
- {
- channel: {
- name: ::String,
- channel_name: ::String,
- next: ::String?
- }?,
- lambda: {
- name: ::String,
- lambda_name: ::String,
- batch_size: ::Integer,
- next: ::String?
- }?,
- datastore: {
- name: ::String,
- datastore_name: ::String
- }?,
- add_attributes: {
- name: ::String,
- attributes: Hash[::String, ::String],
- next: ::String?
- }?,
- remove_attributes: {
- name: ::String,
- attributes: Array[::String],
- next: ::String?
- }?,
- select_attributes: {
- name: ::String,
- attributes: Array[::String],
- next: ::String?
- }?,
- filter: {
- name: ::String,
- filter: ::String,
- next: ::String?
- }?,
- math: {
- name: ::String,
- attribute: ::String,
- math: ::String,
- next: ::String?
- }?,
- device_registry_enrich: {
- name: ::String,
- attribute: ::String,
- thing_name: ::String,
- role_arn: ::String,
- next: ::String?
- }?,
- device_shadow_enrich: {
- name: ::String,
- attribute: ::String,
- thing_name: ::String,
- role_arn: ::String,
- next: ::String?
- }?
- },
- ],
- ?tags: Array[
- {
- key: ::String,
- value: ::String
- },
- ]
- ) -> _CreatePipelineResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _CreatePipelineResponseSuccess
-
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Client.html#delete_channel-instance_method
- def delete_channel: (
- channel_name: ::String
- ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure]
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure]
-
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Client.html#delete_dataset-instance_method
- def delete_dataset: (
- dataset_name: ::String
- ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure]
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure]
-
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Client.html#delete_dataset_content-instance_method
- def delete_dataset_content: (
- dataset_name: ::String,
- ?version_id: ::String
- ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure]
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure]
-
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Client.html#delete_datastore-instance_method
- def delete_datastore: (
- datastore_name: ::String
- ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure]
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure]
-
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Client.html#delete_pipeline-instance_method
- def delete_pipeline: (
- pipeline_name: ::String
- ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure]
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure]
-
- interface _DescribeChannelResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::DescribeChannelResponse]
- def channel: () -> Types::Channel
- def statistics: () -> Types::ChannelStatistics
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Client.html#describe_channel-instance_method
- def describe_channel: (
- channel_name: ::String,
- ?include_statistics: bool
- ) -> _DescribeChannelResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _DescribeChannelResponseSuccess
-
- interface _DescribeDatasetResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::DescribeDatasetResponse]
- def dataset: () -> Types::Dataset
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Client.html#describe_dataset-instance_method
- def describe_dataset: (
- dataset_name: ::String
- ) -> _DescribeDatasetResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _DescribeDatasetResponseSuccess
-
- interface _DescribeDatastoreResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::DescribeDatastoreResponse]
- def datastore: () -> Types::Datastore
- def statistics: () -> Types::DatastoreStatistics
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Client.html#describe_datastore-instance_method
- def describe_datastore: (
- datastore_name: ::String,
- ?include_statistics: bool
- ) -> _DescribeDatastoreResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _DescribeDatastoreResponseSuccess
-
- interface _DescribeLoggingOptionsResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::DescribeLoggingOptionsResponse]
- def logging_options: () -> Types::LoggingOptions
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Client.html#describe_logging_options-instance_method
- def describe_logging_options: (
- ) -> _DescribeLoggingOptionsResponseSuccess
- | (?Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _DescribeLoggingOptionsResponseSuccess
-
- interface _DescribePipelineResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::DescribePipelineResponse]
- def pipeline: () -> Types::Pipeline
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Client.html#describe_pipeline-instance_method
- def describe_pipeline: (
- pipeline_name: ::String
- ) -> _DescribePipelineResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _DescribePipelineResponseSuccess
-
- interface _GetDatasetContentResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::GetDatasetContentResponse]
- def entries: () -> ::Array[Types::DatasetEntry]
- def timestamp: () -> ::Time
- def status: () -> Types::DatasetContentStatus
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Client.html#get_dataset_content-instance_method
- def get_dataset_content: (
- dataset_name: ::String,
- ?version_id: ::String
- ) -> _GetDatasetContentResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetDatasetContentResponseSuccess
-
- interface _ListChannelsResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::ListChannelsResponse]
- def channel_summaries: () -> ::Array[Types::ChannelSummary]
- def next_token: () -> ::String
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Client.html#list_channels-instance_method
- def list_channels: (
- ?next_token: ::String,
- ?max_results: ::Integer
- ) -> _ListChannelsResponseSuccess
- | (?Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _ListChannelsResponseSuccess
-
- interface _ListDatasetContentsResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::ListDatasetContentsResponse]
- def dataset_content_summaries: () -> ::Array[Types::DatasetContentSummary]
- def next_token: () -> ::String
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Client.html#list_dataset_contents-instance_method
- def list_dataset_contents: (
- dataset_name: ::String,
- ?next_token: ::String,
- ?max_results: ::Integer,
- ?scheduled_on_or_after: ::Time,
- ?scheduled_before: ::Time
- ) -> _ListDatasetContentsResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _ListDatasetContentsResponseSuccess
-
- interface _ListDatasetsResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::ListDatasetsResponse]
- def dataset_summaries: () -> ::Array[Types::DatasetSummary]
- def next_token: () -> ::String
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Client.html#list_datasets-instance_method
- def list_datasets: (
- ?next_token: ::String,
- ?max_results: ::Integer
- ) -> _ListDatasetsResponseSuccess
- | (?Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _ListDatasetsResponseSuccess
-
- interface _ListDatastoresResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::ListDatastoresResponse]
- def datastore_summaries: () -> ::Array[Types::DatastoreSummary]
- def next_token: () -> ::String
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Client.html#list_datastores-instance_method
- def list_datastores: (
- ?next_token: ::String,
- ?max_results: ::Integer
- ) -> _ListDatastoresResponseSuccess
- | (?Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _ListDatastoresResponseSuccess
-
- interface _ListPipelinesResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::ListPipelinesResponse]
- def pipeline_summaries: () -> ::Array[Types::PipelineSummary]
- def next_token: () -> ::String
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Client.html#list_pipelines-instance_method
- def list_pipelines: (
- ?next_token: ::String,
- ?max_results: ::Integer
- ) -> _ListPipelinesResponseSuccess
- | (?Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _ListPipelinesResponseSuccess
-
- interface _ListTagsForResourceResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::ListTagsForResourceResponse]
- def tags: () -> ::Array[Types::Tag]
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Client.html#list_tags_for_resource-instance_method
- def list_tags_for_resource: (
- resource_arn: ::String
- ) -> _ListTagsForResourceResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _ListTagsForResourceResponseSuccess
-
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Client.html#put_logging_options-instance_method
- def put_logging_options: (
- logging_options: {
- role_arn: ::String,
- level: ("ERROR"),
- enabled: bool
- }
- ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure]
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure]
-
- interface _RunPipelineActivityResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::RunPipelineActivityResponse]
- def payloads: () -> ::Array[::String]
- def log_result: () -> ::String
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Client.html#run_pipeline_activity-instance_method
- def run_pipeline_activity: (
- pipeline_activity: {
- channel: {
- name: ::String,
- channel_name: ::String,
- next: ::String?
- }?,
- lambda: {
- name: ::String,
- lambda_name: ::String,
- batch_size: ::Integer,
- next: ::String?
- }?,
- datastore: {
- name: ::String,
- datastore_name: ::String
- }?,
- add_attributes: {
- name: ::String,
- attributes: Hash[::String, ::String],
- next: ::String?
- }?,
- remove_attributes: {
- name: ::String,
- attributes: Array[::String],
- next: ::String?
- }?,
- select_attributes: {
- name: ::String,
- attributes: Array[::String],
- next: ::String?
- }?,
- filter: {
- name: ::String,
- filter: ::String,
- next: ::String?
- }?,
- math: {
- name: ::String,
- attribute: ::String,
- math: ::String,
- next: ::String?
- }?,
- device_registry_enrich: {
- name: ::String,
- attribute: ::String,
- thing_name: ::String,
- role_arn: ::String,
- next: ::String?
- }?,
- device_shadow_enrich: {
- name: ::String,
- attribute: ::String,
- thing_name: ::String,
- role_arn: ::String,
- next: ::String?
- }?
- },
- payloads: Array[::String]
- ) -> _RunPipelineActivityResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _RunPipelineActivityResponseSuccess
-
- interface _SampleChannelDataResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::SampleChannelDataResponse]
- def payloads: () -> ::Array[::String]
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Client.html#sample_channel_data-instance_method
- def sample_channel_data: (
- channel_name: ::String,
- ?max_messages: ::Integer,
- ?start_time: ::Time,
- ?end_time: ::Time
- ) -> _SampleChannelDataResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _SampleChannelDataResponseSuccess
-
- interface _StartPipelineReprocessingResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::StartPipelineReprocessingResponse]
- def reprocessing_id: () -> ::String
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Client.html#start_pipeline_reprocessing-instance_method
- def start_pipeline_reprocessing: (
- pipeline_name: ::String,
- ?start_time: ::Time,
- ?end_time: ::Time,
- ?channel_messages: {
- s3_paths: Array[::String]?
- }
- ) -> _StartPipelineReprocessingResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _StartPipelineReprocessingResponseSuccess
-
- interface _TagResourceResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::TagResourceResponse]
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Client.html#tag_resource-instance_method
- def tag_resource: (
- resource_arn: ::String,
- tags: Array[
- {
- key: ::String,
- value: ::String
- },
- ]
- ) -> _TagResourceResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _TagResourceResponseSuccess
-
- interface _UntagResourceResponseSuccess
- include ::Seahorse::Client::_ResponseSuccess[Types::UntagResourceResponse]
- end
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Client.html#untag_resource-instance_method
- def untag_resource: (
- resource_arn: ::String,
- tag_keys: Array[::String]
- ) -> _UntagResourceResponseSuccess
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _UntagResourceResponseSuccess
-
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Client.html#update_channel-instance_method
- def update_channel: (
- channel_name: ::String,
- ?channel_storage: {
- service_managed_s3: {
- }?,
- customer_managed_s3: {
- bucket: ::String,
- key_prefix: ::String?,
- role_arn: ::String
- }?
- },
- ?retention_period: {
- unlimited: bool?,
- number_of_days: ::Integer?
- }
- ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure]
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure]
-
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Client.html#update_dataset-instance_method
- def update_dataset: (
- dataset_name: ::String,
- actions: Array[
- {
- action_name: ::String?,
- query_action: {
- sql_query: ::String,
- filters: Array[
- {
- delta_time: {
- offset_seconds: ::Integer,
- time_expression: ::String
- }?
- },
- ]?
- }?,
- container_action: {
- image: ::String,
- execution_role_arn: ::String,
- resource_configuration: {
- compute_type: ("ACU_1" | "ACU_2"),
- volume_size_in_gb: ::Integer
- },
- variables: Array[
- {
- name: ::String,
- string_value: ::String?,
- double_value: ::Float?,
- dataset_content_version_value: {
- dataset_name: ::String
- }?,
- output_file_uri_value: {
- file_name: ::String
- }?
- },
- ]?
- }?
- },
- ],
- ?triggers: Array[
- {
- schedule: {
- expression: ::String?
- }?,
- dataset: {
- name: ::String
- }?
- },
- ],
- ?content_delivery_rules: Array[
- {
- entry_name: ::String?,
- destination: {
- iot_events_destination_configuration: {
- input_name: ::String,
- role_arn: ::String
- }?,
- s3_destination_configuration: {
- bucket: ::String,
- key: ::String,
- glue_configuration: {
- table_name: ::String,
- database_name: ::String
- }?,
- role_arn: ::String
- }?
- }
- },
- ],
- ?retention_period: {
- unlimited: bool?,
- number_of_days: ::Integer?
- },
- ?versioning_configuration: {
- unlimited: bool?,
- max_versions: ::Integer?
- },
- ?late_data_rules: Array[
- {
- rule_name: ::String?,
- rule_configuration: {
- delta_time_session_window_configuration: {
- timeout_in_minutes: ::Integer
- }?
- }
- },
- ]
- ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure]
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure]
-
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Client.html#update_datastore-instance_method
- def update_datastore: (
- datastore_name: ::String,
- ?retention_period: {
- unlimited: bool?,
- number_of_days: ::Integer?
- },
- ?datastore_storage: {
- service_managed_s3: {
- }?,
- customer_managed_s3: {
- bucket: ::String,
- key_prefix: ::String?,
- role_arn: ::String
- }?,
- iot_site_wise_multi_layer_storage: {
- customer_managed_s3_storage: {
- bucket: ::String,
- key_prefix: ::String?
- }
- }?
- },
- ?file_format_configuration: {
- json_configuration: {
- }?,
- parquet_configuration: {
- schema_definition: {
- columns: Array[
- {
- name: ::String,
- type: ::String
- },
- ]?
- }?
- }?
- }
- ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure]
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure]
-
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Client.html#update_pipeline-instance_method
- def update_pipeline: (
- pipeline_name: ::String,
- pipeline_activities: Array[
- {
- channel: {
- name: ::String,
- channel_name: ::String,
- next: ::String?
- }?,
- lambda: {
- name: ::String,
- lambda_name: ::String,
- batch_size: ::Integer,
- next: ::String?
- }?,
- datastore: {
- name: ::String,
- datastore_name: ::String
- }?,
- add_attributes: {
- name: ::String,
- attributes: Hash[::String, ::String],
- next: ::String?
- }?,
- remove_attributes: {
- name: ::String,
- attributes: Array[::String],
- next: ::String?
- }?,
- select_attributes: {
- name: ::String,
- attributes: Array[::String],
- next: ::String?
- }?,
- filter: {
- name: ::String,
- filter: ::String,
- next: ::String?
- }?,
- math: {
- name: ::String,
- attribute: ::String,
- math: ::String,
- next: ::String?
- }?,
- device_registry_enrich: {
- name: ::String,
- attribute: ::String,
- thing_name: ::String,
- role_arn: ::String,
- next: ::String?
- }?,
- device_shadow_enrich: {
- name: ::String,
- attribute: ::String,
- thing_name: ::String,
- role_arn: ::String,
- next: ::String?
- }?
- },
- ]
- ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure]
- | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure]
- end
- end
-end
-
diff --git a/gems/aws-sdk-iotanalytics/sig/errors.rbs b/gems/aws-sdk-iotanalytics/sig/errors.rbs
deleted file mode 100644
index 08cd9c6d7f9..00000000000
--- a/gems/aws-sdk-iotanalytics/sig/errors.rbs
+++ /dev/null
@@ -1,39 +0,0 @@
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-module Aws
- module IoTAnalytics
- module Errors
- class ServiceError < ::Aws::Errors::ServiceError
- end
-
- class InternalFailureException < ::Aws::Errors::ServiceError
- def message: () -> ::String
- end
- class InvalidRequestException < ::Aws::Errors::ServiceError
- def message: () -> ::String
- end
- class LimitExceededException < ::Aws::Errors::ServiceError
- def message: () -> ::String
- end
- class ResourceAlreadyExistsException < ::Aws::Errors::ServiceError
- def message: () -> ::String
- def resource_id: () -> ::String
- def resource_arn: () -> ::String
- end
- class ResourceNotFoundException < ::Aws::Errors::ServiceError
- def message: () -> ::String
- end
- class ServiceUnavailableException < ::Aws::Errors::ServiceError
- def message: () -> ::String
- end
- class ThrottlingException < ::Aws::Errors::ServiceError
- def message: () -> ::String
- end
- end
- end
-end
diff --git a/gems/aws-sdk-iotanalytics/sig/resource.rbs b/gems/aws-sdk-iotanalytics/sig/resource.rbs
deleted file mode 100644
index 04a9e230dc0..00000000000
--- a/gems/aws-sdk-iotanalytics/sig/resource.rbs
+++ /dev/null
@@ -1,85 +0,0 @@
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-module Aws
- module IoTAnalytics
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Resource.html
- class Resource
- # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/IoTAnalytics/Resource.html#initialize-instance_method
- def initialize: (
- ?client: Client,
- ?credentials: untyped,
- ?region: String,
- ?access_key_id: String,
- ?account_id: String,
- ?active_endpoint_cache: bool,
- ?adaptive_retry_wait_to_fill: bool,
- ?auth_scheme_preference: Array[String],
- ?client_side_monitoring: bool,
- ?client_side_monitoring_client_id: String,
- ?client_side_monitoring_host: String,
- ?client_side_monitoring_port: Integer,
- ?client_side_monitoring_publisher: untyped,
- ?convert_params: bool,
- ?correct_clock_skew: bool,
- ?defaults_mode: String,
- ?disable_host_prefix_injection: bool,
- ?disable_request_compression: bool,
- ?endpoint: String,
- ?endpoint_cache_max_entries: Integer,
- ?endpoint_cache_max_threads: Integer,
- ?endpoint_cache_poll_interval: Integer,
- ?endpoint_discovery: bool,
- ?ignore_configured_endpoint_urls: bool,
- ?log_formatter: untyped,
- ?log_level: Symbol,
- ?logger: untyped,
- ?max_attempts: Integer,
- ?profile: String,
- ?request_checksum_calculation: String,
- ?request_min_compression_size_bytes: Integer,
- ?response_checksum_validation: String,
- ?retry_backoff: Proc,
- ?retry_base_delay: Float,
- ?retry_jitter: (:none | :equal | :full | ^(Integer) -> Integer),
- ?retry_limit: Integer,
- ?retry_max_delay: Integer,
- ?retry_mode: ("legacy" | "standard" | "adaptive"),
- ?sdk_ua_app_id: String,
- ?secret_access_key: String,
- ?session_token: String,
- ?sigv4a_signing_region_set: Array[String],
- ?stub_responses: untyped,
- ?telemetry_provider: Aws::Telemetry::TelemetryProviderBase,
- ?token_provider: untyped,
- ?use_dualstack_endpoint: bool,
- ?use_fips_endpoint: bool,
- ?validate_params: bool,
- ?endpoint_provider: untyped,
- ?http_proxy: String,
- ?http_open_timeout: (Float | Integer),
- ?http_read_timeout: (Float | Integer),
- ?http_idle_timeout: (Float | Integer),
- ?http_continue_timeout: (Float | Integer),
- ?ssl_timeout: (Float | Integer | nil),
- ?http_wire_trace: bool,
- ?ssl_verify_peer: bool,
- ?ssl_ca_bundle: String,
- ?ssl_ca_directory: String,
- ?ssl_ca_store: String,
- ?on_chunk_received: Proc,
- ?on_chunk_sent: Proc,
- ?raise_response_errors: bool
- ) -> void
- | (?Hash[Symbol, untyped]) -> void
-
- def client: () -> Client
-
-
- end
- end
-end
diff --git a/gems/aws-sdk-iotanalytics/sig/types.rbs b/gems/aws-sdk-iotanalytics/sig/types.rbs
deleted file mode 100644
index 7b95497aaea..00000000000
--- a/gems/aws-sdk-iotanalytics/sig/types.rbs
+++ /dev/null
@@ -1,929 +0,0 @@
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-module Aws::IoTAnalytics
- module Types
-
- class AddAttributesActivity
- attr_accessor name: ::String
- attr_accessor attributes: ::Hash[::String, ::String]
- attr_accessor next: ::String
- SENSITIVE: []
- end
-
- class BatchPutMessageErrorEntry
- attr_accessor message_id: ::String
- attr_accessor error_code: ::String
- attr_accessor error_message: ::String
- SENSITIVE: []
- end
-
- class BatchPutMessageRequest
- attr_accessor channel_name: ::String
- attr_accessor messages: ::Array[Types::Message]
- SENSITIVE: []
- end
-
- class BatchPutMessageResponse
- attr_accessor batch_put_message_error_entries: ::Array[Types::BatchPutMessageErrorEntry]
- SENSITIVE: []
- end
-
- class CancelPipelineReprocessingRequest
- attr_accessor pipeline_name: ::String
- attr_accessor reprocessing_id: ::String
- SENSITIVE: []
- end
-
- class CancelPipelineReprocessingResponse < Aws::EmptyStructure
- end
-
- class Channel
- attr_accessor name: ::String
- attr_accessor storage: Types::ChannelStorage
- attr_accessor arn: ::String
- attr_accessor status: ("CREATING" | "ACTIVE" | "DELETING")
- attr_accessor retention_period: Types::RetentionPeriod
- attr_accessor creation_time: ::Time
- attr_accessor last_update_time: ::Time
- attr_accessor last_message_arrival_time: ::Time
- SENSITIVE: []
- end
-
- class ChannelActivity
- attr_accessor name: ::String
- attr_accessor channel_name: ::String
- attr_accessor next: ::String
- SENSITIVE: []
- end
-
- class ChannelMessages
- attr_accessor s3_paths: ::Array[::String]
- SENSITIVE: []
- end
-
- class ChannelStatistics
- attr_accessor size: Types::EstimatedResourceSize
- SENSITIVE: []
- end
-
- class ChannelStorage
- attr_accessor service_managed_s3: Types::ServiceManagedChannelS3Storage
- attr_accessor customer_managed_s3: Types::CustomerManagedChannelS3Storage
- SENSITIVE: []
- end
-
- class ChannelStorageSummary
- attr_accessor service_managed_s3: Types::ServiceManagedChannelS3StorageSummary
- attr_accessor customer_managed_s3: Types::CustomerManagedChannelS3StorageSummary
- SENSITIVE: []
- end
-
- class ChannelSummary
- attr_accessor channel_name: ::String
- attr_accessor channel_storage: Types::ChannelStorageSummary
- attr_accessor status: ("CREATING" | "ACTIVE" | "DELETING")
- attr_accessor creation_time: ::Time
- attr_accessor last_update_time: ::Time
- attr_accessor last_message_arrival_time: ::Time
- SENSITIVE: []
- end
-
- class Column
- attr_accessor name: ::String
- attr_accessor type: ::String
- SENSITIVE: []
- end
-
- class ContainerDatasetAction
- attr_accessor image: ::String
- attr_accessor execution_role_arn: ::String
- attr_accessor resource_configuration: Types::ResourceConfiguration
- attr_accessor variables: ::Array[Types::Variable]
- SENSITIVE: []
- end
-
- class CreateChannelRequest
- attr_accessor channel_name: ::String
- attr_accessor channel_storage: Types::ChannelStorage
- attr_accessor retention_period: Types::RetentionPeriod
- attr_accessor tags: ::Array[Types::Tag]
- SENSITIVE: []
- end
-
- class CreateChannelResponse
- attr_accessor channel_name: ::String
- attr_accessor channel_arn: ::String
- attr_accessor retention_period: Types::RetentionPeriod
- SENSITIVE: []
- end
-
- class CreateDatasetContentRequest
- attr_accessor dataset_name: ::String
- attr_accessor version_id: ::String
- SENSITIVE: []
- end
-
- class CreateDatasetContentResponse
- attr_accessor version_id: ::String
- SENSITIVE: []
- end
-
- class CreateDatasetRequest
- attr_accessor dataset_name: ::String
- attr_accessor actions: ::Array[Types::DatasetAction]
- attr_accessor triggers: ::Array[Types::DatasetTrigger]
- attr_accessor content_delivery_rules: ::Array[Types::DatasetContentDeliveryRule]
- attr_accessor retention_period: Types::RetentionPeriod
- attr_accessor versioning_configuration: Types::VersioningConfiguration
- attr_accessor tags: ::Array[Types::Tag]
- attr_accessor late_data_rules: ::Array[Types::LateDataRule]
- SENSITIVE: []
- end
-
- class CreateDatasetResponse
- attr_accessor dataset_name: ::String
- attr_accessor dataset_arn: ::String
- attr_accessor retention_period: Types::RetentionPeriod
- SENSITIVE: []
- end
-
- class CreateDatastoreRequest
- attr_accessor datastore_name: ::String
- attr_accessor datastore_storage: Types::DatastoreStorage
- attr_accessor retention_period: Types::RetentionPeriod
- attr_accessor tags: ::Array[Types::Tag]
- attr_accessor file_format_configuration: Types::FileFormatConfiguration
- attr_accessor datastore_partitions: Types::DatastorePartitions
- SENSITIVE: []
- end
-
- class CreateDatastoreResponse
- attr_accessor datastore_name: ::String
- attr_accessor datastore_arn: ::String
- attr_accessor retention_period: Types::RetentionPeriod
- SENSITIVE: []
- end
-
- class CreatePipelineRequest
- attr_accessor pipeline_name: ::String
- attr_accessor pipeline_activities: ::Array[Types::PipelineActivity]
- attr_accessor tags: ::Array[Types::Tag]
- SENSITIVE: []
- end
-
- class CreatePipelineResponse
- attr_accessor pipeline_name: ::String
- attr_accessor pipeline_arn: ::String
- SENSITIVE: []
- end
-
- class CustomerManagedChannelS3Storage
- attr_accessor bucket: ::String
- attr_accessor key_prefix: ::String
- attr_accessor role_arn: ::String
- SENSITIVE: []
- end
-
- class CustomerManagedChannelS3StorageSummary
- attr_accessor bucket: ::String
- attr_accessor key_prefix: ::String
- attr_accessor role_arn: ::String
- SENSITIVE: []
- end
-
- class CustomerManagedDatastoreS3Storage
- attr_accessor bucket: ::String
- attr_accessor key_prefix: ::String
- attr_accessor role_arn: ::String
- SENSITIVE: []
- end
-
- class CustomerManagedDatastoreS3StorageSummary
- attr_accessor bucket: ::String
- attr_accessor key_prefix: ::String
- attr_accessor role_arn: ::String
- SENSITIVE: []
- end
-
- class Dataset
- attr_accessor name: ::String
- attr_accessor arn: ::String
- attr_accessor actions: ::Array[Types::DatasetAction]
- attr_accessor triggers: ::Array[Types::DatasetTrigger]
- attr_accessor content_delivery_rules: ::Array[Types::DatasetContentDeliveryRule]
- attr_accessor status: ("CREATING" | "ACTIVE" | "DELETING")
- attr_accessor creation_time: ::Time
- attr_accessor last_update_time: ::Time
- attr_accessor retention_period: Types::RetentionPeriod
- attr_accessor versioning_configuration: Types::VersioningConfiguration
- attr_accessor late_data_rules: ::Array[Types::LateDataRule]
- SENSITIVE: []
- end
-
- class DatasetAction
- attr_accessor action_name: ::String
- attr_accessor query_action: Types::SqlQueryDatasetAction
- attr_accessor container_action: Types::ContainerDatasetAction
- SENSITIVE: []
- end
-
- class DatasetActionSummary
- attr_accessor action_name: ::String
- attr_accessor action_type: ("QUERY" | "CONTAINER")
- SENSITIVE: []
- end
-
- class DatasetContentDeliveryDestination
- attr_accessor iot_events_destination_configuration: Types::IotEventsDestinationConfiguration
- attr_accessor s3_destination_configuration: Types::S3DestinationConfiguration
- SENSITIVE: []
- end
-
- class DatasetContentDeliveryRule
- attr_accessor entry_name: ::String
- attr_accessor destination: Types::DatasetContentDeliveryDestination
- SENSITIVE: []
- end
-
- class DatasetContentStatus
- attr_accessor state: ("CREATING" | "SUCCEEDED" | "FAILED")
- attr_accessor reason: ::String
- SENSITIVE: []
- end
-
- class DatasetContentSummary
- attr_accessor version: ::String
- attr_accessor status: Types::DatasetContentStatus
- attr_accessor creation_time: ::Time
- attr_accessor schedule_time: ::Time
- attr_accessor completion_time: ::Time
- SENSITIVE: []
- end
-
- class DatasetContentVersionValue
- attr_accessor dataset_name: ::String
- SENSITIVE: []
- end
-
- class DatasetEntry
- attr_accessor entry_name: ::String
- attr_accessor data_uri: ::String
- SENSITIVE: []
- end
-
- class DatasetSummary
- attr_accessor dataset_name: ::String
- attr_accessor status: ("CREATING" | "ACTIVE" | "DELETING")
- attr_accessor creation_time: ::Time
- attr_accessor last_update_time: ::Time
- attr_accessor triggers: ::Array[Types::DatasetTrigger]
- attr_accessor actions: ::Array[Types::DatasetActionSummary]
- SENSITIVE: []
- end
-
- class DatasetTrigger
- attr_accessor schedule: Types::Schedule
- attr_accessor dataset: Types::TriggeringDataset
- SENSITIVE: []
- end
-
- class Datastore
- attr_accessor name: ::String
- attr_accessor storage: Types::DatastoreStorage
- attr_accessor arn: ::String
- attr_accessor status: ("CREATING" | "ACTIVE" | "DELETING")
- attr_accessor retention_period: Types::RetentionPeriod
- attr_accessor creation_time: ::Time
- attr_accessor last_update_time: ::Time
- attr_accessor last_message_arrival_time: ::Time
- attr_accessor file_format_configuration: Types::FileFormatConfiguration
- attr_accessor datastore_partitions: Types::DatastorePartitions
- SENSITIVE: []
- end
-
- class DatastoreActivity
- attr_accessor name: ::String
- attr_accessor datastore_name: ::String
- SENSITIVE: []
- end
-
- class DatastoreIotSiteWiseMultiLayerStorage
- attr_accessor customer_managed_s3_storage: Types::IotSiteWiseCustomerManagedDatastoreS3Storage
- SENSITIVE: []
- end
-
- class DatastoreIotSiteWiseMultiLayerStorageSummary
- attr_accessor customer_managed_s3_storage: Types::IotSiteWiseCustomerManagedDatastoreS3StorageSummary
- SENSITIVE: []
- end
-
- class DatastorePartition
- attr_accessor attribute_partition: Types::Partition
- attr_accessor timestamp_partition: Types::TimestampPartition
- SENSITIVE: []
- end
-
- class DatastorePartitions
- attr_accessor partitions: ::Array[Types::DatastorePartition]
- SENSITIVE: []
- end
-
- class DatastoreStatistics
- attr_accessor size: Types::EstimatedResourceSize
- SENSITIVE: []
- end
-
- class DatastoreStorage
- attr_accessor service_managed_s3: Types::ServiceManagedDatastoreS3Storage
- attr_accessor customer_managed_s3: Types::CustomerManagedDatastoreS3Storage
- attr_accessor iot_site_wise_multi_layer_storage: Types::DatastoreIotSiteWiseMultiLayerStorage
- SENSITIVE: []
- end
-
- class DatastoreStorageSummary
- attr_accessor service_managed_s3: Types::ServiceManagedDatastoreS3StorageSummary
- attr_accessor customer_managed_s3: Types::CustomerManagedDatastoreS3StorageSummary
- attr_accessor iot_site_wise_multi_layer_storage: Types::DatastoreIotSiteWiseMultiLayerStorageSummary
- SENSITIVE: []
- end
-
- class DatastoreSummary
- attr_accessor datastore_name: ::String
- attr_accessor datastore_storage: Types::DatastoreStorageSummary
- attr_accessor status: ("CREATING" | "ACTIVE" | "DELETING")
- attr_accessor creation_time: ::Time
- attr_accessor last_update_time: ::Time
- attr_accessor last_message_arrival_time: ::Time
- attr_accessor file_format_type: ("JSON" | "PARQUET")
- attr_accessor datastore_partitions: Types::DatastorePartitions
- SENSITIVE: []
- end
-
- class DeleteChannelRequest
- attr_accessor channel_name: ::String
- SENSITIVE: []
- end
-
- class DeleteDatasetContentRequest
- attr_accessor dataset_name: ::String
- attr_accessor version_id: ::String
- SENSITIVE: []
- end
-
- class DeleteDatasetRequest
- attr_accessor dataset_name: ::String
- SENSITIVE: []
- end
-
- class DeleteDatastoreRequest
- attr_accessor datastore_name: ::String
- SENSITIVE: []
- end
-
- class DeletePipelineRequest
- attr_accessor pipeline_name: ::String
- SENSITIVE: []
- end
-
- class DeltaTime
- attr_accessor offset_seconds: ::Integer
- attr_accessor time_expression: ::String
- SENSITIVE: []
- end
-
- class DeltaTimeSessionWindowConfiguration
- attr_accessor timeout_in_minutes: ::Integer
- SENSITIVE: []
- end
-
- class DescribeChannelRequest
- attr_accessor channel_name: ::String
- attr_accessor include_statistics: bool
- SENSITIVE: []
- end
-
- class DescribeChannelResponse
- attr_accessor channel: Types::Channel
- attr_accessor statistics: Types::ChannelStatistics
- SENSITIVE: []
- end
-
- class DescribeDatasetRequest
- attr_accessor dataset_name: ::String
- SENSITIVE: []
- end
-
- class DescribeDatasetResponse
- attr_accessor dataset: Types::Dataset
- SENSITIVE: []
- end
-
- class DescribeDatastoreRequest
- attr_accessor datastore_name: ::String
- attr_accessor include_statistics: bool
- SENSITIVE: []
- end
-
- class DescribeDatastoreResponse
- attr_accessor datastore: Types::Datastore
- attr_accessor statistics: Types::DatastoreStatistics
- SENSITIVE: []
- end
-
- class DescribeLoggingOptionsRequest < Aws::EmptyStructure
- end
-
- class DescribeLoggingOptionsResponse
- attr_accessor logging_options: Types::LoggingOptions
- SENSITIVE: []
- end
-
- class DescribePipelineRequest
- attr_accessor pipeline_name: ::String
- SENSITIVE: []
- end
-
- class DescribePipelineResponse
- attr_accessor pipeline: Types::Pipeline
- SENSITIVE: []
- end
-
- class DeviceRegistryEnrichActivity
- attr_accessor name: ::String
- attr_accessor attribute: ::String
- attr_accessor thing_name: ::String
- attr_accessor role_arn: ::String
- attr_accessor next: ::String
- SENSITIVE: []
- end
-
- class DeviceShadowEnrichActivity
- attr_accessor name: ::String
- attr_accessor attribute: ::String
- attr_accessor thing_name: ::String
- attr_accessor role_arn: ::String
- attr_accessor next: ::String
- SENSITIVE: []
- end
-
- class EstimatedResourceSize
- attr_accessor estimated_size_in_bytes: ::Float
- attr_accessor estimated_on: ::Time
- SENSITIVE: []
- end
-
- class FileFormatConfiguration
- attr_accessor json_configuration: Types::JsonConfiguration
- attr_accessor parquet_configuration: Types::ParquetConfiguration
- SENSITIVE: []
- end
-
- class FilterActivity
- attr_accessor name: ::String
- attr_accessor filter: ::String
- attr_accessor next: ::String
- SENSITIVE: []
- end
-
- class GetDatasetContentRequest
- attr_accessor dataset_name: ::String
- attr_accessor version_id: ::String
- SENSITIVE: []
- end
-
- class GetDatasetContentResponse
- attr_accessor entries: ::Array[Types::DatasetEntry]
- attr_accessor timestamp: ::Time
- attr_accessor status: Types::DatasetContentStatus
- SENSITIVE: []
- end
-
- class GlueConfiguration
- attr_accessor table_name: ::String
- attr_accessor database_name: ::String
- SENSITIVE: []
- end
-
- class InternalFailureException
- attr_accessor message: ::String
- SENSITIVE: []
- end
-
- class InvalidRequestException
- attr_accessor message: ::String
- SENSITIVE: []
- end
-
- class IotEventsDestinationConfiguration
- attr_accessor input_name: ::String
- attr_accessor role_arn: ::String
- SENSITIVE: []
- end
-
- class IotSiteWiseCustomerManagedDatastoreS3Storage
- attr_accessor bucket: ::String
- attr_accessor key_prefix: ::String
- SENSITIVE: []
- end
-
- class IotSiteWiseCustomerManagedDatastoreS3StorageSummary
- attr_accessor bucket: ::String
- attr_accessor key_prefix: ::String
- SENSITIVE: []
- end
-
- class JsonConfiguration < Aws::EmptyStructure
- end
-
- class LambdaActivity
- attr_accessor name: ::String
- attr_accessor lambda_name: ::String
- attr_accessor batch_size: ::Integer
- attr_accessor next: ::String
- SENSITIVE: []
- end
-
- class LateDataRule
- attr_accessor rule_name: ::String
- attr_accessor rule_configuration: Types::LateDataRuleConfiguration
- SENSITIVE: []
- end
-
- class LateDataRuleConfiguration
- attr_accessor delta_time_session_window_configuration: Types::DeltaTimeSessionWindowConfiguration
- SENSITIVE: []
- end
-
- class LimitExceededException
- attr_accessor message: ::String
- SENSITIVE: []
- end
-
- class ListChannelsRequest
- attr_accessor next_token: ::String
- attr_accessor max_results: ::Integer
- SENSITIVE: []
- end
-
- class ListChannelsResponse
- attr_accessor channel_summaries: ::Array[Types::ChannelSummary]
- attr_accessor next_token: ::String
- SENSITIVE: []
- end
-
- class ListDatasetContentsRequest
- attr_accessor dataset_name: ::String
- attr_accessor next_token: ::String
- attr_accessor max_results: ::Integer
- attr_accessor scheduled_on_or_after: ::Time
- attr_accessor scheduled_before: ::Time
- SENSITIVE: []
- end
-
- class ListDatasetContentsResponse
- attr_accessor dataset_content_summaries: ::Array[Types::DatasetContentSummary]
- attr_accessor next_token: ::String
- SENSITIVE: []
- end
-
- class ListDatasetsRequest
- attr_accessor next_token: ::String
- attr_accessor max_results: ::Integer
- SENSITIVE: []
- end
-
- class ListDatasetsResponse
- attr_accessor dataset_summaries: ::Array[Types::DatasetSummary]
- attr_accessor next_token: ::String
- SENSITIVE: []
- end
-
- class ListDatastoresRequest
- attr_accessor next_token: ::String
- attr_accessor max_results: ::Integer
- SENSITIVE: []
- end
-
- class ListDatastoresResponse
- attr_accessor datastore_summaries: ::Array[Types::DatastoreSummary]
- attr_accessor next_token: ::String
- SENSITIVE: []
- end
-
- class ListPipelinesRequest
- attr_accessor next_token: ::String
- attr_accessor max_results: ::Integer
- SENSITIVE: []
- end
-
- class ListPipelinesResponse
- attr_accessor pipeline_summaries: ::Array[Types::PipelineSummary]
- attr_accessor next_token: ::String
- SENSITIVE: []
- end
-
- class ListTagsForResourceRequest
- attr_accessor resource_arn: ::String
- SENSITIVE: []
- end
-
- class ListTagsForResourceResponse
- attr_accessor tags: ::Array[Types::Tag]
- SENSITIVE: []
- end
-
- class LoggingOptions
- attr_accessor role_arn: ::String
- attr_accessor level: ("ERROR")
- attr_accessor enabled: bool
- SENSITIVE: []
- end
-
- class MathActivity
- attr_accessor name: ::String
- attr_accessor attribute: ::String
- attr_accessor math: ::String
- attr_accessor next: ::String
- SENSITIVE: []
- end
-
- class Message
- attr_accessor message_id: ::String
- attr_accessor payload: ::String
- SENSITIVE: []
- end
-
- class OutputFileUriValue
- attr_accessor file_name: ::String
- SENSITIVE: []
- end
-
- class ParquetConfiguration
- attr_accessor schema_definition: Types::SchemaDefinition
- SENSITIVE: []
- end
-
- class Partition
- attr_accessor attribute_name: ::String
- SENSITIVE: []
- end
-
- class Pipeline
- attr_accessor name: ::String
- attr_accessor arn: ::String
- attr_accessor activities: ::Array[Types::PipelineActivity]
- attr_accessor reprocessing_summaries: ::Array[Types::ReprocessingSummary]
- attr_accessor creation_time: ::Time
- attr_accessor last_update_time: ::Time
- SENSITIVE: []
- end
-
- class PipelineActivity
- attr_accessor channel: Types::ChannelActivity
- attr_accessor lambda: Types::LambdaActivity
- attr_accessor datastore: Types::DatastoreActivity
- attr_accessor add_attributes: Types::AddAttributesActivity
- attr_accessor remove_attributes: Types::RemoveAttributesActivity
- attr_accessor select_attributes: Types::SelectAttributesActivity
- attr_accessor filter: Types::FilterActivity
- attr_accessor math: Types::MathActivity
- attr_accessor device_registry_enrich: Types::DeviceRegistryEnrichActivity
- attr_accessor device_shadow_enrich: Types::DeviceShadowEnrichActivity
- SENSITIVE: []
- end
-
- class PipelineSummary
- attr_accessor pipeline_name: ::String
- attr_accessor reprocessing_summaries: ::Array[Types::ReprocessingSummary]
- attr_accessor creation_time: ::Time
- attr_accessor last_update_time: ::Time
- SENSITIVE: []
- end
-
- class PutLoggingOptionsRequest
- attr_accessor logging_options: Types::LoggingOptions
- SENSITIVE: []
- end
-
- class QueryFilter
- attr_accessor delta_time: Types::DeltaTime
- SENSITIVE: []
- end
-
- class RemoveAttributesActivity
- attr_accessor name: ::String
- attr_accessor attributes: ::Array[::String]
- attr_accessor next: ::String
- SENSITIVE: []
- end
-
- class ReprocessingSummary
- attr_accessor id: ::String
- attr_accessor status: ("RUNNING" | "SUCCEEDED" | "CANCELLED" | "FAILED")
- attr_accessor creation_time: ::Time
- SENSITIVE: []
- end
-
- class ResourceAlreadyExistsException
- attr_accessor message: ::String
- attr_accessor resource_id: ::String
- attr_accessor resource_arn: ::String
- SENSITIVE: []
- end
-
- class ResourceConfiguration
- attr_accessor compute_type: ("ACU_1" | "ACU_2")
- attr_accessor volume_size_in_gb: ::Integer
- SENSITIVE: []
- end
-
- class ResourceNotFoundException
- attr_accessor message: ::String
- SENSITIVE: []
- end
-
- class RetentionPeriod
- attr_accessor unlimited: bool
- attr_accessor number_of_days: ::Integer
- SENSITIVE: []
- end
-
- class RunPipelineActivityRequest
- attr_accessor pipeline_activity: Types::PipelineActivity
- attr_accessor payloads: ::Array[::String]
- SENSITIVE: []
- end
-
- class RunPipelineActivityResponse
- attr_accessor payloads: ::Array[::String]
- attr_accessor log_result: ::String
- SENSITIVE: []
- end
-
- class S3DestinationConfiguration
- attr_accessor bucket: ::String
- attr_accessor key: ::String
- attr_accessor glue_configuration: Types::GlueConfiguration
- attr_accessor role_arn: ::String
- SENSITIVE: []
- end
-
- class SampleChannelDataRequest
- attr_accessor channel_name: ::String
- attr_accessor max_messages: ::Integer
- attr_accessor start_time: ::Time
- attr_accessor end_time: ::Time
- SENSITIVE: []
- end
-
- class SampleChannelDataResponse
- attr_accessor payloads: ::Array[::String]
- SENSITIVE: []
- end
-
- class Schedule
- attr_accessor expression: ::String
- SENSITIVE: []
- end
-
- class SchemaDefinition
- attr_accessor columns: ::Array[Types::Column]
- SENSITIVE: []
- end
-
- class SelectAttributesActivity
- attr_accessor name: ::String
- attr_accessor attributes: ::Array[::String]
- attr_accessor next: ::String
- SENSITIVE: []
- end
-
- class ServiceManagedChannelS3Storage < Aws::EmptyStructure
- end
-
- class ServiceManagedChannelS3StorageSummary < Aws::EmptyStructure
- end
-
- class ServiceManagedDatastoreS3Storage < Aws::EmptyStructure
- end
-
- class ServiceManagedDatastoreS3StorageSummary < Aws::EmptyStructure
- end
-
- class ServiceUnavailableException
- attr_accessor message: ::String
- SENSITIVE: []
- end
-
- class SqlQueryDatasetAction
- attr_accessor sql_query: ::String
- attr_accessor filters: ::Array[Types::QueryFilter]
- SENSITIVE: []
- end
-
- class StartPipelineReprocessingRequest
- attr_accessor pipeline_name: ::String
- attr_accessor start_time: ::Time
- attr_accessor end_time: ::Time
- attr_accessor channel_messages: Types::ChannelMessages
- SENSITIVE: []
- end
-
- class StartPipelineReprocessingResponse
- attr_accessor reprocessing_id: ::String
- SENSITIVE: []
- end
-
- class Tag
- attr_accessor key: ::String
- attr_accessor value: ::String
- SENSITIVE: []
- end
-
- class TagResourceRequest
- attr_accessor resource_arn: ::String
- attr_accessor tags: ::Array[Types::Tag]
- SENSITIVE: []
- end
-
- class TagResourceResponse < Aws::EmptyStructure
- end
-
- class ThrottlingException
- attr_accessor message: ::String
- SENSITIVE: []
- end
-
- class TimestampPartition
- attr_accessor attribute_name: ::String
- attr_accessor timestamp_format: ::String
- SENSITIVE: []
- end
-
- class TriggeringDataset
- attr_accessor name: ::String
- SENSITIVE: []
- end
-
- class UntagResourceRequest
- attr_accessor resource_arn: ::String
- attr_accessor tag_keys: ::Array[::String]
- SENSITIVE: []
- end
-
- class UntagResourceResponse < Aws::EmptyStructure
- end
-
- class UpdateChannelRequest
- attr_accessor channel_name: ::String
- attr_accessor channel_storage: Types::ChannelStorage
- attr_accessor retention_period: Types::RetentionPeriod
- SENSITIVE: []
- end
-
- class UpdateDatasetRequest
- attr_accessor dataset_name: ::String
- attr_accessor actions: ::Array[Types::DatasetAction]
- attr_accessor triggers: ::Array[Types::DatasetTrigger]
- attr_accessor content_delivery_rules: ::Array[Types::DatasetContentDeliveryRule]
- attr_accessor retention_period: Types::RetentionPeriod
- attr_accessor versioning_configuration: Types::VersioningConfiguration
- attr_accessor late_data_rules: ::Array[Types::LateDataRule]
- SENSITIVE: []
- end
-
- class UpdateDatastoreRequest
- attr_accessor datastore_name: ::String
- attr_accessor retention_period: Types::RetentionPeriod
- attr_accessor datastore_storage: Types::DatastoreStorage
- attr_accessor file_format_configuration: Types::FileFormatConfiguration
- SENSITIVE: []
- end
-
- class UpdatePipelineRequest
- attr_accessor pipeline_name: ::String
- attr_accessor pipeline_activities: ::Array[Types::PipelineActivity]
- SENSITIVE: []
- end
-
- class Variable
- attr_accessor name: ::String
- attr_accessor string_value: ::String
- attr_accessor double_value: ::Float
- attr_accessor dataset_content_version_value: Types::DatasetContentVersionValue
- attr_accessor output_file_uri_value: Types::OutputFileUriValue
- SENSITIVE: []
- end
-
- class VersioningConfiguration
- attr_accessor unlimited: bool
- attr_accessor max_versions: ::Integer
- SENSITIVE: []
- end
- end
-end
diff --git a/gems/aws-sdk-iotanalytics/sig/waiters.rbs b/gems/aws-sdk-iotanalytics/sig/waiters.rbs
deleted file mode 100644
index d847fd94e37..00000000000
--- a/gems/aws-sdk-iotanalytics/sig/waiters.rbs
+++ /dev/null
@@ -1,13 +0,0 @@
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-module Aws
- module IoTAnalytics
- module Waiters
- end
- end
-end
diff --git a/gems/aws-sdk-iotanalytics/spec/endpoint_provider_spec.rb b/gems/aws-sdk-iotanalytics/spec/endpoint_provider_spec.rb
deleted file mode 100644
index 02e6753bfcb..00000000000
--- a/gems/aws-sdk-iotanalytics/spec/endpoint_provider_spec.rb
+++ /dev/null
@@ -1,407 +0,0 @@
-# frozen_string_literal: true
-
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-
-require_relative 'spec_helper'
-
-module Aws::IoTAnalytics
- describe EndpointProvider do
- subject { Aws::IoTAnalytics::EndpointProvider.new }
-
- context "For region ap-northeast-1 with FIPS disabled and DualStack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://iotanalytics.ap-northeast-1.amazonaws.com"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "ap-northeast-1", use_fips: false, use_dual_stack: false})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region ap-south-1 with FIPS disabled and DualStack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://iotanalytics.ap-south-1.amazonaws.com"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "ap-south-1", use_fips: false, use_dual_stack: false})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region ap-southeast-2 with FIPS disabled and DualStack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://iotanalytics.ap-southeast-2.amazonaws.com"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "ap-southeast-2", use_fips: false, use_dual_stack: false})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region eu-central-1 with FIPS disabled and DualStack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://iotanalytics.eu-central-1.amazonaws.com"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "eu-central-1", use_fips: false, use_dual_stack: false})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region eu-west-1 with FIPS disabled and DualStack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://iotanalytics.eu-west-1.amazonaws.com"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "eu-west-1", use_fips: false, use_dual_stack: false})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region us-east-1 with FIPS disabled and DualStack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://iotanalytics.us-east-1.amazonaws.com"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "us-east-1", use_fips: false, use_dual_stack: false})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region us-east-2 with FIPS disabled and DualStack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://iotanalytics.us-east-2.amazonaws.com"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "us-east-2", use_fips: false, use_dual_stack: false})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region us-west-2 with FIPS disabled and DualStack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://iotanalytics.us-west-2.amazonaws.com"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "us-west-2", use_fips: false, use_dual_stack: false})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region us-east-1 with FIPS enabled and DualStack enabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://iotanalytics-fips.us-east-1.api.aws"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "us-east-1", use_fips: true, use_dual_stack: true})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region us-east-1 with FIPS enabled and DualStack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://iotanalytics-fips.us-east-1.amazonaws.com"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "us-east-1", use_fips: true, use_dual_stack: false})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region us-east-1 with FIPS disabled and DualStack enabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://iotanalytics.us-east-1.api.aws"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "us-east-1", use_fips: false, use_dual_stack: true})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region cn-north-1 with FIPS disabled and DualStack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://iotanalytics.cn-north-1.amazonaws.com.cn"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "cn-north-1", use_fips: false, use_dual_stack: false})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region cn-north-1 with FIPS enabled and DualStack enabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://iotanalytics-fips.cn-north-1.api.amazonwebservices.com.cn"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "cn-north-1", use_fips: true, use_dual_stack: true})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region cn-north-1 with FIPS enabled and DualStack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://iotanalytics-fips.cn-north-1.amazonaws.com.cn"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "cn-north-1", use_fips: true, use_dual_stack: false})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region cn-north-1 with FIPS disabled and DualStack enabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://iotanalytics.cn-north-1.api.amazonwebservices.com.cn"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "cn-north-1", use_fips: false, use_dual_stack: true})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region us-gov-east-1 with FIPS enabled and DualStack enabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://iotanalytics-fips.us-gov-east-1.api.aws"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "us-gov-east-1", use_fips: true, use_dual_stack: true})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region us-gov-east-1 with FIPS enabled and DualStack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://iotanalytics-fips.us-gov-east-1.amazonaws.com"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "us-gov-east-1", use_fips: true, use_dual_stack: false})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region us-gov-east-1 with FIPS disabled and DualStack enabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://iotanalytics.us-gov-east-1.api.aws"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "us-gov-east-1", use_fips: false, use_dual_stack: true})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region us-gov-east-1 with FIPS disabled and DualStack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://iotanalytics.us-gov-east-1.amazonaws.com"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "us-gov-east-1", use_fips: false, use_dual_stack: false})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region us-iso-east-1 with FIPS enabled and DualStack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://iotanalytics-fips.us-iso-east-1.c2s.ic.gov"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "us-iso-east-1", use_fips: true, use_dual_stack: false})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region us-iso-east-1 with FIPS disabled and DualStack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://iotanalytics.us-iso-east-1.c2s.ic.gov"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "us-iso-east-1", use_fips: false, use_dual_stack: false})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region us-isob-east-1 with FIPS enabled and DualStack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://iotanalytics-fips.us-isob-east-1.sc2s.sgov.gov"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "us-isob-east-1", use_fips: true, use_dual_stack: false})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For region us-isob-east-1 with FIPS disabled and DualStack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://iotanalytics.us-isob-east-1.sc2s.sgov.gov"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "us-isob-east-1", use_fips: false, use_dual_stack: false})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For custom endpoint with region set and fips disabled and dualstack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://example.com"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "us-east-1", use_fips: false, use_dual_stack: false, endpoint: "https://example.com"})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For custom endpoint with region not set and fips disabled and dualstack disabled" do
- let(:expected) do
- {"endpoint" => {"url" => "https://example.com"}}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{use_fips: false, use_dual_stack: false, endpoint: "https://example.com"})
- endpoint = subject.resolve_endpoint(params)
- expect(endpoint.url).to eq(expected['endpoint']['url'])
- expect(endpoint.headers).to eq(expected['endpoint']['headers'] || {})
- expect(endpoint.properties).to eq(expected['endpoint']['properties'] || {})
- end
- end
-
- context "For custom endpoint with fips enabled and dualstack disabled" do
- let(:expected) do
- {"error" => "Invalid Configuration: FIPS and custom endpoint are not supported"}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "us-east-1", use_fips: true, use_dual_stack: false, endpoint: "https://example.com"})
- expect do
- subject.resolve_endpoint(params)
- end.to raise_error(ArgumentError, expected['error'])
- end
- end
-
- context "For custom endpoint with fips disabled and dualstack enabled" do
- let(:expected) do
- {"error" => "Invalid Configuration: Dualstack and custom endpoint are not supported"}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{region: "us-east-1", use_fips: false, use_dual_stack: true, endpoint: "https://example.com"})
- expect do
- subject.resolve_endpoint(params)
- end.to raise_error(ArgumentError, expected['error'])
- end
- end
-
- context "Missing region" do
- let(:expected) do
- {"error" => "Invalid Configuration: Missing Region"}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{})
- expect do
- subject.resolve_endpoint(params)
- end.to raise_error(ArgumentError, expected['error'])
- end
- end
-
- end
-end
diff --git a/gems/aws-sdk-iotanalytics/spec/spec_helper.rb b/gems/aws-sdk-iotanalytics/spec/spec_helper.rb
deleted file mode 100644
index c347e4531b7..00000000000
--- a/gems/aws-sdk-iotanalytics/spec/spec_helper.rb
+++ /dev/null
@@ -1,18 +0,0 @@
-# frozen_string_literal: true
-
-# WARNING ABOUT GENERATED CODE
-#
-# This file is generated. See the contributing guide for more information:
-# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
-#
-# WARNING ABOUT GENERATED CODE
-
-require_relative '../../aws-sdk-core/spec/shared_spec_helper'
-
-$:.unshift(File.expand_path('../../lib', __FILE__))
-$:.unshift(File.expand_path('../../../aws-sdk-core/lib', __FILE__))
-$:.unshift(File.expand_path('../../../aws-sigv4/lib', __FILE__))
-
-require 'rspec'
-require 'webmock/rspec'
-require 'aws-sdk-iotanalytics'
diff --git a/services.json b/services.json
index 6d9ede069d8..9faaed24bd5 100644
--- a/services.json
+++ b/services.json
@@ -254,10 +254,6 @@
"CloudWatchEvents": {
"models": "events/2015-10-07"
},
- "CloudWatchEvidently": {
- "models": "evidently/2021-02-01",
- "deprecated": true
- },
"CloudWatchLogs": {
"models": "logs/2014-03-28"
},
@@ -620,10 +616,6 @@
"IoT": {
"models": "iot/2015-05-28"
},
- "IoTAnalytics": {
- "models": "iotanalytics/2017-11-27",
- "deprecated": true
- },
"IoTDataPlane": {
"models": "iot-data/2015-05-28",
"partitionsKey": null