diff --git a/clients/client-sagemaker/src/commands/CreateAutoMLJobCommand.ts b/clients/client-sagemaker/src/commands/CreateAutoMLJobCommand.ts index f1cb41ada430..4ef17283d69e 100644 --- a/clients/client-sagemaker/src/commands/CreateAutoMLJobCommand.ts +++ b/clients/client-sagemaker/src/commands/CreateAutoMLJobCommand.ts @@ -93,7 +93,7 @@ export interface CreateAutoMLJobCommandOutput extends CreateAutoMLJobResponse, _ * AlgorithmsConfig: [ // AutoMLAlgorithmsConfig * { // AutoMLAlgorithmConfig * AutoMLAlgorithms: [ // AutoMLAlgorithms // required - * "xgboost" || "linear-learner" || "mlp" || "lightgbm" || "catboost" || "randomforest" || "extra-trees" || "nn-torch" || "fastai", + * "xgboost" || "linear-learner" || "mlp" || "lightgbm" || "catboost" || "randomforest" || "extra-trees" || "nn-torch" || "fastai" || "cnn-qr" || "deepar" || "prophet" || "npts" || "arima" || "ets", * ], * }, * ], diff --git a/clients/client-sagemaker/src/commands/CreateAutoMLJobV2Command.ts b/clients/client-sagemaker/src/commands/CreateAutoMLJobV2Command.ts index 697c4443190e..cb791bf669c8 100644 --- a/clients/client-sagemaker/src/commands/CreateAutoMLJobV2Command.ts +++ b/clients/client-sagemaker/src/commands/CreateAutoMLJobV2Command.ts @@ -120,13 +120,22 @@ export interface CreateAutoMLJobV2CommandOutput extends CreateAutoMLJobV2Respons * CountryCode: "STRING_VALUE", * }, * ], - * }, - * TabularJobConfig: { // TabularJobConfig * CandidateGenerationConfig: { // CandidateGenerationConfig * AlgorithmsConfig: [ // AutoMLAlgorithmsConfig * { // AutoMLAlgorithmConfig * AutoMLAlgorithms: [ // AutoMLAlgorithms // required - * "xgboost" || "linear-learner" || "mlp" || "lightgbm" || "catboost" || "randomforest" || "extra-trees" || "nn-torch" || "fastai", + * "xgboost" || "linear-learner" || "mlp" || "lightgbm" || "catboost" || "randomforest" || "extra-trees" || "nn-torch" || "fastai" || "cnn-qr" || "deepar" || "prophet" || "npts" || "arima" || "ets", + * ], + * }, + * ], + * }, + * }, + * TabularJobConfig: { // TabularJobConfig + * CandidateGenerationConfig: { + * AlgorithmsConfig: [ + * { + * AutoMLAlgorithms: [ // required + * "xgboost" || "linear-learner" || "mlp" || "lightgbm" || "catboost" || "randomforest" || "extra-trees" || "nn-torch" || "fastai" || "cnn-qr" || "deepar" || "prophet" || "npts" || "arima" || "ets", * ], * }, * ], diff --git a/clients/client-sagemaker/src/commands/CreateModelPackageCommand.ts b/clients/client-sagemaker/src/commands/CreateModelPackageCommand.ts index 7f6a2100fae7..150eba5d51a8 100644 --- a/clients/client-sagemaker/src/commands/CreateModelPackageCommand.ts +++ b/clients/client-sagemaker/src/commands/CreateModelPackageCommand.ts @@ -5,7 +5,11 @@ import { Command as $Command } from "@smithy/smithy-client"; import { MetadataBearer as __MetadataBearer } from "@smithy/types"; import { commonParams } from "../endpoint/EndpointParameters"; -import { CreateModelPackageInput, CreateModelPackageOutput } from "../models/models_1"; +import { + CreateModelPackageInput, + CreateModelPackageInputFilterSensitiveLog, + CreateModelPackageOutput, +} from "../models/models_1"; import { de_CreateModelPackageCommand, se_CreateModelPackageCommand } from "../protocols/Aws_json1_1"; import { SageMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../SageMakerClient"; @@ -299,6 +303,13 @@ export interface CreateModelPackageCommandOutput extends CreateModelPackageOutpu * ], * SkipModelValidation: "All" || "None", * SourceUri: "STRING_VALUE", + * SecurityConfig: { // ModelPackageSecurityConfig 
+ * KmsKeyId: "STRING_VALUE", // required + * }, + * ModelCard: { // ModelPackageModelCard + * ModelCardContent: "STRING_VALUE", + * ModelCardStatus: "Draft" || "PendingReview" || "Approved" || "Archived", + * }, * }; * const command = new CreateModelPackageCommand(input); * const response = await client.send(command); @@ -346,7 +357,7 @@ export class CreateModelPackageCommand extends $Command }) .s("SageMaker", "CreateModelPackage", {}) .n("SageMakerClient", "CreateModelPackageCommand") - .f(void 0, void 0) + .f(CreateModelPackageInputFilterSensitiveLog, void 0) .ser(se_CreateModelPackageCommand) .de(de_CreateModelPackageCommand) .build() {} diff --git a/clients/client-sagemaker/src/commands/CreateStudioLifecycleConfigCommand.ts b/clients/client-sagemaker/src/commands/CreateStudioLifecycleConfigCommand.ts index 4e6d2d7b60a5..2e6538102732 100644 --- a/clients/client-sagemaker/src/commands/CreateStudioLifecycleConfigCommand.ts +++ b/clients/client-sagemaker/src/commands/CreateStudioLifecycleConfigCommand.ts @@ -5,8 +5,7 @@ import { Command as $Command } from "@smithy/smithy-client"; import { MetadataBearer as __MetadataBearer } from "@smithy/types"; import { commonParams } from "../endpoint/EndpointParameters"; -import { CreateStudioLifecycleConfigRequest } from "../models/models_1"; -import { CreateStudioLifecycleConfigResponse } from "../models/models_2"; +import { CreateStudioLifecycleConfigRequest, CreateStudioLifecycleConfigResponse } from "../models/models_2"; import { de_CreateStudioLifecycleConfigCommand, se_CreateStudioLifecycleConfigCommand } from "../protocols/Aws_json1_1"; import { SageMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../SageMakerClient"; diff --git a/clients/client-sagemaker/src/commands/DescribeAutoMLJobCommand.ts b/clients/client-sagemaker/src/commands/DescribeAutoMLJobCommand.ts index 4a71b652bf9c..9ad84e360f90 100644 --- a/clients/client-sagemaker/src/commands/DescribeAutoMLJobCommand.ts +++ b/clients/client-sagemaker/src/commands/DescribeAutoMLJobCommand.ts @@ -93,7 +93,7 @@ export interface DescribeAutoMLJobCommandOutput extends DescribeAutoMLJobRespons * // AlgorithmsConfig: [ // AutoMLAlgorithmsConfig * // { // AutoMLAlgorithmConfig * // AutoMLAlgorithms: [ // AutoMLAlgorithms // required - * // "xgboost" || "linear-learner" || "mlp" || "lightgbm" || "catboost" || "randomforest" || "extra-trees" || "nn-torch" || "fastai", + * // "xgboost" || "linear-learner" || "mlp" || "lightgbm" || "catboost" || "randomforest" || "extra-trees" || "nn-torch" || "fastai" || "cnn-qr" || "deepar" || "prophet" || "npts" || "arima" || "ets", * // ], * // }, * // ], diff --git a/clients/client-sagemaker/src/commands/DescribeAutoMLJobV2Command.ts b/clients/client-sagemaker/src/commands/DescribeAutoMLJobV2Command.ts index ca8fc71d3eac..a99509e465a4 100644 --- a/clients/client-sagemaker/src/commands/DescribeAutoMLJobV2Command.ts +++ b/clients/client-sagemaker/src/commands/DescribeAutoMLJobV2Command.ts @@ -116,13 +116,22 @@ export interface DescribeAutoMLJobV2CommandOutput extends DescribeAutoMLJobV2Res * // CountryCode: "STRING_VALUE", * // }, * // ], - * // }, - * // TabularJobConfig: { // TabularJobConfig * // CandidateGenerationConfig: { // CandidateGenerationConfig * // AlgorithmsConfig: [ // AutoMLAlgorithmsConfig * // { // AutoMLAlgorithmConfig * // AutoMLAlgorithms: [ // AutoMLAlgorithms // required - * // "xgboost" || "linear-learner" || "mlp" || "lightgbm" || "catboost" || "randomforest" || "extra-trees" || "nn-torch" || "fastai", + * // 
"xgboost" || "linear-learner" || "mlp" || "lightgbm" || "catboost" || "randomforest" || "extra-trees" || "nn-torch" || "fastai" || "cnn-qr" || "deepar" || "prophet" || "npts" || "arima" || "ets", + * // ], + * // }, + * // ], + * // }, + * // }, + * // TabularJobConfig: { // TabularJobConfig + * // CandidateGenerationConfig: { + * // AlgorithmsConfig: [ + * // { + * // AutoMLAlgorithms: [ // required + * // "xgboost" || "linear-learner" || "mlp" || "lightgbm" || "catboost" || "randomforest" || "extra-trees" || "nn-torch" || "fastai" || "cnn-qr" || "deepar" || "prophet" || "npts" || "arima" || "ets", * // ], * // }, * // ], diff --git a/clients/client-sagemaker/src/commands/DescribeModelPackageCommand.ts b/clients/client-sagemaker/src/commands/DescribeModelPackageCommand.ts index 7cea21e078e9..ca5de5023e5a 100644 --- a/clients/client-sagemaker/src/commands/DescribeModelPackageCommand.ts +++ b/clients/client-sagemaker/src/commands/DescribeModelPackageCommand.ts @@ -5,7 +5,11 @@ import { Command as $Command } from "@smithy/smithy-client"; import { MetadataBearer as __MetadataBearer } from "@smithy/types"; import { commonParams } from "../endpoint/EndpointParameters"; -import { DescribeModelPackageInput, DescribeModelPackageOutput } from "../models/models_2"; +import { + DescribeModelPackageInput, + DescribeModelPackageOutput, + DescribeModelPackageOutputFilterSensitiveLog, +} from "../models/models_2"; import { de_DescribeModelPackageCommand, se_DescribeModelPackageCommand } from "../protocols/Aws_json1_1"; import { SageMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../SageMakerClient"; @@ -29,6 +33,11 @@ export interface DescribeModelPackageCommandOutput extends DescribeModelPackageO /** *
Returns a description of the specified model package, which is used to create SageMaker * models or list them on Amazon Web Services Marketplace.
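For orientation, here is a minimal sketch (not part of this diff) of reading the two response fields this change set adds to `DescribeModelPackageOutput`, `SecurityConfig` and `ModelCard`. The region, package name, and logging are placeholders; as the note below explains, calling this API against a KMS-encrypted package also surfaces a KMS Decrypt call in CloudTrail.

```ts
import { SageMakerClient, DescribeModelPackageCommand } from "@aws-sdk/client-sagemaker";

const client = new SageMakerClient({ region: "us-east-1" });

// Hypothetical versioned package; substitute your own model package name or ARN.
const response = await client.send(
  new DescribeModelPackageCommand({ ModelPackageName: "churn-models/1" })
);

// New in this change set: the KMS key used to protect package information (if any)
// and the simplified model card attached to this package version.
console.log(response.SecurityConfig?.KmsKeyId);
console.log(response.ModelCard?.ModelCardStatus);
```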
+ *If you provided a KMS Key ID when you created your model package, + * you will see the KMS + * Decrypt API call in your CloudTrail logs when you use this API.
+ *To create models in SageMaker, buyers can subscribe to model packages listed on Amazon Web Services * Marketplace.
* @example @@ -323,6 +332,13 @@ export interface DescribeModelPackageCommandOutput extends DescribeModelPackageO * // ], * // SkipModelValidation: "All" || "None", * // SourceUri: "STRING_VALUE", + * // SecurityConfig: { // ModelPackageSecurityConfig + * // KmsKeyId: "STRING_VALUE", // required + * // }, + * // ModelCard: { // ModelPackageModelCard + * // ModelCardContent: "STRING_VALUE", + * // ModelCardStatus: "Draft" || "PendingReview" || "Approved" || "Archived", + * // }, * // }; * * ``` @@ -357,7 +373,7 @@ export class DescribeModelPackageCommand extends $Command }) .s("SageMaker", "DescribeModelPackage", {}) .n("SageMakerClient", "DescribeModelPackageCommand") - .f(void 0, void 0) + .f(void 0, DescribeModelPackageOutputFilterSensitiveLog) .ser(se_DescribeModelPackageCommand) .de(de_DescribeModelPackageCommand) .build() {} diff --git a/clients/client-sagemaker/src/commands/DescribeProcessingJobCommand.ts b/clients/client-sagemaker/src/commands/DescribeProcessingJobCommand.ts index d19bb1764f30..c5a99e834894 100644 --- a/clients/client-sagemaker/src/commands/DescribeProcessingJobCommand.ts +++ b/clients/client-sagemaker/src/commands/DescribeProcessingJobCommand.ts @@ -5,7 +5,8 @@ import { Command as $Command } from "@smithy/smithy-client"; import { MetadataBearer as __MetadataBearer } from "@smithy/types"; import { commonParams } from "../endpoint/EndpointParameters"; -import { DescribeProcessingJobRequest, DescribeProcessingJobResponse } from "../models/models_2"; +import { DescribeProcessingJobRequest } from "../models/models_2"; +import { DescribeProcessingJobResponse } from "../models/models_3"; import { de_DescribeProcessingJobCommand, se_DescribeProcessingJobCommand } from "../protocols/Aws_json1_1"; import { SageMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../SageMakerClient"; diff --git a/clients/client-sagemaker/src/commands/DescribeProjectCommand.ts b/clients/client-sagemaker/src/commands/DescribeProjectCommand.ts index 3b6b551bf320..efe78f9986e9 100644 --- a/clients/client-sagemaker/src/commands/DescribeProjectCommand.ts +++ b/clients/client-sagemaker/src/commands/DescribeProjectCommand.ts @@ -5,8 +5,7 @@ import { Command as $Command } from "@smithy/smithy-client"; import { MetadataBearer as __MetadataBearer } from "@smithy/types"; import { commonParams } from "../endpoint/EndpointParameters"; -import { DescribeProjectInput } from "../models/models_2"; -import { DescribeProjectOutput } from "../models/models_3"; +import { DescribeProjectInput, DescribeProjectOutput } from "../models/models_3"; import { de_DescribeProjectCommand, se_DescribeProjectCommand } from "../protocols/Aws_json1_1"; import { SageMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../SageMakerClient"; diff --git a/clients/client-sagemaker/src/commands/ListNotebookInstancesCommand.ts b/clients/client-sagemaker/src/commands/ListNotebookInstancesCommand.ts index 54331304f16f..4ada4117b446 100644 --- a/clients/client-sagemaker/src/commands/ListNotebookInstancesCommand.ts +++ b/clients/client-sagemaker/src/commands/ListNotebookInstancesCommand.ts @@ -5,7 +5,8 @@ import { Command as $Command } from "@smithy/smithy-client"; import { MetadataBearer as __MetadataBearer } from "@smithy/types"; import { commonParams } from "../endpoint/EndpointParameters"; -import { ListNotebookInstancesInput, ListNotebookInstancesOutput } from "../models/models_3"; +import { ListNotebookInstancesInput } from "../models/models_3"; +import { ListNotebookInstancesOutput } from 
"../models/models_4"; import { de_ListNotebookInstancesCommand, se_ListNotebookInstancesCommand } from "../protocols/Aws_json1_1"; import { SageMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../SageMakerClient"; diff --git a/clients/client-sagemaker/src/commands/SearchCommand.ts b/clients/client-sagemaker/src/commands/SearchCommand.ts index 660c1b525018..2e1346266c60 100644 --- a/clients/client-sagemaker/src/commands/SearchCommand.ts +++ b/clients/client-sagemaker/src/commands/SearchCommand.ts @@ -1305,6 +1305,13 @@ export interface SearchCommandOutput extends SearchResponse, __MetadataBearer {} * // }, * // ], * // SourceUri: "STRING_VALUE", + * // SecurityConfig: { // ModelPackageSecurityConfig + * // KmsKeyId: "STRING_VALUE", // required + * // }, + * // ModelCard: { // ModelPackageModelCard + * // ModelCardContent: "STRING_VALUE", + * // ModelCardStatus: "Draft" || "PendingReview" || "Approved" || "Archived", + * // }, * // Tags: "The collection of algorithms run on a dataset for training the model candidates of an - * Autopilot job.
+ *The selection of algorithms trained on your dataset to generate the model candidates for + * an Autopilot job.
* @public */ export interface AutoMLAlgorithmConfig { /** - *The selection of algorithms run on a dataset to train the model candidates of an Autopilot - * job.
- *Selected algorithms must belong to the list corresponding to the training mode set in
- * AutoMLJobConfig.Mode (ENSEMBLING
or
- * HYPERPARAMETER_TUNING
). Choose a minimum of 1 algorithm.
The selection of algorithms trained on your dataset to generate the model candidates for + * an Autopilot job.
*In ENSEMBLING
mode:
+ * For the tabular problem type TabularJobConfig
:
+ *
Selected algorithms must belong to the list corresponding to the training mode
+ * set in AutoMLJobConfig.Mode (ENSEMBLING
or
+ * HYPERPARAMETER_TUNING
). Choose a minimum of 1 algorithm.
"catboost"
- *"extra-trees"
- *"fastai"
- *"lightgbm"
- *"linear-learner"
- *"nn-torch"
- *"randomforest"
+ *In ENSEMBLING
mode:
"catboost"
+ *"extra-trees"
+ *"fastai"
+ *"lightgbm"
+ *"linear-learner"
+ *"nn-torch"
+ *"randomforest"
+ *"xgboost"
+ *"xgboost"
+ *In HYPERPARAMETER_TUNING
mode:
"linear-learner"
+ *"mlp"
+ *"xgboost"
+ *In HYPERPARAMETER_TUNING
mode:
+ * For the time-series forecasting problem type TimeSeriesForecastingJobConfig
:
+ *
"linear-learner"
- *"mlp"
- *"xgboost"
+ *Choose your algorithms from this list.
+ *"cnn-qr"
+ *"deepar"
+ *"prophet"
+ *"arima"
+ *"npts"
+ *"ets"
+ *The Amazon S3 prefix to the model insight artifacts generated for the AutoML candidate.
+ *The Amazon S3 prefix to the model insight artifacts generated for the AutoML + * candidate.
* @public */ ModelInsights?: string; /** - *The Amazon S3 prefix to the accuracy metrics and the inference results observed over the - * testing window. Available only for the time-series forecasting problem type.
+ *The Amazon S3 prefix to the accuracy metrics and the inference results observed + * over the testing window. Available only for the time-series forecasting problem + * type.
* @public */ BacktestResults?: string; @@ -5618,9 +5663,9 @@ export interface AutoMLCandidate { */ export interface AutoMLCandidateGenerationConfig { /** - *A URL to the Amazon S3 data source containing selected features from the input data source to
- * run an Autopilot job. You can input FeatureAttributeNames
(optional) in JSON
- * format as shown below:
A URL to the Amazon S3 data source containing selected features from the input
+ * data source to run an Autopilot job. You can input FeatureAttributeNames
+ * (optional) in JSON format as shown below:
* \{ "FeatureAttributeNames":["col1", "col2", ...] \}
.
You can also specify the data type of the feature (optional) in the format shown @@ -5649,33 +5694,31 @@ export interface AutoMLCandidateGenerationConfig { FeatureSpecificationS3Uri?: string; /** - *
Stores the configuration information for the selection of algorithms used to train the - * model candidates.
+ *Stores the configuration information for the selection of algorithms trained on tabular data.
*The list of available algorithms to choose from depends on the training mode set in
- *
- * AutoMLJobConfig.Mode
+ *
+ * TabularJobConfig.Mode
* .
- * AlgorithmsConfig
should not be set in AUTO
training
- * mode.
AlgorithmsConfig
should not be set if the training mode is set on AUTO
.
* When AlgorithmsConfig
is provided, one AutoMLAlgorithms
* attribute must be set and one only.
If the list of algorithms provided as values for AutoMLAlgorithms
is
- * empty, AutoMLCandidateGenerationConfig
uses the full set of algorithms
- * for the given training mode.
CandidateGenerationConfig
uses the full set of algorithms for the
+ * given training mode.
* When AlgorithmsConfig
is not provided,
- * AutoMLCandidateGenerationConfig
uses the full set of algorithms for
- * the given training mode.
CandidateGenerationConfig
uses the full set of algorithms for the
+ * given training mode.
* For the list of all algorithms per training mode, see - * AutoMLAlgorithmConfig.
+ *For the list of all algorithms per problem type and training mode, see + * AutoMLAlgorithmConfig.
*For more information on each algorithm, see the Algorithm support section in Autopilot developer guide.
* @public */ @@ -5721,8 +5764,8 @@ export interface AutoMLS3DataSource { *If you choose S3Prefix
, S3Uri
identifies a key name
- * prefix. SageMaker uses all objects that match the specified key name prefix for model
- * training.
The S3Prefix
should have the following format:
* s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER-OR-FILE
@@ -5730,8 +5773,7 @@ export interface AutoMLS3DataSource {
*
If you choose ManifestFile
, S3Uri
identifies an object
- * that is a manifest file containing a list of object keys that you want SageMaker to use
- * for model training.
A ManifestFile
should have the format shown below:
* The URL to the Amazon S3 data source. The Uri refers to the Amazon S3 prefix or ManifestFile
- * depending on the data type. The URL to the Amazon S3 data source. The Uri refers to the Amazon S3
+ * prefix or ManifestFile depending on the data type. List of available metrics: Regression: Regression: Binary classification: Stores the configuration information for the selection of algorithms used to train model
- * candidates on tabular data. The list of available algorithms to choose from depends on the training mode set in
- *
- * Your Autopilot job trains a default set of algorithms on your dataset. For tabular and
+ * time-series data, you can customize the algorithm list by selecting a subset of algorithms
+ * for your problem type.
+ *
- * When If the list of algorithms provided as values for [ \{"prefix":
@@ -5770,8 +5812,8 @@ export interface AutoMLS3DataSource {
S3DataType: AutoMLS3DataType | undefined;
/**
- *
*
MAE
,
- * MSE
, R2
, RMSE
+ * MAE
, MSE
, R2
,
+ * RMSE
* Accuracy
, AUC
,
* BalancedAccuracy
, F1
,
- * Precision
, Recall
+ * Precision
, Recall
* TabularJobConfig.Mode
- * .AlgorithmsConfig
stores the customized selection of algorithms to train on
+ * your data.
*
AlgorithmsConfig
should not be set in AUTO
training
- * mode.AlgorithmsConfig
is provided, one AutoMLAlgorithms
- * attribute must be set and one only.AutoMLAlgorithms
is
- * empty, CandidateGenerationConfig
uses the full set of algorithms for the
- * given training mode.TabularJobConfig
,
+ * the list of available algorithms to choose from depends on the training mode set
+ * in
+ * AutoMLJobConfig.Mode
+ * .
+ * AlgorithmsConfig
should not be set when the training mode
+ * AutoMLJobConfig.Mode
is set to AUTO
.
When AlgorithmsConfig
is provided, one
+ * AutoMLAlgorithms
attribute must be set and one only.
If the list of algorithms provided as values for
+ * AutoMLAlgorithms
is empty,
+ * CandidateGenerationConfig
uses the full set of algorithms for
+ * the given training mode.
When AlgorithmsConfig
is not provided,
+ * CandidateGenerationConfig
uses the full set of algorithms for
+ * the given training mode.
For the list of all algorithms per training mode, see + * AlgorithmConfig.
+ *For more information on each algorithm, see the Algorithm support section in the Autopilot developer guide.
*When AlgorithmsConfig
is not provided,
- * CandidateGenerationConfig
uses the full set of algorithms for the
- * given training mode.
+ * For the time-series forecasting problem type TimeSeriesForecastingJobConfig
,
+ * choose your algorithms from the list provided in
+ *
+ * AlgorithmConfig.
For more information on each algorithm, see the Algorithms support for time-series forecasting section in the Autopilot developer guide.
+ *When AlgorithmsConfig
is provided, one
+ * AutoMLAlgorithms
attribute must be set and one only.
If the list of algorithms provided as values for
+ * AutoMLAlgorithms
is empty,
+ * CandidateGenerationConfig
uses the full set of algorithms for
+ * time-series forecasting.
When AlgorithmsConfig
is not provided,
+ * CandidateGenerationConfig
uses the full set of algorithms for
+ * time-series forecasting.
For the list of all algorithms per problem type and training mode, see - * AutoMLAlgorithmConfig.
- *For more information on each algorithm, see the Algorithm support section in Autopilot developer guide.
* @public */ AlgorithmsConfig?: AutoMLAlgorithmConfig[]; @@ -6452,9 +6528,9 @@ export interface TabularJobConfig { CompletionCriteria?: AutoMLJobCompletionCriteria; /** - *A URL to the Amazon S3 data source containing selected features from the input data source to
- * run an Autopilot job V2. You can input FeatureAttributeNames
(optional) in JSON
- * format as shown below:
A URL to the Amazon S3 data source containing selected features from the input
+ * data source to run an Autopilot job V2. You can input FeatureAttributeNames
+ * (optional) in JSON format as shown below:
* \{ "FeatureAttributeNames":["col1", "col2", ...] \}
.
You can also specify the data type of the feature (optional) in the format shown @@ -6797,12 +6873,13 @@ export interface TimeSeriesTransformations { */ export interface TimeSeriesForecastingJobConfig { /** - *
A URL to the Amazon S3 data source containing additional selected features that complement
- * the target, itemID, timestamp, and grouped columns set in TimeSeriesConfig
.
- * When not provided, the AutoML job V2 includes all the columns from the original dataset
- * that are not already declared in TimeSeriesConfig
. If provided, the AutoML job
- * V2 only considers these additional columns as a complement to the ones declared in
- * TimeSeriesConfig
.
A URL to the Amazon S3 data source containing additional selected features that
+ * complement the target, itemID, timestamp, and grouped columns set in
+ * TimeSeriesConfig
. When not provided, the AutoML job V2 includes all the
+ * columns from the original dataset that are not already declared in
+ * TimeSeriesConfig
. If provided, the AutoML job V2 only considers these
+ * additional columns as a complement to the ones declared in
+ * TimeSeriesConfig
.
You can input FeatureAttributeNames
(optional) in JSON format as shown
* below:
@@ -6898,6 +6975,13 @@ export interface TimeSeriesForecastingJobConfig { * @public */ HolidayConfig?: HolidayConfigAttributes[]; + + /** + *
Stores the configuration information for how model candidates are generated using an + * AutoML job V2.
+ * @public + */ + CandidateGenerationConfig?: CandidateGenerationConfig; } /** @@ -10846,8 +10930,8 @@ export interface CreateAutoMLJobRequest { InputDataConfig: AutoMLChannel[] | undefined; /** - *Provides information about encryption and the Amazon S3 output path needed to store artifacts - * from an AutoML job. Format(s) supported: CSV.
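The `CandidateGenerationConfig` member added to `TimeSeriesForecastingJobConfig` above is where the forecasting algorithm allow-list rides on the V2 API. A minimal sketch, assuming placeholder bucket, role, frequency, and column names:

```ts
import { SageMakerClient, CreateAutoMLJobV2Command } from "@aws-sdk/client-sagemaker";

const client = new SageMakerClient({ region: "us-east-1" });

// Placeholder job name, S3 URIs, role ARN, and time-series column names.
await client.send(
  new CreateAutoMLJobV2Command({
    AutoMLJobName: "forecast-demo",
    AutoMLJobInputDataConfig: [
      {
        ChannelType: "training",
        DataSource: { S3DataSource: { S3DataType: "S3Prefix", S3Uri: "s3://DOC-EXAMPLE-BUCKET/timeseries/" } },
      },
    ],
    OutputDataConfig: { S3OutputPath: "s3://DOC-EXAMPLE-BUCKET/output/" },
    RoleArn: "arn:aws:iam::111122223333:role/AutopilotExecutionRole",
    AutoMLProblemTypeConfig: {
      TimeSeriesForecastingJobConfig: {
        ForecastFrequency: "1D",
        ForecastHorizon: 14,
        TimeSeriesConfig: {
          TargetAttributeName: "demand",
          TimestampAttributeName: "ts",
          ItemIdentifierAttributeName: "item_id",
        },
        // New in this change set: restrict candidate generation to a subset of the
        // documented forecasting algorithms ("cnn-qr", "deepar", "prophet", "npts", "arima", "ets").
        CandidateGenerationConfig: {
          AlgorithmsConfig: [{ AutoMLAlgorithms: ["deepar", "ets", "prophet"] }],
        },
      },
    },
  })
);
```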
+ *Provides information about encryption and the Amazon S3 output path needed to + * store artifacts from an AutoML job. Format(s) supported: CSV.
* @public */ OutputDataConfig: AutoMLOutputDataConfig | undefined; @@ -10953,8 +11037,8 @@ export interface CreateAutoMLJobV2Request { AutoMLJobInputDataConfig: AutoMLJobChannel[] | undefined; /** - *Provides information about encryption and the Amazon S3 output path needed to store artifacts - * from an AutoML job.
+ *Provides information about encryption and the Amazon S3 output path needed to + * store artifacts from an AutoML job.
* @public */ OutputDataConfig: AutoMLOutputDataConfig | undefined; diff --git a/clients/client-sagemaker/src/models/models_1.ts b/clients/client-sagemaker/src/models/models_1.ts index f298214f01ac..900af2329129 100644 --- a/clients/client-sagemaker/src/models/models_1.ts +++ b/clients/client-sagemaker/src/models/models_1.ts @@ -9773,6 +9773,51 @@ export interface DriftCheckBaselines { ModelDataQuality?: DriftCheckModelDataQuality; } +/** + *The model card associated with the model package. Since ModelPackageModelCard
is
+ * tied to a model package, it is a specific usage of a model card and its schema is
+ * simplified compared to the schema of ModelCard
. The
+ * ModelPackageModelCard
schema does not include model_package_details
,
+ * and model_overview
is composed of the model_creator
and
+ * model_artifact
properties. For more information about
+ * the model card associated with the model package, see View
+ * the Details of a Model Version.
The content of the model card.
+ * @public + */ + ModelCardContent?: string; + + /** + *The approval status of the model card within your organization. Different organizations might have different criteria for model card review and approval.
+ *
+ * Draft
: The model card is a work in progress.
+ * PendingReview
: The model card is pending review.
+ * Approved
: The model card is approved.
+ * Archived
: The model card is archived. No more updates can be made to the model
+ * card content. If you try to update the model card content, you will receive the message Model Card
+ * is in Archived state
.
Contains explainability metrics for a model.
* @public @@ -9851,6 +9896,20 @@ export interface ModelMetrics { Explainability?: Explainability; } +/** + *An optional Key Management Service + * key to encrypt, decrypt, and re-encrypt model package information for regulated workloads with + * highly sensitive data.
+ * @public + */ +export interface ModelPackageSecurityConfig { + /** + *The KMS Key ID (KMSKeyId
) used for encryption of model package information.
The KMS Key ID (KMSKeyId
) used for encryption of model package information.
The model card associated with the model package. Since ModelPackageModelCard
is
+ * tied to a model package, it is a specific usage of a model card and its schema is
+ * simplified compared to the schema of ModelCard
. The
+ * ModelPackageModelCard
schema does not include model_package_details
,
+ * and model_overview
is composed of the model_creator
and
+ * model_artifact
properties. For more information about
+ * the model card associated with the model package, see View
+ * the Details of a Model Version.
The name of the Amazon SageMaker Studio Lifecycle Configuration to create.
- * @public - */ - StudioLifecycleConfigName: string | undefined; - - /** - *The content of your Amazon SageMaker Studio Lifecycle Configuration script. This content must be base64 encoded.
- * @public - */ - StudioLifecycleConfigContent: string | undefined; - - /** - *The App type that the Lifecycle Configuration is attached to.
- * @public - */ - StudioLifecycleConfigAppType: StudioLifecycleConfigAppType | undefined; - - /** - *Tags to be associated with the Lifecycle Configuration. Each tag consists of a key and an optional value. Tag keys must be unique per resource. Tags are searchable using the Search API.
- * @public - */ - Tags?: Tag[]; -} +export const ModelPackageModelCardFilterSensitiveLog = (obj: ModelPackageModelCard): any => ({ + ...obj, + ...(obj.ModelCardContent && { ModelCardContent: SENSITIVE_STRING }), +}); /** * @internal */ -export const CreateModelCardRequestFilterSensitiveLog = (obj: CreateModelCardRequest): any => ({ +export const CreateModelPackageInputFilterSensitiveLog = (obj: CreateModelPackageInput): any => ({ ...obj, - ...(obj.Content && { Content: SENSITIVE_STRING }), + ...(obj.ModelCard && { ModelCard: ModelPackageModelCardFilterSensitiveLog(obj.ModelCard) }), }); diff --git a/clients/client-sagemaker/src/models/models_2.ts b/clients/client-sagemaker/src/models/models_2.ts index d34ec469a50d..bc5c2b9b1bdf 100644 --- a/clients/client-sagemaker/src/models/models_2.ts +++ b/clients/client-sagemaker/src/models/models_2.ts @@ -11,7 +11,6 @@ import { AlgorithmValidationSpecification, AppNetworkAccessType, AppSecurityGroupManagement, - AppSpecification, AppStatus, AppType, ArtifactSource, @@ -128,6 +127,9 @@ import { ModelExplainabilityJobInput, ModelInfrastructureConfig, ModelMetrics, + ModelPackageModelCard, + ModelPackageModelCardFilterSensitiveLog, + ModelPackageSecurityConfig, ModelPackageValidationSpecification, ModelQualityAppSpecification, ModelQualityBaselineConfig, @@ -139,18 +141,13 @@ import { MonitoringStoppingCondition, MonitoringType, NeoVpcConfig, - NetworkConfig, NotebookInstanceAcceleratorType, NotebookInstanceLifecycleHook, OfflineStoreConfig, OnlineStoreConfig, OutputConfig, ParallelismConfiguration, - ProcessingInput, ProcessingInstanceType, - ProcessingOutputConfig, - ProcessingResources, - ProcessingStoppingCondition, Processor, ProductionVariant, ProductionVariantAcceleratorType, @@ -170,6 +167,52 @@ import { VendorGuidance, } from "./models_1"; +/** + * @public + * @enum + */ +export const StudioLifecycleConfigAppType = { + CodeEditor: "CodeEditor", + JupyterLab: "JupyterLab", + JupyterServer: "JupyterServer", + KernelGateway: "KernelGateway", +} as const; + +/** + * @public + */ +export type StudioLifecycleConfigAppType = + (typeof StudioLifecycleConfigAppType)[keyof typeof StudioLifecycleConfigAppType]; + +/** + * @public + */ +export interface CreateStudioLifecycleConfigRequest { + /** + *The name of the Amazon SageMaker Studio Lifecycle Configuration to create.
+ * @public + */ + StudioLifecycleConfigName: string | undefined; + + /** + *The content of your Amazon SageMaker Studio Lifecycle Configuration script. This content must be base64 encoded.
+ * @public + */ + StudioLifecycleConfigContent: string | undefined; + + /** + *The App type that the Lifecycle Configuration is attached to.
+ * @public + */ + StudioLifecycleConfigAppType: StudioLifecycleConfigAppType | undefined; + + /** + *Tags to be associated with the Lifecycle Configuration. Each tag consists of a key and an optional value. Tag keys must be unique per resource. Tags are searchable using the Search API.
+ * @public + */ + Tags?: Tag[]; +} + /** * @public */ @@ -3405,8 +3448,8 @@ export interface DescribeAutoMLJobResponse { PartialFailureReasons?: AutoMLPartialFailureReason[]; /** - *The best model candidate selected by SageMaker Autopilot using both the best objective metric and - * lowest InferenceLatency for + *
The best model candidate selected by SageMaker Autopilot using both the best + * objective metric and lowest InferenceLatency for * an experiment.
* @public */ @@ -3500,8 +3543,8 @@ export interface DescribeAutoMLJobV2Response { OutputDataConfig: AutoMLOutputDataConfig | undefined; /** - *The ARN of the IAM role that has read permission to the input data location and - * write permission to the output data location in Amazon S3.
+ *The ARN of the IAM role that has read permission to the input data + * location and write permission to the output data location in Amazon S3.
* @public */ RoleArn: string | undefined; @@ -3606,7 +3649,8 @@ export interface DescribeAutoMLJobV2Response { DataSplitConfig?: AutoMLDataSplitConfig; /** - *Returns the security configuration for traffic encryption or Amazon VPC settings.
+ *Returns the security configuration for traffic encryption or Amazon VPC + * settings.
* @public */ SecurityConfig?: AutoMLSecurityConfig; @@ -8858,6 +8902,25 @@ export interface DescribeModelPackageOutput { * @public */ SourceUri?: string; + + /** + *The KMS Key ID (KMSKeyId
) used for encryption of model package information.
The model card associated with the model package. Since ModelPackageModelCard
is
+ * tied to a model package, it is a specific usage of a model card and its schema is
+ * simplified compared to the schema of ModelCard
. The
+ * ModelPackageModelCard
schema does not include model_package_details
,
+ * and model_overview
is composed of the model_creator
and
+ * model_artifact
properties. For more information about
+ * the model card associated with the model package, see View
+ * the Details of a Model Version.
The inputs for a processing job.
- * @public - */ - ProcessingInputs?: ProcessingInput[]; - - /** - *Output configuration for the processing job.
- * @public - */ - ProcessingOutputConfig?: ProcessingOutputConfig; - - /** - *The name of the processing job. The name must be unique within an Amazon Web Services Region in the - * Amazon Web Services account.
- * @public - */ - ProcessingJobName: string | undefined; - - /** - *Identifies the resources, ML compute instances, and ML storage volumes to deploy for a - * processing job. In distributed training, you specify more than one instance.
- * @public - */ - ProcessingResources: ProcessingResources | undefined; - - /** - *The time limit for how long the processing job is allowed to run.
- * @public - */ - StoppingCondition?: ProcessingStoppingCondition; - - /** - *Configures the processing job to run a specified container image.
- * @public - */ - AppSpecification: AppSpecification | undefined; - - /** - *The environment variables set in the Docker container.
- * @public - */ - Environment?: RecordNetworking options for a processing job.
- * @public - */ - NetworkConfig?: NetworkConfig; - - /** - *The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on - * your behalf.
- * @public - */ - RoleArn?: string; - - /** - *The configuration information used to create an experiment.
- * @public - */ - ExperimentConfig?: ExperimentConfig; - - /** - *The Amazon Resource Name (ARN) of the processing job.
- * @public - */ - ProcessingJobArn: string | undefined; - - /** - *Provides the status of a processing job.
- * @public - */ - ProcessingJobStatus: ProcessingJobStatus | undefined; - - /** - *An optional string, up to one KB in size, that contains metadata from the processing - * container when the processing job exits.
- * @public - */ - ExitMessage?: string; - - /** - *A string, up to one KB in size, that contains the reason a processing job failed, if - * it failed.
- * @public - */ - FailureReason?: string; - - /** - *The time at which the processing job completed.
- * @public - */ - ProcessingEndTime?: Date; - - /** - *The time at which the processing job started.
- * @public - */ - ProcessingStartTime?: Date; - - /** - *The time at which the processing job was last modified.
- * @public - */ - LastModifiedTime?: Date; - - /** - *The time at which the processing job was created.
- * @public - */ - CreationTime: Date | undefined; - - /** - *The ARN of a monitoring schedule for an endpoint associated with this processing - * job.
- * @public - */ - MonitoringScheduleArn?: string; - - /** - *The ARN of an AutoML job associated with this processing job.
- * @public - */ - AutoMLJobArn?: string; - - /** - *The ARN of a training job associated with this processing job.
- * @public - */ - TrainingJobArn?: string; -} - -/** - * @public - */ -export interface DescribeProjectInput { - /** - *The name of the project to describe.
- * @public - */ - ProjectName: string | undefined; -} - /** * @internal */ @@ -9974,3 +9889,11 @@ export const DescribeModelCardResponseFilterSensitiveLog = (obj: DescribeModelCa ...obj, ...(obj.Content && { Content: SENSITIVE_STRING }), }); + +/** + * @internal + */ +export const DescribeModelPackageOutputFilterSensitiveLog = (obj: DescribeModelPackageOutput): any => ({ + ...obj, + ...(obj.ModelCard && { ModelCard: ModelPackageModelCardFilterSensitiveLog(obj.ModelCard) }), +}); diff --git a/clients/client-sagemaker/src/models/models_3.ts b/clients/client-sagemaker/src/models/models_3.ts index b93a94f0cf4a..c226adb63c1f 100644 --- a/clients/client-sagemaker/src/models/models_3.ts +++ b/clients/client-sagemaker/src/models/models_3.ts @@ -9,6 +9,7 @@ import { AppImageConfigDetails, AppImageConfigSortKey, AppSortKey, + AppSpecification, ArtifactSummary, AssociationEdgeType, AssociationSummary, @@ -50,7 +51,6 @@ import { } from "./models_0"; import { - _InstanceType, DockerSettings, EdgeOutputConfig, ExecutionRoleIdentityConfig, @@ -67,16 +67,20 @@ import { ModelCardStatus, MonitoringScheduleConfig, MonitoringType, + NetworkConfig, OfflineStoreConfig, OnlineStoreConfig, OwnershipSettings, + ProcessingInput, + ProcessingOutputConfig, + ProcessingResources, + ProcessingStoppingCondition, RecommendationJobType, ResourceLimits, RetryStrategy, ServiceCatalogProvisioningDetails, SpaceSettings, SpaceSharingSettings, - StudioLifecycleConfigAppType, UserSettings, } from "./models_1"; @@ -125,6 +129,7 @@ import { ObjectiveStatusCounters, OfflineStoreStatus, OfflineStoreStatusValue, + ProcessingJobStatus, ProductionVariantSummary, ProfilerConfig, ProfilerRuleConfiguration, @@ -134,6 +139,7 @@ import { RuleEvaluationStatus, ScheduleStatus, SourceIpConfig, + StudioLifecycleConfigAppType, TensorBoardOutputConfig, TrainingJobStatus, TrainingJobStatusCounters, @@ -143,6 +149,154 @@ import { WorkerAccessConfiguration, } from "./models_2"; +/** + * @public + */ +export interface DescribeProcessingJobResponse { + /** + *The inputs for a processing job.
+ * @public + */ + ProcessingInputs?: ProcessingInput[]; + + /** + *Output configuration for the processing job.
+ * @public + */ + ProcessingOutputConfig?: ProcessingOutputConfig; + + /** + *The name of the processing job. The name must be unique within an Amazon Web Services Region in the + * Amazon Web Services account.
+ * @public + */ + ProcessingJobName: string | undefined; + + /** + *Identifies the resources, ML compute instances, and ML storage volumes to deploy for a + * processing job. In distributed training, you specify more than one instance.
+ * @public + */ + ProcessingResources: ProcessingResources | undefined; + + /** + *The time limit for how long the processing job is allowed to run.
+ * @public + */ + StoppingCondition?: ProcessingStoppingCondition; + + /** + *Configures the processing job to run a specified container image.
+ * @public + */ + AppSpecification: AppSpecification | undefined; + + /** + *The environment variables set in the Docker container.
+ * @public + */ + Environment?: RecordNetworking options for a processing job.
+ * @public + */ + NetworkConfig?: NetworkConfig; + + /** + *The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on + * your behalf.
+ * @public + */ + RoleArn?: string; + + /** + *The configuration information used to create an experiment.
+ * @public + */ + ExperimentConfig?: ExperimentConfig; + + /** + *The Amazon Resource Name (ARN) of the processing job.
+ * @public + */ + ProcessingJobArn: string | undefined; + + /** + *Provides the status of a processing job.
+ * @public + */ + ProcessingJobStatus: ProcessingJobStatus | undefined; + + /** + *An optional string, up to one KB in size, that contains metadata from the processing + * container when the processing job exits.
+ * @public + */ + ExitMessage?: string; + + /** + *A string, up to one KB in size, that contains the reason a processing job failed, if + * it failed.
+ * @public + */ + FailureReason?: string; + + /** + *The time at which the processing job completed.
+ * @public + */ + ProcessingEndTime?: Date; + + /** + *The time at which the processing job started.
+ * @public + */ + ProcessingStartTime?: Date; + + /** + *The time at which the processing job was last modified.
+ * @public + */ + LastModifiedTime?: Date; + + /** + *The time at which the processing job was created.
+ * @public + */ + CreationTime: Date | undefined; + + /** + *The ARN of a monitoring schedule for an endpoint associated with this processing + * job.
+ * @public + */ + MonitoringScheduleArn?: string; + + /** + *The ARN of an AutoML job associated with this processing job.
+ * @public + */ + AutoMLJobArn?: string; + + /** + *The ARN of a training job associated with this processing job.
+ * @public + */ + TrainingJobArn?: string; +} + +/** + * @public + */ +export interface DescribeProjectInput { + /** + *The name of the project to describe.
+ * @public + */ + ProjectName: string | undefined; +} + /** * @public * @enum @@ -6023,7 +6177,7 @@ export interface ListAppsRequest { NextToken?: string; /** - *This parameter defines the maximum number of results that can be returned in a single response. The MaxResults
parameter is an upper bound, not a target. If there are
+ *
This parameter defines the maximum number of results that can be returned in a single response. The MaxResults
parameter is an upper bound, not a target. If there are
* more results available than the value specified, a NextToken
* is provided in the response. The NextToken
indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults
is 10.
This parameter defines the maximum number of results that can be returned in a single response. The MaxResults
parameter is an upper bound, not a target. If there are
+ *
This parameter defines the maximum number of results that can be return in a single response. The MaxResults
parameter is an upper bound, not a target. If there are
* more results available than the value specified, a NextToken
* is provided in the response. The NextToken
indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults
is 10.
Provides summary information for an SageMaker notebook instance.
- * @public - */ -export interface NotebookInstanceSummary { - /** - *The name of the notebook instance that you want a summary for.
- * @public - */ - NotebookInstanceName: string | undefined; - - /** - *The Amazon Resource Name (ARN) of the notebook instance.
- * @public - */ - NotebookInstanceArn: string | undefined; - - /** - *The status of the notebook instance.
- * @public - */ - NotebookInstanceStatus?: NotebookInstanceStatus; - - /** - *The URL that you use to connect to the Jupyter notebook running in your notebook - * instance.
- * @public - */ - Url?: string; - - /** - *The type of ML compute instance that the notebook instance is running on.
- * @public - */ - InstanceType?: _InstanceType; - - /** - *A timestamp that shows when the notebook instance was created.
- * @public - */ - CreationTime?: Date; - - /** - *A timestamp that shows when the notebook instance was last modified.
- * @public - */ - LastModifiedTime?: Date; - - /** - *The name of a notebook instance lifecycle configuration associated with this notebook - * instance.
- *For information about notebook instance lifestyle configurations, see Step - * 2.1: (Optional) Customize a Notebook Instance.
- * @public - */ - NotebookInstanceLifecycleConfigName?: string; - - /** - *The Git repository associated with the notebook instance as its default code - * repository. This can be either the name of a Git repository stored as a resource in your - * account, or the URL of a Git repository in Amazon Web Services CodeCommit - * or in any other Git repository. When you open a notebook instance, it opens in the - * directory that contains this repository. For more information, see Associating Git - * Repositories with SageMaker Notebook Instances.
- * @public - */ - DefaultCodeRepository?: string; - - /** - *An array of up to three Git repositories associated with the notebook instance. These - * can be either the names of Git repositories stored as resources in your account, or the - * URL of Git repositories in Amazon Web Services CodeCommit - * or in any other Git repository. These repositories are cloned at the same level as the - * default repository of your notebook instance. For more information, see Associating Git - * Repositories with SageMaker Notebook Instances.
- * @public - */ - AdditionalCodeRepositories?: string[]; -} - -/** - * @public - */ -export interface ListNotebookInstancesOutput { - /** - *If the response to the previous ListNotebookInstances
request was
- * truncated, SageMaker returns this token. To retrieve the next set of notebook instances, use
- * the token in the next request.
An array of NotebookInstanceSummary
objects, one for each notebook
- * instance.
Provides summary information for an SageMaker notebook instance.
+ * @public + */ +export interface NotebookInstanceSummary { + /** + *The name of the notebook instance that you want a summary for.
+ * @public + */ + NotebookInstanceName: string | undefined; + + /** + *The Amazon Resource Name (ARN) of the notebook instance.
+ * @public + */ + NotebookInstanceArn: string | undefined; + + /** + *The status of the notebook instance.
+ * @public + */ + NotebookInstanceStatus?: NotebookInstanceStatus; + + /** + *The URL that you use to connect to the Jupyter notebook running in your notebook + * instance.
+ * @public + */ + Url?: string; + + /** + *The type of ML compute instance that the notebook instance is running on.
+ * @public + */ + InstanceType?: _InstanceType; + + /** + *A timestamp that shows when the notebook instance was created.
+ * @public + */ + CreationTime?: Date; + + /** + *A timestamp that shows when the notebook instance was last modified.
+ * @public + */ + LastModifiedTime?: Date; + + /** + *The name of a notebook instance lifecycle configuration associated with this notebook + * instance.
+ *For information about notebook instance lifestyle configurations, see Step + * 2.1: (Optional) Customize a Notebook Instance.
+ * @public + */ + NotebookInstanceLifecycleConfigName?: string; + + /** + *The Git repository associated with the notebook instance as its default code + * repository. This can be either the name of a Git repository stored as a resource in your + * account, or the URL of a Git repository in Amazon Web Services CodeCommit + * or in any other Git repository. When you open a notebook instance, it opens in the + * directory that contains this repository. For more information, see Associating Git + * Repositories with SageMaker Notebook Instances.
+ * @public + */ + DefaultCodeRepository?: string; + + /** + *An array of up to three Git repositories associated with the notebook instance. These + * can be either the names of Git repositories stored as resources in your account, or the + * URL of Git repositories in Amazon Web Services CodeCommit + * or in any other Git repository. These repositories are cloned at the same level as the + * default repository of your notebook instance. For more information, see Associating Git + * Repositories with SageMaker Notebook Instances.
+ * @public + */ + AdditionalCodeRepositories?: string[]; +} + +/** + * @public + */ +export interface ListNotebookInstancesOutput { + /** + *If the response to the previous ListNotebookInstances
request was
+ * truncated, SageMaker returns this token. To retrieve the next set of notebook instances, use
+ * the token in the next request.
An array of NotebookInstanceSummary
objects, one for each notebook
+ * instance.
This parameter defines the maximum number of results that can be returned in a single response. The MaxResults
parameter is an upper bound, not a target. If there are
+ *
This parameter defines the maximum number of results that can be return in a single response. The MaxResults
parameter is an upper bound, not a target. If there are
* more results available than the value specified, a NextToken
* is provided in the response. The NextToken
indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults
is 10.
This parameter defines the maximum number of results that can be returned in a single response. The MaxResults
parameter is an upper bound, not a target. If there are
+ *
This parameter defines the maximum number of results that can be return in a single response. The MaxResults
parameter is an upper bound, not a target. If there are
* more results available than the value specified, a NextToken
* is provided in the response. The NextToken
indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults
is 10.
An optional Key Management Service + * key to encrypt, decrypt, and re-encrypt model package information for regulated workloads with + * highly sensitive data.
+ * @public + */ + SecurityConfig?: ModelPackageSecurityConfig; + + /** + *The model card associated with the model package. Since ModelPackageModelCard
is
+ * tied to a model package, it is a specific usage of a model card and its schema is
+ * simplified compared to the schema of ModelCard
. The
+ * ModelPackageModelCard
schema does not include model_package_details
,
+ * and model_overview
is composed of the model_creator
and
+ * model_artifact
properties. For more information about
+ * the model card associated with the model package, see View
+ * the Details of a Model Version.
A list of the tags associated with the model package. For more information, see Tagging Amazon Web Services * resources in the Amazon Web Services General Reference Guide.
@@ -7033,6 +7158,19 @@ export interface UpdateModelPackageInput { * @public */ SourceUri?: string; + + /** + *The model card associated with the model package. Since ModelPackageModelCard
is
+ * tied to a model package, it is a specific usage of a model card and its schema is
+ * simplified compared to the schema of ModelCard
. The
+ * ModelPackageModelCard
schema does not include model_package_details
,
+ * and model_overview
is composed of the model_creator
and
+ * model_artifact
properties. For more information about
+ * the model card associated with the model package, see View
+ * the Details of a Model Version.
The selection of algorithms run on a dataset to train the model candidates of an Autopilot\n job.
\nSelected algorithms must belong to the list corresponding to the training mode set in\n AutoMLJobConfig.Mode (ENSEMBLING
or\n HYPERPARAMETER_TUNING
). Choose a minimum of 1 algorithm.
In ENSEMBLING
mode:
\"catboost\"
\n\"extra-trees\"
\n\"fastai\"
\n\"lightgbm\"
\n\"linear-learner\"
\n\"nn-torch\"
\n\"randomforest\"
\n\"xgboost\"
\nIn HYPERPARAMETER_TUNING
mode:
\"linear-learner\"
\n\"mlp\"
\n\"xgboost\"
\nThe selection of algorithms trained on your dataset to generate the model candidates for\n an Autopilot job.
\n\n For the tabular problem type TabularJobConfig
:\n
Selected algorithms must belong to the list corresponding to the training mode\n set in AutoMLJobConfig.Mode (ENSEMBLING
or\n HYPERPARAMETER_TUNING
). Choose a minimum of 1 algorithm.
In ENSEMBLING
mode:
\"catboost\"
\n\"extra-trees\"
\n\"fastai\"
\n\"lightgbm\"
\n\"linear-learner\"
\n\"nn-torch\"
\n\"randomforest\"
\n\"xgboost\"
\nIn HYPERPARAMETER_TUNING
mode:
\"linear-learner\"
\n\"mlp\"
\n\"xgboost\"
\n\n For the time-series forecasting problem type TimeSeriesForecastingJobConfig
:\n
Choose your algorithms from this list.
\n\"cnn-qr\"
\n\"deepar\"
\n\"prophet\"
\n\"arima\"
\n\"npts\"
\n\"ets\"
\nThe collection of algorithms run on a dataset for training the model candidates of an\n Autopilot job.
" + "smithy.api#documentation": "The selection of algorithms trained on your dataset to generate the model candidates for\n an Autopilot job.
" } }, "com.amazonaws.sagemaker#AutoMLAlgorithms": { @@ -3044,13 +3080,13 @@ "FeatureSpecificationS3Uri": { "target": "com.amazonaws.sagemaker#S3Uri", "traits": { - "smithy.api#documentation": "A URL to the Amazon S3 data source containing selected features from the input data source to\n run an Autopilot job. You can input FeatureAttributeNames
(optional) in JSON\n format as shown below:
\n { \"FeatureAttributeNames\":[\"col1\", \"col2\", ...] }
.
You can also specify the data type of the feature (optional) in the format shown\n below:
\n\n { \"FeatureDataTypes\":{\"col1\":\"numeric\", \"col2\":\"categorical\" ... } }
\n
These column keys may not include the target column.
\nIn ensembling mode, Autopilot only supports the following data types: numeric
,\n categorical
, text
, and datetime
. In HPO mode,\n Autopilot can support numeric
, categorical
, text
,\n datetime
, and sequence
.
If only FeatureDataTypes
is provided, the column keys (col1
,\n col2
,..) should be a subset of the column names in the input data.
If both FeatureDataTypes
and FeatureAttributeNames
are\n provided, then the column keys should be a subset of the column names provided in\n FeatureAttributeNames
.
The key name FeatureAttributeNames
is fixed. The values listed in\n [\"col1\", \"col2\", ...]
are case sensitive and should be a list of strings\n containing unique values that are a subset of the column names in the input data. The list\n of columns provided must not include the target column.
A URL to the Amazon S3 data source containing selected features from the input\n data source to run an Autopilot job. You can input FeatureAttributeNames
\n (optional) in JSON format as shown below:
\n { \"FeatureAttributeNames\":[\"col1\", \"col2\", ...] }
.
You can also specify the data type of the feature (optional) in the format shown\n below:
\n\n { \"FeatureDataTypes\":{\"col1\":\"numeric\", \"col2\":\"categorical\" ... } }
\n
These column keys may not include the target column.
\nIn ensembling mode, Autopilot only supports the following data types: numeric
,\n categorical
, text
, and datetime
. In HPO mode,\n Autopilot can support numeric
, categorical
, text
,\n datetime
, and sequence
.
If only FeatureDataTypes
is provided, the column keys (col1
,\n col2
,..) should be a subset of the column names in the input data.
If both FeatureDataTypes
and FeatureAttributeNames
are\n provided, then the column keys should be a subset of the column names provided in\n FeatureAttributeNames
.
The key name FeatureAttributeNames
is fixed. The values listed in\n [\"col1\", \"col2\", ...]
are case sensitive and should be a list of strings\n containing unique values that are a subset of the column names in the input data. The list\n of columns provided must not include the target column.
Stores the configuration information for the selection of algorithms used to train the\n model candidates.
\nThe list of available algorithms to choose from depends on the training mode set in\n \n AutoMLJobConfig.Mode
\n .
\n AlgorithmsConfig
should not be set in AUTO
training\n mode.
When AlgorithmsConfig
is provided, one AutoMLAlgorithms
\n attribute must be set and one only.
If the list of algorithms provided as values for AutoMLAlgorithms
is\n empty, AutoMLCandidateGenerationConfig
uses the full set of algorithms\n for the given training mode.
When AlgorithmsConfig
is not provided,\n AutoMLCandidateGenerationConfig
uses the full set of algorithms for\n the given training mode.
For the list of all algorithms per training mode, see \n AutoMLAlgorithmConfig.
\nFor more information on each algorithm, see the Algorithm support section in Autopilot developer guide.
" + "smithy.api#documentation": "Stores the configuration information for the selection of algorithms trained on tabular data.
\nThe list of available algorithms to choose from depends on the training mode set in\n \n TabularJobConfig.Mode
\n .
\n AlgorithmsConfig
should not be set if the training mode is set on AUTO
.
When AlgorithmsConfig
is provided, one AutoMLAlgorithms
\n attribute must be set and one only.
If the list of algorithms provided as values for AutoMLAlgorithms
is\n empty, CandidateGenerationConfig
uses the full set of algorithms for the\n given training mode.
When AlgorithmsConfig
is not provided,\n CandidateGenerationConfig
uses the full set of algorithms for the\n given training mode.
For the list of all algorithms per problem type and training mode, see \n AutoMLAlgorithmConfig.
\nFor more information on each algorithm, see the Algorithm support section in Autopilot developer guide.
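The selection rules above boil down to a small object. A minimal sketch, assuming the SDK exports the CandidateGenerationConfig interface under that name; the two algorithms are an arbitrary subset picked for illustration.

import type { CandidateGenerationConfig } from "@aws-sdk/client-sagemaker";

// Restrict candidate generation to two algorithms; omitting AlgorithmsConfig
// (or passing an empty AutoMLAlgorithms list) falls back to the full set for
// the chosen training mode.
const candidateGeneration: CandidateGenerationConfig = {
  AlgorithmsConfig: [{ AutoMLAlgorithms: ["xgboost", "lightgbm"] }],
};

The object plugs into AutoMLProblemTypeConfig.TabularJobConfig.CandidateGenerationConfig on CreateAutoMLJobV2; with this change, TimeSeriesForecastingJobConfig accepts the same CandidateGenerationConfig shape.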
" } } }, @@ -3429,7 +3465,7 @@ "target": "com.amazonaws.sagemaker#AutoMLMetricEnum", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "The name of the objective metric used to measure the predictive quality of a machine\n learning system. During training, the model's parameters are updated iteratively to\n optimize its performance based on the feedback provided by the objective metric when\n evaluating the model on the validation dataset.
\nThe list of available metrics supported by Autopilot and the default metric applied when you\n do not specify a metric name explicitly depend on the problem type.
\nFor tabular problem types:
\nList of available metrics:
\n Regression: MAE
,\n MSE
, R2
, RMSE
\n
Binary classification: Accuracy
, AUC
,\n BalancedAccuracy
, F1
,\n Precision
, Recall
\n
Multiclass classification: Accuracy
,\n BalancedAccuracy
, F1macro
,\n PrecisionMacro
, RecallMacro
\n
For a description of each metric, see Autopilot metrics for classification and regression.
\nDefault objective metrics:
\nRegression: MSE
.
Binary classification: F1
.
Multiclass classification: Accuracy
.
For image or text classification problem types:
\nList of available metrics: Accuracy
\n
For a description of each metric, see Autopilot metrics for text and image classification.
\nDefault objective metrics: Accuracy
\n
For time-series forecasting problem types:
\nList of available metrics: RMSE
, wQL
,\n Average wQL
, MASE
, MAPE
,\n WAPE
\n
For a description of each metric, see Autopilot metrics for\n time-series forecasting.
\nDefault objective metrics: AverageWeightedQuantileLoss
\n
For text generation problem types (LLMs fine-tuning): \n Fine-tuning language models in Autopilot does not\n require setting the AutoMLJobObjective
field. Autopilot fine-tunes LLMs\n without requiring multiple candidates to be trained and evaluated. \n Instead, using your dataset, Autopilot directly fine-tunes your target model to enhance a\n default objective metric, the cross-entropy loss. After fine-tuning a language model,\n you can evaluate the quality of its generated text using different metrics. \n For a list of the available metrics, see Metrics for\n fine-tuning LLMs in Autopilot.
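For callers that do want to override the default metric, the field reduces to a one-line object. A hedged TypeScript sketch, assuming the exported AutoMLJobObjective interface; AUC is just an example choice for a binary-classification job whose default would otherwise be F1.

import type { AutoMLJobObjective } from "@aws-sdk/client-sagemaker";

// Passed as AutoMLJobObjective on CreateAutoMLJob or CreateAutoMLJobV2.
const objective: AutoMLJobObjective = { MetricName: "AUC" };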
The data type. If you choose S3Prefix, S3Uri identifies a key name prefix. SageMaker uses all objects that match the specified key name prefix for model training. The S3Prefix should have the following format: s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER-OR-FILE
If you choose ManifestFile, S3Uri identifies an object that is a manifest file containing a list of object keys that you want SageMaker to use for model training. A ManifestFile should have the format shown below:
[ {\"prefix\": \"s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER/DOC-EXAMPLE-PREFIX/\"},
\"DOC-EXAMPLE-RELATIVE-PATH/DOC-EXAMPLE-FOLDER/DATA-1\",
\"DOC-EXAMPLE-RELATIVE-PATH/DOC-EXAMPLE-FOLDER/DATA-2\",
... \"DOC-EXAMPLE-RELATIVE-PATH/DOC-EXAMPLE-FOLDER/DATA-N\" ]
If you choose AugmentedManifestFile, S3Uri identifies an object that is an augmented manifest file in JSON lines format. This file contains the data you want to use for model training. AugmentedManifestFile is available for V2 API jobs only (for example, for jobs created by calling CreateAutoMLJobV2).
Here is a minimal, single-record example of an AugmentedManifestFile:
{\"source-ref\": \"s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER/cats/cat.jpg\", \"label-metadata\": {\"class-name\": \"cat\"} }
For more information on AugmentedManifestFile, see Provide Dataset Metadata to Training Jobs with an Augmented Manifest File.
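To tie the manifest format to the client API, here is a hedged TypeScript sketch of an input channel that uses the ManifestFile data type; the bucket, object keys, and content type are placeholders, and AutoMLJobChannel is assumed to be the exported interface name for CreateAutoMLJobV2 input channels.

import type { AutoMLJobChannel } from "@aws-sdk/client-sagemaker";

// The manifest object at s3://amzn-s3-demo-bucket/manifests/training.manifest might contain:
// [ {"prefix": "s3://amzn-s3-demo-bucket/data/"},
//   "part-0001.csv",
//   "part-0002.csv" ]

// Channel that points SageMaker at the manifest instead of a key-name prefix.
const trainingChannel: AutoMLJobChannel = {
  ChannelType: "training",
  ContentType: "text/csv;header=present",
  DataSource: {
    S3DataSource: {
      S3DataType: "ManifestFile",
      S3Uri: "s3://amzn-s3-demo-bucket/manifests/training.manifest",
    },
  },
};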
The URL to the Amazon S3 data source. The Uri refers to the Amazon S3 prefix or ManifestFile\n depending on the data type.
", + "smithy.api#documentation": "The URL to the Amazon S3 data source. The Uri refers to the Amazon S3\n prefix or ManifestFile depending on the data type.
", "smithy.api#required": {} } } @@ -4891,13 +4927,13 @@ "ModelInsights": { "target": "com.amazonaws.sagemaker#ModelInsightsLocation", "traits": { - "smithy.api#documentation": "The Amazon S3 prefix to the model insight artifacts generated for the AutoML candidate.
" + "smithy.api#documentation": "The Amazon S3 prefix to the model insight artifacts generated for the AutoML\n candidate.
" } }, "BacktestResults": { "target": "com.amazonaws.sagemaker#BacktestResultsLocation", "traits": { - "smithy.api#documentation": "The Amazon S3 prefix to the accuracy metrics and the inference results observed over the\n testing window. Available only for the time-series forecasting problem type.
" + "smithy.api#documentation": "The Amazon S3 prefix to the accuracy metrics and the inference results observed\n over the testing window. Available only for the time-series forecasting problem\n type.
" } } }, @@ -4919,7 +4955,7 @@ "AlgorithmsConfig": { "target": "com.amazonaws.sagemaker#AutoMLAlgorithmsConfig", "traits": { - "smithy.api#documentation": "Stores the configuration information for the selection of algorithms used to train model\n candidates on tabular data.
The list of available algorithms to choose from depends on the training mode set in TabularJobConfig.Mode. AlgorithmsConfig should not be set in AUTO training mode. When AlgorithmsConfig is provided, one AutoMLAlgorithms attribute must be set and one only. If the list of algorithms provided as values for AutoMLAlgorithms is empty, CandidateGenerationConfig uses the full set of algorithms for the given training mode. When AlgorithmsConfig is not provided, CandidateGenerationConfig uses the full set of algorithms for the given training mode. For the list of all algorithms per problem type and training mode, see AutoMLAlgorithmConfig. For more information on each algorithm, see the Algorithm support section in Autopilot developer guide.
" + "smithy.api#documentation": "Your Autopilot job trains a default set of algorithms on your dataset. For tabular and\n time-series data, you can customize the algorithm list by selecting a subset of algorithms\n for your problem type.
AlgorithmsConfig stores the customized selection of algorithms to train on your data.
For the tabular problem type TabularJobConfig, the list of available algorithms to choose from depends on the training mode set in AutoMLJobConfig.Mode. AlgorithmsConfig should not be set when the training mode AutoMLJobConfig.Mode is set to AUTO. When AlgorithmsConfig is provided, one AutoMLAlgorithms attribute must be set and one only. If the list of algorithms provided as values for AutoMLAlgorithms is empty, CandidateGenerationConfig uses the full set of algorithms for the given training mode. When AlgorithmsConfig is not provided, CandidateGenerationConfig uses the full set of algorithms for the given training mode. For the list of all algorithms per training mode, see AlgorithmConfig. For more information on each algorithm, see the Algorithm support section in the Autopilot developer guide.
For the time-series forecasting problem type TimeSeriesForecastingJobConfig, choose your algorithms from the list provided in AlgorithmConfig. For more information on each algorithm, see the Algorithms support for time-series forecasting section in the Autopilot developer guide. When AlgorithmsConfig is provided, one AutoMLAlgorithms attribute must be set and one only. If the list of algorithms provided as values for AutoMLAlgorithms is empty, CandidateGenerationConfig uses the full set of algorithms for time-series forecasting. When AlgorithmsConfig is not provided, CandidateGenerationConfig uses the full set of algorithms for time-series forecasting.
Provides information about encryption and the Amazon S3 output path needed to store artifacts\n from an AutoML job. Format(s) supported: CSV.
", + "smithy.api#documentation": "Provides information about encryption and the Amazon S3 output path needed to\n store artifacts from an AutoML job. Format(s) supported: CSV.
", "smithy.api#required": {} } }, @@ -8772,7 +8808,7 @@ "target": "com.amazonaws.sagemaker#AutoMLOutputDataConfig", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "Provides information about encryption and the Amazon S3 output path needed to store artifacts\n from an AutoML job.
", + "smithy.api#documentation": "Provides information about encryption and the Amazon S3 output path needed to\n store artifacts from an AutoML job.
", "smithy.api#required": {} } }, @@ -11632,6 +11668,18 @@ "traits": { "smithy.api#documentation": "The URI of the source for the model package. If you want to clone a model package,\n set it to the model package Amazon Resource Name (ARN). If you want to register a model,\n set it to the model ARN.
" } + }, + "SecurityConfig": { + "target": "com.amazonaws.sagemaker#ModelPackageSecurityConfig", + "traits": { + "smithy.api#documentation": "The KMS Key ID (KMSKeyId
) used for encryption of model package information.
The model card associated with the model package. Since ModelPackageModelCard is tied to a model package, it is a specific usage of a model card and its schema is simplified compared to the schema of ModelCard. The ModelPackageModelCard schema does not include model_package_details, and model_overview is composed of the model_creator and model_artifact properties. For more information about the model card associated with the model package, see View the Details of a Model Version.
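The two members added here (SecurityConfig and ModelCard) can be exercised together. A sketch under stated assumptions: the group name, KMS key ARN, and model ARN are placeholders, the model-card keys are illustrative rather than the full schema, and a real registration would normally also carry an InferenceSpecification.

import { SageMakerClient, CreateModelPackageCommand } from "@aws-sdk/client-sagemaker";

const client = new SageMakerClient({});

async function registerEncryptedVersion() {
  // Register a versioned model package whose package metadata is protected with a
  // customer managed KMS key and that carries a simplified, package-scoped model card.
  await client.send(
    new CreateModelPackageCommand({
      ModelPackageGroupName: "my-model-group", // placeholder group
      ModelApprovalStatus: "PendingManualApproval",
      SourceUri: "arn:aws:sagemaker:us-east-1:123456789012:model/my-model", // placeholder model ARN
      SecurityConfig: {
        KmsKeyId: "arn:aws:kms:us-east-1:123456789012:key/11111111-2222-3333-4444-555555555555",
      },
      ModelCard: {
        ModelCardStatus: "Draft",
        // ModelCardContent is an opaque JSON string; the keys here are illustrative.
        ModelCardContent: JSON.stringify({
          model_overview: { model_creator: "data-science-team" },
        }),
      },
    })
  );
}

registerEncryptedVersion().catch(console.error);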
The best model candidate selected by SageMaker Autopilot using both the best objective metric and\n lowest InferenceLatency for\n an experiment.
" + "smithy.api#documentation": "The best model candidate selected by SageMaker Autopilot using both the best\n objective metric and lowest InferenceLatency for\n an experiment.
" } }, "AutoMLJobStatus": { @@ -17209,7 +17257,7 @@ "target": "com.amazonaws.sagemaker#RoleArn", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "The ARN of the IAM role that has read permission to the input data location and\n write permission to the output data location in Amazon S3.
", + "smithy.api#documentation": "The ARN of the IAM role that has read permission to the input data\n location and write permission to the output data location in Amazon S3.
", "smithy.api#required": {} } }, @@ -17317,7 +17365,7 @@ "SecurityConfig": { "target": "com.amazonaws.sagemaker#AutoMLSecurityConfig", "traits": { - "smithy.api#documentation": "Returns the security configuration for traffic encryption or Amazon VPC settings.
" + "smithy.api#documentation": "Returns the security configuration for traffic encryption or Amazon VPC\n settings.
" } } }, @@ -21545,7 +21593,7 @@ "target": "com.amazonaws.sagemaker#DescribeModelPackageOutput" }, "traits": { - "smithy.api#documentation": "Returns a description of the specified model package, which is used to create SageMaker\n models or list them on Amazon Web Services Marketplace.
\nTo create models in SageMaker, buyers can subscribe to model packages listed on Amazon Web Services\n Marketplace.
" + "smithy.api#documentation": "Returns a description of the specified model package, which is used to create SageMaker\n models or list them on Amazon Web Services Marketplace.
\nIf you provided a KMS Key ID when you created your model package,\n you will see the KMS\n Decrypt API call in your CloudTrail logs when you use this API.
\nTo create models in SageMaker, buyers can subscribe to model packages listed on Amazon Web Services\n Marketplace.
" } }, "com.amazonaws.sagemaker#DescribeModelPackageGroup": { @@ -21810,6 +21858,18 @@ "traits": { "smithy.api#documentation": "The URI of the source for the model package.
" } + }, + "SecurityConfig": { + "target": "com.amazonaws.sagemaker#ModelPackageSecurityConfig", + "traits": { + "smithy.api#documentation": "The KMS Key ID (KMSKeyId
) used for encryption of model package information.
The model card associated with the model package. Since ModelPackageModelCard is tied to a model package, it is a specific usage of a model card and its schema is simplified compared to the schema of ModelCard. The ModelPackageModelCard schema does not include model_package_details, and model_overview is composed of the model_creator and model_artifact properties. For more information about the model card associated with the model package, see View the Details of a Model Version.
This parameter defines the maximum number of results that can be returned in a single response. The MaxResults parameter is an upper bound, not a target. If there are more results available than the value specified, a NextToken is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.
This parameter defines the maximum number of results that can be return in a single response. The MaxResults parameter is an upper bound, not a target. If there are more results available than the value specified, a NextToken is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.
This parameter defines the maximum number of results that can be returned in a single response. The MaxResults parameter is an upper bound, not a target. If there are more results available than the value specified, a NextToken is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.
This parameter defines the maximum number of results that can be return in a single response. The MaxResults parameter is an upper bound, not a target. If there are more results available than the value specified, a NextToken is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.
This parameter defines the maximum number of results that can be returned in a single response. The MaxResults parameter is an upper bound, not a target. If there are more results available than the value specified, a NextToken is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.
This parameter defines the maximum number of results that can be return in a single response. The MaxResults parameter is an upper bound, not a target. If there are more results available than the value specified, a NextToken is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.
This parameter defines the maximum number of results that can be returned in a single response. The MaxResults parameter is an upper bound, not a target. If there are more results available than the value specified, a NextToken is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.
This parameter defines the maximum number of results that can be return in a single response. The MaxResults parameter is an upper bound, not a target. If there are more results available than the value specified, a NextToken is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.
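The MaxResults/NextToken contract described above is the usual drain-the-pages loop. A sketch using ListModelPackages as an arbitrary paginated call; any of the list operations touched here follows the same pattern.

import { SageMakerClient, ListModelPackagesCommand } from "@aws-sdk/client-sagemaker";
import type { ModelPackageSummary } from "@aws-sdk/client-sagemaker";

const client = new SageMakerClient({});

// Drain every page: MaxResults only caps a single response (default 10), and the
// presence of NextToken, not the page size, signals that more results exist.
async function listAllModelPackages(groupName: string): Promise<ModelPackageSummary[]> {
  const summaries: ModelPackageSummary[] = [];
  let nextToken: string | undefined;
  do {
    const page = await client.send(
      new ListModelPackagesCommand({
        ModelPackageGroupName: groupName,
        MaxResults: 50,
        NextToken: nextToken,
      })
    );
    summaries.push(...(page.ModelPackageSummaryList ?? []));
    nextToken = page.NextToken;
  } while (nextToken);
  return summaries;
}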
The URI of the source for the model package.
" } }, + "SecurityConfig": { + "target": "com.amazonaws.sagemaker#ModelPackageSecurityConfig" + }, + "ModelCard": { + "target": "com.amazonaws.sagemaker#ModelPackageModelCard" + }, "Tags": { "target": "com.amazonaws.sagemaker#TagList", "traits": { @@ -44703,6 +44769,42 @@ "target": "com.amazonaws.sagemaker#ModelPackageGroupSummary" } }, + "com.amazonaws.sagemaker#ModelPackageModelCard": { + "type": "structure", + "members": { + "ModelCardContent": { + "target": "com.amazonaws.sagemaker#ModelCardContent", + "traits": { + "smithy.api#documentation": "The content of the model card.
" + } + }, + "ModelCardStatus": { + "target": "com.amazonaws.sagemaker#ModelCardStatus", + "traits": { + "smithy.api#documentation": "The approval status of the model card within your organization. Different organizations might have different criteria for model card review and approval.
Draft: The model card is a work in progress. PendingReview: The model card is pending review. Approved: The model card is approved. Archived: The model card is archived. No more updates can be made to the model card content. If you try to update the model card content, you will receive the message Model Card is in Archived state.
The model card associated with the model package. Since ModelPackageModelCard is tied to a model package, it is a specific usage of a model card and its schema is simplified compared to the schema of ModelCard. The ModelPackageModelCard schema does not include model_package_details, and model_overview is composed of the model_creator and model_artifact properties. For more information about the model card associated with the model package, see View the Details of a Model Version.
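A minimal sketch of the new structure, assuming the SDK exports it as ModelPackageModelCard; the content keys are illustrative placeholders, since ModelCardContent is an opaque JSON string from the model card schema.

import type { ModelPackageModelCard } from "@aws-sdk/client-sagemaker";

// A model-package-scoped card: content is a JSON string, and the status gates
// how the card may still be edited ("Archived" freezes it).
const card: ModelPackageModelCard = {
  ModelCardStatus: "Draft",
  ModelCardContent: JSON.stringify({
    model_overview: { model_creator: "data-science-team" }, // illustrative keys
  }),
};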
The KMS Key ID (KMSKeyId) used for encryption of model package information.
An optional Key Management Service\n key to encrypt, decrypt, and re-encrypt model package information for regulated workloads with\n highly sensitive data.
" + } + }, "com.amazonaws.sagemaker#ModelPackageSortBy": { "type": "enum", "members": { @@ -59793,7 +59895,7 @@ "FeatureSpecificationS3Uri": { "target": "com.amazonaws.sagemaker#S3Uri", "traits": { - "smithy.api#documentation": "A URL to the Amazon S3 data source containing selected features from the input data source to\n run an Autopilot job V2. You can input FeatureAttributeNames
(optional) in JSON format as shown below: { \"FeatureAttributeNames\":[\"col1\", \"col2\", ...] }. You can also specify the data type of the feature (optional) in the format shown below: { \"FeatureDataTypes\":{\"col1\":\"numeric\", \"col2\":\"categorical\" ... } }. These column keys may not include the target column. In ensembling mode, Autopilot only supports the following data types: numeric, categorical, text, and datetime. In HPO mode, Autopilot can support numeric, categorical, text, datetime, and sequence. If only FeatureDataTypes is provided, the column keys (col1, col2, ..) should be a subset of the column names in the input data. If both FeatureDataTypes and FeatureAttributeNames are provided, then the column keys should be a subset of the column names provided in FeatureAttributeNames. The key name FeatureAttributeNames is fixed. The values listed in [\"col1\", \"col2\", ...] are case sensitive and should be a list of strings containing unique values that are a subset of the column names in the input data. The list of columns provided must not include the target column.
A URL to the Amazon S3 data source containing additional selected features that complement the target, itemID, timestamp, and grouped columns set in TimeSeriesConfig. When not provided, the AutoML job V2 includes all the columns from the original dataset that are not already declared in TimeSeriesConfig. If provided, the AutoML job V2 only considers these additional columns as a complement to the ones declared in TimeSeriesConfig. You can input FeatureAttributeNames (optional) in JSON format as shown below: { \"FeatureAttributeNames\":[\"col1\", \"col2\", ...] }. You can also specify the data type of the feature (optional) in the format shown below: { \"FeatureDataTypes\":{\"col1\":\"numeric\", \"col2\":\"categorical\" ... } }. Autopilot supports the following data types: numeric, categorical, text, and datetime. These column keys must not include any column set in TimeSeriesConfig.
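Putting the time-series pieces together, a hedged TypeScript sketch of a TimeSeriesForecastingJobConfig that uses the newly exposed CandidateGenerationConfig member and an optional feature-specification file; the frequency, horizon, column names, S3 URI, and algorithm choices are placeholders.

import type { TimeSeriesForecastingJobConfig } from "@aws-sdk/client-sagemaker";

// Forecasting config that pins the candidate algorithms to two of the newly
// exposed time-series values and supplies an optional feature-specification file.
const forecastingConfig: TimeSeriesForecastingJobConfig = {
  ForecastFrequency: "1D",                 // daily series
  ForecastHorizon: 14,                     // predict 14 periods ahead
  ForecastQuantiles: ["p10", "p50", "p90"],
  TimeSeriesConfig: {
    TargetAttributeName: "demand",
    TimestampAttributeName: "ts",
    ItemIdentifierAttributeName: "sku",
  },
  FeatureSpecificationS3Uri: "s3://amzn-s3-demo-bucket/autopilot/ts-feature-spec.json",
  CandidateGenerationConfig: {
    AlgorithmsConfig: [{ AutoMLAlgorithms: ["deepar", "cnn-qr"] }],
  },
};

The object is passed as AutoMLProblemTypeConfig.TimeSeriesForecastingJobConfig on CreateAutoMLJobV2.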
The collection of holiday featurization attributes used to incorporate national holiday\n information into your forecasting model.
" } + }, + "CandidateGenerationConfig": { + "target": "com.amazonaws.sagemaker#CandidateGenerationConfig" } }, "traits": { @@ -65650,6 +65755,12 @@ "traits": { "smithy.api#documentation": "The URI of the source for the model package.
" } + }, + "ModelCard": { + "target": "com.amazonaws.sagemaker#ModelPackageModelCard", + "traits": { + "smithy.api#documentation": "The model card associated with the model package. Since ModelPackageModelCard
is\n tied to a model package, it is a specific usage of a model card and its schema is\n simplified compared to the schema of ModelCard
. The \n ModelPackageModelCard
schema does not include model_package_details
,\n and model_overview
is composed of the model_creator
and\n model_artifact
properties. For more information about\n the model card associated with the model package, see View\n the Details of a Model Version.