diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 3e6708180..a74101922 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -2cee201b2e8d656f7306b2f9ec98edfa721e9829 \ No newline at end of file +b142b72bea6f30d8efb36dfa8c58e0d63ae5329b \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index 067333c32..706329a62 100755 --- a/.gitattributes +++ b/.gitattributes @@ -2050,6 +2050,10 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/CspEnablem databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/CspEnablementAccountImpl.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/CspEnablementAccountService.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/CspEnablementAccountSetting.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptions.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsAPI.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsImpl.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsService.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DefaultNamespaceAPI.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DefaultNamespaceImpl.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DefaultNamespaceService.java linguist-generated=true @@ -2061,6 +2065,8 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteAibi databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteAibiDashboardEmbeddingAccessPolicySettingResponse.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteAibiDashboardEmbeddingApprovedDomainsSettingRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteAibiDashboardEmbeddingApprovedDomainsSettingResponse.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDashboardEmailSubscriptionsRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDashboardEmailSubscriptionsResponse.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDefaultNamespaceSettingRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDefaultNamespaceSettingResponse.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDisableLegacyAccessRequest.java linguist-generated=true @@ -2083,6 +2089,8 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeletePriv databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteResponse.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteRestrictWorkspaceAdminsSettingRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteRestrictWorkspaceAdminsSettingResponse.java 
linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteSqlResultsDownloadRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteSqlResultsDownloadResponse.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteTokenManagementRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DestinationType.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DisableLegacyAccess.java linguist-generated=true @@ -2157,6 +2165,7 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetAibiDas databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetAutomaticClusterUpdateSettingRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetComplianceSecurityProfileSettingRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetCspEnablementAccountSettingRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetDashboardEmailSubscriptionsRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetDefaultNamespaceSettingRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetDisableLegacyAccessRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetDisableLegacyDbfsRequest.java linguist-generated=true @@ -2175,6 +2184,7 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetNotific databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetPersonalComputeSettingRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetPrivateEndpointRuleRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetRestrictWorkspaceAdminsSettingRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetSqlResultsDownloadRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetStatusRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetTokenManagementRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetTokenPermissionLevelsResponse.java linguist-generated=true @@ -2254,6 +2264,10 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SettingsAP databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SettingsImpl.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SettingsService.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SlackConfig.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownload.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadAPI.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadImpl.java linguist-generated=true 
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadService.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/StringMessage.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/TokenAccessControlRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/TokenAccessControlResponse.java linguist-generated=true @@ -2276,6 +2290,7 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateAibi databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateAutomaticClusterUpdateSettingRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateComplianceSecurityProfileSettingRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateCspEnablementAccountSettingRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateDashboardEmailSubscriptionsRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateDefaultNamespaceSettingRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateDisableLegacyAccessRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateDisableLegacyDbfsRequest.java linguist-generated=true @@ -2296,6 +2311,7 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdatePers databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdatePrivateEndpointRule.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateResponse.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateRestrictWorkspaceAdminsSettingRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateSqlResultsDownloadRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateWorkspaceNetworkOptionRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/WorkspaceConfAPI.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/WorkspaceConfImpl.java linguist-generated=true diff --git a/.github/workflows/tagging.yml b/.github/workflows/tagging.yml index 558f2993a..d4486fb51 100644 --- a/.github/workflows/tagging.yml +++ b/.github/workflows/tagging.yml @@ -21,7 +21,7 @@ jobs: steps: - name: Generate GitHub App Token id: generate-token - uses: actions/create-github-app-token@v1 + uses: actions/create-github-app-token@v2 with: app-id: ${{ secrets.DECO_SDK_TAGGING_APP_ID }} private-key: ${{ secrets.DECO_SDK_TAGGING_PRIVATE_KEY }} @@ -49,4 +49,3 @@ jobs: GITHUB_REPOSITORY: ${{ github.repository }} run: | python tagging.py - diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 5e8ed133b..808c01fcf 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -11,3 +11,25 @@ ### Internal Changes ### API Changes +* Added `workspaceClient.dashboardEmailSubscriptions()` service and `workspaceClient.sqlResultsDownload()` service. 
+* Added `remoteShuffleDiskIops`, `remoteShuffleDiskThroughput` and `totalInitialRemoteShuffleDiskSize` fields for `com.databricks.sdk.service.compute.ClusterAttributes`.
+* Added `remoteShuffleDiskIops`, `remoteShuffleDiskThroughput` and `totalInitialRemoteShuffleDiskSize` fields for `com.databricks.sdk.service.compute.ClusterDetails`.
+* Added `remoteShuffleDiskIops`, `remoteShuffleDiskThroughput` and `totalInitialRemoteShuffleDiskSize` fields for `com.databricks.sdk.service.compute.ClusterSpec`.
+* Added `remoteShuffleDiskIops`, `remoteShuffleDiskThroughput` and `totalInitialRemoteShuffleDiskSize` fields for `com.databricks.sdk.service.compute.CreateCluster`.
+* Added `remoteShuffleDiskIops`, `remoteShuffleDiskThroughput` and `totalInitialRemoteShuffleDiskSize` fields for `com.databricks.sdk.service.compute.EditCluster`.
+* Added `remoteShuffleDiskIops`, `remoteShuffleDiskThroughput` and `totalInitialRemoteShuffleDiskSize` fields for `com.databricks.sdk.service.compute.UpdateClusterResource`.
+* Added `tags` field for `com.databricks.sdk.service.pipelines.CreatePipeline`.
+* Added `tags` field for `com.databricks.sdk.service.pipelines.EditPipeline`.
+* Added `tags` field for `com.databricks.sdk.service.pipelines.PipelineSpec`.
+* Added `maxProvisionedConcurrency` and `minProvisionedConcurrency` fields for `com.databricks.sdk.service.serving.ServedEntityInput`.
+* Added `maxProvisionedConcurrency` and `minProvisionedConcurrency` fields for `com.databricks.sdk.service.serving.ServedEntityOutput`.
+* Added `maxProvisionedConcurrency` and `minProvisionedConcurrency` fields for `com.databricks.sdk.service.serving.ServedModelInput`.
+* Added `maxProvisionedConcurrency` and `minProvisionedConcurrency` fields for `com.databricks.sdk.service.serving.ServedModelOutput`.
+* Added `DELTASHARING_CATALOG`, `FOREIGN_CATALOG`, `INTERNAL_CATALOG`, `MANAGED_CATALOG`, `MANAGED_ONLINE_CATALOG`, `SYSTEM_CATALOG` and `UNKNOWN_CATALOG_TYPE` enum values for `com.databricks.sdk.service.catalog.CatalogType`.
+* Added `GA4_RAW_DATA`, `POWER_BI`, `SALESFORCE`, `SALESFORCE_DATA_CLOUD`, `SERVICENOW`, `UNKNOWN_CONNECTION_TYPE` and `WORKDAY_RAAS` enum values for `com.databricks.sdk.service.catalog.ConnectionType`.
+* Added `OAUTH_ACCESS_TOKEN`, `OAUTH_M2M`, `OAUTH_REFRESH_TOKEN`, `OAUTH_RESOURCE_OWNER_PASSWORD`, `OAUTH_U2M`, `OAUTH_U2M_MAPPING`, `OIDC_TOKEN`, `PEM_PRIVATE_KEY`, `SERVICE_CREDENTIAL` and `UNKNOWN_CREDENTIAL_TYPE` enum values for `com.databricks.sdk.service.catalog.CredentialType`.
+* Added `CATALOG`, `CLEAN_ROOM`, `CONNECTION`, `CREDENTIAL`, `EXTERNAL_LOCATION`, `EXTERNAL_METADATA`, `FUNCTION`, `METASTORE`, `PIPELINE`, `PROVIDER`, `RECIPIENT`, `SCHEMA`, `SHARE`, `STAGING_TABLE`, `STORAGE_CREDENTIAL`, `TABLE`, `UNKNOWN_SECURABLE_TYPE` and `VOLUME` enum values for `com.databricks.sdk.service.catalog.SecurableType`.
+* Added `TERADATA` enum value for `com.databricks.sdk.service.pipelines.IngestionSourceType`.
+* Added `OIDC_FEDERATION` enum value for `com.databricks.sdk.service.sharing.AuthenticationType`.
+* [Breaking] Changed `securableType` field for `com.databricks.sdk.service.catalog.ConnectionInfo` to type `com.databricks.sdk.service.catalog.SecurableType` class.
+* [Breaking] Changed `catalogType` field for `com.databricks.sdk.service.catalog.SchemaInfo` to type `com.databricks.sdk.service.catalog.CatalogType` class.
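For consumers upgrading across the breaking changes above, code that previously compared `catalogType` or `securableType` against raw strings must now compare against the new enums, and the new pipeline `tags` field is an ordinary builder-style setter. The sketch below is illustrative only and not part of the generated SDK: the schema name and tag values are hypothetical, default workspace authentication is assumed, and string-typed tag keys/values are assumed (mirroring cluster custom tags).

```java
import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.catalog.CatalogType;
import com.databricks.sdk.service.catalog.SchemaInfo;
import com.databricks.sdk.service.pipelines.CreatePipeline;
import java.util.Map;

public class UpgradeNotesExample {
  public static void main(String[] args) {
    // Resolves credentials via the default authentication chain (env vars / .databrickscfg).
    WorkspaceClient w = new WorkspaceClient();

    // Breaking change: SchemaInfo.getCatalogType() now returns the CatalogType enum
    // instead of a String, so compare against enum constants rather than string literals.
    SchemaInfo schema = w.schemas().get("main.default"); // hypothetical schema full name
    if (schema.getCatalogType() == CatalogType.MANAGED_CATALOG) {
      System.out.println("Schema belongs to a managed catalog");
    }

    // New field: pipeline tags are forwarded to the pipeline's cluster as cluster tags,
    // so the same limitations apply (at most 25 tags per pipeline).
    CreatePipeline createPipeline =
        new CreatePipeline()
            .setName("demo-pipeline")
            .setTags(Map.of("team", "data-eng", "cost-center", "1234"));
    // w.pipelines().create(createPipeline) would submit the request.
  }
}
```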
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/AccountClient.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/AccountClient.java index 1c813a589..be96caf24 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/AccountClient.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/AccountClient.java @@ -574,11 +574,12 @@ public WorkspaceAssignmentAPI workspaceAssignment() { } /** - * These APIs allow configuration of network settings for Databricks workspaces. Each workspace is - * always associated with exactly one network policy that controls which network destinations can - * be accessed from the Databricks environment. By default, workspaces are associated with the - * 'default-policy' network policy. You cannot create or delete a workspace's network - * configuration, only update it to associate the workspace with a different policy. + * These APIs allow configuration of network settings for Databricks workspaces by selecting which + * network policy to associate with the workspace. Each workspace is always associated with + * exactly one network policy that controls which network destinations can be accessed from the + * Databricks environment. By default, workspaces are associated with the 'default-policy' network + * policy. You cannot create or delete a workspace's network option, only update it to associate + * the workspace with a different policy */ public WorkspaceNetworkConfigurationAPI workspaceNetworkConfiguration() { return workspaceNetworkConfigurationAPI; diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/WorkspaceClient.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/WorkspaceClient.java index f6ae23552..559a5eabf 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/WorkspaceClient.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/WorkspaceClient.java @@ -479,7 +479,7 @@ public AlertsLegacyAPI alertsLegacy() { return alertsLegacyAPI; } - /** TODO: Add description */ + /** New version of SQL Alerts */ public AlertsV2API alertsV2() { return alertsV2API; } diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionInfo.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionInfo.java index 5e2e8d332..496800340 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionInfo.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionInfo.java @@ -54,7 +54,7 @@ public class ConnectionInfo { @JsonProperty("owner") private String owner; - /** An object containing map of key-value properties attached to the connection. */ + /** A map of key-value properties attached to the securable. */ @JsonProperty("properties") private Map properties; @@ -66,9 +66,9 @@ public class ConnectionInfo { @JsonProperty("read_only") private Boolean readOnly; - /** */ + /** The type of Unity Catalog securable. */ @JsonProperty("securable_type") - private String securableType; + private SecurableType securableType; /** Time at which this connection was updated, in epoch milliseconds. 
*/ @JsonProperty("updated_at") @@ -208,12 +208,12 @@ public Boolean getReadOnly() { return readOnly; } - public ConnectionInfo setSecurableType(String securableType) { + public ConnectionInfo setSecurableType(SecurableType securableType) { this.securableType = securableType; return this; } - public String getSecurableType() { + public SecurableType getSecurableType() { return securableType; } diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionType.java index b6e6a3e33..c43cb89bd 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionType.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionType.java @@ -4,20 +4,27 @@ import com.databricks.sdk.support.Generated; -/** The type of connection. */ +/** Next Id: 30 */ @Generated public enum ConnectionType { BIGQUERY, DATABRICKS, + GA4_RAW_DATA, GLUE, HIVE_METASTORE, HTTP, MYSQL, ORACLE, POSTGRESQL, + POWER_BI, REDSHIFT, + SALESFORCE, + SALESFORCE_DATA_CLOUD, + SERVICENOW, SNOWFLAKE, SQLDW, SQLSERVER, TERADATA, + UNKNOWN_CONNECTION_TYPE, + WORKDAY_RAAS, } diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionsAPI.java index eb449c1dd..ff8b2cda9 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionsAPI.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionsAPI.java @@ -85,10 +85,6 @@ public ConnectionInfo get(GetConnectionRequest request) { *

List all connections. */ public Iterable list(ListConnectionsRequest request) { - - if (request.getMaxResults() == null) { - request.setMaxResults(0L); - } return new Paginator<>( request, impl::list, diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateConnection.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateConnection.java index 2836337ce..3eea7832c 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateConnection.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateConnection.java @@ -26,7 +26,7 @@ public class CreateConnection { @JsonProperty("options") private Map options; - /** An object containing map of key-value properties attached to the connection. */ + /** A map of key-value properties attached to the securable. */ @JsonProperty("properties") private Map properties; diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CredentialType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CredentialType.java index 7f8868e05..b5f06caf4 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CredentialType.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CredentialType.java @@ -4,9 +4,19 @@ import com.databricks.sdk.support.Generated; -/** The type of credential. */ +/** Next Id: 12 */ @Generated public enum CredentialType { BEARER_TOKEN, + OAUTH_ACCESS_TOKEN, + OAUTH_M2M, + OAUTH_REFRESH_TOKEN, + OAUTH_RESOURCE_OWNER_PASSWORD, + OAUTH_U2M, + OAUTH_U2M_MAPPING, + OIDC_TOKEN, + PEM_PRIVATE_KEY, + SERVICE_CREDENTIAL, + UNKNOWN_CREDENTIAL_TYPE, USERNAME_PASSWORD, } diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SchemaInfo.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SchemaInfo.java index 9358ca802..6f7da5149 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SchemaInfo.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SchemaInfo.java @@ -8,6 +8,7 @@ import java.util.Map; import java.util.Objects; +/** Next ID: 40 */ @Generated public class SchemaInfo { /** @@ -23,7 +24,7 @@ public class SchemaInfo { /** The type of the parent catalog. */ @JsonProperty("catalog_type") - private String catalogType; + private CatalogType catalogType; /** User-provided free-form text description. */ @JsonProperty("comment") @@ -41,7 +42,7 @@ public class SchemaInfo { @JsonProperty("effective_predictive_optimization_flag") private EffectivePredictiveOptimizationFlag effectivePredictiveOptimizationFlag; - /** */ + /** Whether predictive optimization should be enabled for this object and objects under it. 
*/ @JsonProperty("enable_predictive_optimization") private EnablePredictiveOptimization enablePredictiveOptimization; @@ -103,12 +104,12 @@ public String getCatalogName() { return catalogName; } - public SchemaInfo setCatalogType(String catalogType) { + public SchemaInfo setCatalogType(CatalogType catalogType) { this.catalogType = catalogType; return this; } - public String getCatalogType() { + public CatalogType getCatalogType() { return catalogType; } diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SchemasAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SchemasAPI.java index c1345a265..acaa88214 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SchemasAPI.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SchemasAPI.java @@ -85,10 +85,6 @@ public Iterable list(String catalogName) { * the array. */ public Iterable list(ListSchemasRequest request) { - - if (request.getMaxResults() == null) { - request.setMaxResults(0L); - } return new Paginator<>( request, impl::list, diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateSchema.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateSchema.java index 0cb434d26..5c910b1e7 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateSchema.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateSchema.java @@ -15,7 +15,7 @@ public class UpdateSchema { @JsonProperty("comment") private String comment; - /** */ + /** Whether predictive optimization should be enabled for this object and objects under it. */ @JsonProperty("enable_predictive_optimization") private EnablePredictiveOptimization enablePredictiveOptimization; diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterAttributes.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterAttributes.java index ff9668106..c359b9f8a 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterAttributes.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterAttributes.java @@ -191,6 +191,14 @@ public class ClusterAttributes { @JsonProperty("policy_id") private String policyId; + /** If set, what the configurable IOPS for the remote shuffle disk is. */ + @JsonProperty("remote_shuffle_disk_iops") + private Long remoteShuffleDiskIops; + + /** If set, what the configurable throughput (in Mb/s) for the remote shuffle disk is. */ + @JsonProperty("remote_shuffle_disk_throughput") + private Long remoteShuffleDiskThroughput; + /** * Determines the cluster's runtime engine, either standard or Photon. * @@ -246,6 +254,10 @@ public class ClusterAttributes { @JsonProperty("ssh_public_keys") private Collection sshPublicKeys; + /** If set, what the total initial volume size (in GB) of the remote shuffle disks should be. */ + @JsonProperty("total_initial_remote_shuffle_disk_size") + private Long totalInitialRemoteShuffleDiskSize; + /** * This field can only be used when `kind = CLASSIC_PREVIEW`. 
* @@ -430,6 +442,24 @@ public String getPolicyId() { return policyId; } + public ClusterAttributes setRemoteShuffleDiskIops(Long remoteShuffleDiskIops) { + this.remoteShuffleDiskIops = remoteShuffleDiskIops; + return this; + } + + public Long getRemoteShuffleDiskIops() { + return remoteShuffleDiskIops; + } + + public ClusterAttributes setRemoteShuffleDiskThroughput(Long remoteShuffleDiskThroughput) { + this.remoteShuffleDiskThroughput = remoteShuffleDiskThroughput; + return this; + } + + public Long getRemoteShuffleDiskThroughput() { + return remoteShuffleDiskThroughput; + } + public ClusterAttributes setRuntimeEngine(RuntimeEngine runtimeEngine) { this.runtimeEngine = runtimeEngine; return this; @@ -484,6 +514,16 @@ public Collection getSshPublicKeys() { return sshPublicKeys; } + public ClusterAttributes setTotalInitialRemoteShuffleDiskSize( + Long totalInitialRemoteShuffleDiskSize) { + this.totalInitialRemoteShuffleDiskSize = totalInitialRemoteShuffleDiskSize; + return this; + } + + public Long getTotalInitialRemoteShuffleDiskSize() { + return totalInitialRemoteShuffleDiskSize; + } + public ClusterAttributes setUseMlRuntime(Boolean useMlRuntime) { this.useMlRuntime = useMlRuntime; return this; @@ -526,12 +566,15 @@ public boolean equals(Object o) { && Objects.equals(kind, that.kind) && Objects.equals(nodeTypeId, that.nodeTypeId) && Objects.equals(policyId, that.policyId) + && Objects.equals(remoteShuffleDiskIops, that.remoteShuffleDiskIops) + && Objects.equals(remoteShuffleDiskThroughput, that.remoteShuffleDiskThroughput) && Objects.equals(runtimeEngine, that.runtimeEngine) && Objects.equals(singleUserName, that.singleUserName) && Objects.equals(sparkConf, that.sparkConf) && Objects.equals(sparkEnvVars, that.sparkEnvVars) && Objects.equals(sparkVersion, that.sparkVersion) && Objects.equals(sshPublicKeys, that.sshPublicKeys) + && Objects.equals(totalInitialRemoteShuffleDiskSize, that.totalInitialRemoteShuffleDiskSize) && Objects.equals(useMlRuntime, that.useMlRuntime) && Objects.equals(workloadType, that.workloadType); } @@ -558,12 +601,15 @@ public int hashCode() { kind, nodeTypeId, policyId, + remoteShuffleDiskIops, + remoteShuffleDiskThroughput, runtimeEngine, singleUserName, sparkConf, sparkEnvVars, sparkVersion, sshPublicKeys, + totalInitialRemoteShuffleDiskSize, useMlRuntime, workloadType); } @@ -590,12 +636,15 @@ public String toString() { .add("kind", kind) .add("nodeTypeId", nodeTypeId) .add("policyId", policyId) + .add("remoteShuffleDiskIops", remoteShuffleDiskIops) + .add("remoteShuffleDiskThroughput", remoteShuffleDiskThroughput) .add("runtimeEngine", runtimeEngine) .add("singleUserName", singleUserName) .add("sparkConf", sparkConf) .add("sparkEnvVars", sparkEnvVars) .add("sparkVersion", sparkVersion) .add("sshPublicKeys", sshPublicKeys) + .add("totalInitialRemoteShuffleDiskSize", totalInitialRemoteShuffleDiskSize) .add("useMlRuntime", useMlRuntime) .add("workloadType", workloadType) .toString(); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterDetails.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterDetails.java index 8b957e3dc..b2309f101 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterDetails.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterDetails.java @@ -287,6 +287,14 @@ public class ClusterDetails { @JsonProperty("policy_id") private String policyId; + /** If set, what the configurable IOPS for the remote shuffle disk 
is. */ + @JsonProperty("remote_shuffle_disk_iops") + private Long remoteShuffleDiskIops; + + /** If set, what the configurable throughput (in Mb/s) for the remote shuffle disk is. */ + @JsonProperty("remote_shuffle_disk_throughput") + private Long remoteShuffleDiskThroughput; + /** * Determines the cluster's runtime engine, either standard or Photon. * @@ -386,6 +394,10 @@ public class ClusterDetails { @JsonProperty("termination_reason") private TerminationReason terminationReason; + /** If set, what the total initial volume size (in GB) of the remote shuffle disks should be. */ + @JsonProperty("total_initial_remote_shuffle_disk_size") + private Long totalInitialRemoteShuffleDiskSize; + /** * This field can only be used when `kind = CLASSIC_PREVIEW`. * @@ -696,6 +708,24 @@ public String getPolicyId() { return policyId; } + public ClusterDetails setRemoteShuffleDiskIops(Long remoteShuffleDiskIops) { + this.remoteShuffleDiskIops = remoteShuffleDiskIops; + return this; + } + + public Long getRemoteShuffleDiskIops() { + return remoteShuffleDiskIops; + } + + public ClusterDetails setRemoteShuffleDiskThroughput(Long remoteShuffleDiskThroughput) { + this.remoteShuffleDiskThroughput = remoteShuffleDiskThroughput; + return this; + } + + public Long getRemoteShuffleDiskThroughput() { + return remoteShuffleDiskThroughput; + } + public ClusterDetails setRuntimeEngine(RuntimeEngine runtimeEngine) { this.runtimeEngine = runtimeEngine; return this; @@ -813,6 +843,16 @@ public TerminationReason getTerminationReason() { return terminationReason; } + public ClusterDetails setTotalInitialRemoteShuffleDiskSize( + Long totalInitialRemoteShuffleDiskSize) { + this.totalInitialRemoteShuffleDiskSize = totalInitialRemoteShuffleDiskSize; + return this; + } + + public Long getTotalInitialRemoteShuffleDiskSize() { + return totalInitialRemoteShuffleDiskSize; + } + public ClusterDetails setUseMlRuntime(Boolean useMlRuntime) { this.useMlRuntime = useMlRuntime; return this; @@ -869,6 +909,8 @@ public boolean equals(Object o) { && Objects.equals(nodeTypeId, that.nodeTypeId) && Objects.equals(numWorkers, that.numWorkers) && Objects.equals(policyId, that.policyId) + && Objects.equals(remoteShuffleDiskIops, that.remoteShuffleDiskIops) + && Objects.equals(remoteShuffleDiskThroughput, that.remoteShuffleDiskThroughput) && Objects.equals(runtimeEngine, that.runtimeEngine) && Objects.equals(singleUserName, that.singleUserName) && Objects.equals(sparkConf, that.sparkConf) @@ -882,6 +924,7 @@ public boolean equals(Object o) { && Objects.equals(stateMessage, that.stateMessage) && Objects.equals(terminatedTime, that.terminatedTime) && Objects.equals(terminationReason, that.terminationReason) + && Objects.equals(totalInitialRemoteShuffleDiskSize, that.totalInitialRemoteShuffleDiskSize) && Objects.equals(useMlRuntime, that.useMlRuntime) && Objects.equals(workloadType, that.workloadType); } @@ -922,6 +965,8 @@ public int hashCode() { nodeTypeId, numWorkers, policyId, + remoteShuffleDiskIops, + remoteShuffleDiskThroughput, runtimeEngine, singleUserName, sparkConf, @@ -935,6 +980,7 @@ public int hashCode() { stateMessage, terminatedTime, terminationReason, + totalInitialRemoteShuffleDiskSize, useMlRuntime, workloadType); } @@ -975,6 +1021,8 @@ public String toString() { .add("nodeTypeId", nodeTypeId) .add("numWorkers", numWorkers) .add("policyId", policyId) + .add("remoteShuffleDiskIops", remoteShuffleDiskIops) + .add("remoteShuffleDiskThroughput", remoteShuffleDiskThroughput) .add("runtimeEngine", runtimeEngine) .add("singleUserName", 
singleUserName) .add("sparkConf", sparkConf) @@ -988,6 +1036,7 @@ public String toString() { .add("stateMessage", stateMessage) .add("terminatedTime", terminatedTime) .add("terminationReason", terminationReason) + .add("totalInitialRemoteShuffleDiskSize", totalInitialRemoteShuffleDiskSize) .add("useMlRuntime", useMlRuntime) .add("workloadType", workloadType) .toString(); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterSpec.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterSpec.java index 08cd8a715..78c7ddbfa 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterSpec.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterSpec.java @@ -218,6 +218,14 @@ public class ClusterSpec { @JsonProperty("policy_id") private String policyId; + /** If set, what the configurable IOPS for the remote shuffle disk is. */ + @JsonProperty("remote_shuffle_disk_iops") + private Long remoteShuffleDiskIops; + + /** If set, what the configurable throughput (in Mb/s) for the remote shuffle disk is. */ + @JsonProperty("remote_shuffle_disk_throughput") + private Long remoteShuffleDiskThroughput; + /** * Determines the cluster's runtime engine, either standard or Photon. * @@ -273,6 +281,10 @@ public class ClusterSpec { @JsonProperty("ssh_public_keys") private Collection sshPublicKeys; + /** If set, what the total initial volume size (in GB) of the remote shuffle disks should be. */ + @JsonProperty("total_initial_remote_shuffle_disk_size") + private Long totalInitialRemoteShuffleDiskSize; + /** * This field can only be used when `kind = CLASSIC_PREVIEW`. * @@ -484,6 +496,24 @@ public String getPolicyId() { return policyId; } + public ClusterSpec setRemoteShuffleDiskIops(Long remoteShuffleDiskIops) { + this.remoteShuffleDiskIops = remoteShuffleDiskIops; + return this; + } + + public Long getRemoteShuffleDiskIops() { + return remoteShuffleDiskIops; + } + + public ClusterSpec setRemoteShuffleDiskThroughput(Long remoteShuffleDiskThroughput) { + this.remoteShuffleDiskThroughput = remoteShuffleDiskThroughput; + return this; + } + + public Long getRemoteShuffleDiskThroughput() { + return remoteShuffleDiskThroughput; + } + public ClusterSpec setRuntimeEngine(RuntimeEngine runtimeEngine) { this.runtimeEngine = runtimeEngine; return this; @@ -538,6 +568,15 @@ public Collection getSshPublicKeys() { return sshPublicKeys; } + public ClusterSpec setTotalInitialRemoteShuffleDiskSize(Long totalInitialRemoteShuffleDiskSize) { + this.totalInitialRemoteShuffleDiskSize = totalInitialRemoteShuffleDiskSize; + return this; + } + + public Long getTotalInitialRemoteShuffleDiskSize() { + return totalInitialRemoteShuffleDiskSize; + } + public ClusterSpec setUseMlRuntime(Boolean useMlRuntime) { this.useMlRuntime = useMlRuntime; return this; @@ -583,12 +622,15 @@ public boolean equals(Object o) { && Objects.equals(nodeTypeId, that.nodeTypeId) && Objects.equals(numWorkers, that.numWorkers) && Objects.equals(policyId, that.policyId) + && Objects.equals(remoteShuffleDiskIops, that.remoteShuffleDiskIops) + && Objects.equals(remoteShuffleDiskThroughput, that.remoteShuffleDiskThroughput) && Objects.equals(runtimeEngine, that.runtimeEngine) && Objects.equals(singleUserName, that.singleUserName) && Objects.equals(sparkConf, that.sparkConf) && Objects.equals(sparkEnvVars, that.sparkEnvVars) && Objects.equals(sparkVersion, that.sparkVersion) && Objects.equals(sshPublicKeys, that.sshPublicKeys) + && 
Objects.equals(totalInitialRemoteShuffleDiskSize, that.totalInitialRemoteShuffleDiskSize) && Objects.equals(useMlRuntime, that.useMlRuntime) && Objects.equals(workloadType, that.workloadType); } @@ -618,12 +660,15 @@ public int hashCode() { nodeTypeId, numWorkers, policyId, + remoteShuffleDiskIops, + remoteShuffleDiskThroughput, runtimeEngine, singleUserName, sparkConf, sparkEnvVars, sparkVersion, sshPublicKeys, + totalInitialRemoteShuffleDiskSize, useMlRuntime, workloadType); } @@ -653,12 +698,15 @@ public String toString() { .add("nodeTypeId", nodeTypeId) .add("numWorkers", numWorkers) .add("policyId", policyId) + .add("remoteShuffleDiskIops", remoteShuffleDiskIops) + .add("remoteShuffleDiskThroughput", remoteShuffleDiskThroughput) .add("runtimeEngine", runtimeEngine) .add("singleUserName", singleUserName) .add("sparkConf", sparkConf) .add("sparkEnvVars", sparkEnvVars) .add("sparkVersion", sparkVersion) .add("sshPublicKeys", sshPublicKeys) + .add("totalInitialRemoteShuffleDiskSize", totalInitialRemoteShuffleDiskSize) .add("useMlRuntime", useMlRuntime) .add("workloadType", workloadType) .toString(); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/CreateCluster.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/CreateCluster.java index 79853eda0..027bae1c8 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/CreateCluster.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/CreateCluster.java @@ -221,6 +221,14 @@ public class CreateCluster { @JsonProperty("policy_id") private String policyId; + /** If set, what the configurable IOPS for the remote shuffle disk is. */ + @JsonProperty("remote_shuffle_disk_iops") + private Long remoteShuffleDiskIops; + + /** If set, what the configurable throughput (in Mb/s) for the remote shuffle disk is. */ + @JsonProperty("remote_shuffle_disk_throughput") + private Long remoteShuffleDiskThroughput; + /** * Determines the cluster's runtime engine, either standard or Photon. * @@ -276,6 +284,10 @@ public class CreateCluster { @JsonProperty("ssh_public_keys") private Collection sshPublicKeys; + /** If set, what the total initial volume size (in GB) of the remote shuffle disks should be. */ + @JsonProperty("total_initial_remote_shuffle_disk_size") + private Long totalInitialRemoteShuffleDiskSize; + /** * This field can only be used when `kind = CLASSIC_PREVIEW`. 
* @@ -496,6 +508,24 @@ public String getPolicyId() { return policyId; } + public CreateCluster setRemoteShuffleDiskIops(Long remoteShuffleDiskIops) { + this.remoteShuffleDiskIops = remoteShuffleDiskIops; + return this; + } + + public Long getRemoteShuffleDiskIops() { + return remoteShuffleDiskIops; + } + + public CreateCluster setRemoteShuffleDiskThroughput(Long remoteShuffleDiskThroughput) { + this.remoteShuffleDiskThroughput = remoteShuffleDiskThroughput; + return this; + } + + public Long getRemoteShuffleDiskThroughput() { + return remoteShuffleDiskThroughput; + } + public CreateCluster setRuntimeEngine(RuntimeEngine runtimeEngine) { this.runtimeEngine = runtimeEngine; return this; @@ -550,6 +580,16 @@ public Collection getSshPublicKeys() { return sshPublicKeys; } + public CreateCluster setTotalInitialRemoteShuffleDiskSize( + Long totalInitialRemoteShuffleDiskSize) { + this.totalInitialRemoteShuffleDiskSize = totalInitialRemoteShuffleDiskSize; + return this; + } + + public Long getTotalInitialRemoteShuffleDiskSize() { + return totalInitialRemoteShuffleDiskSize; + } + public CreateCluster setUseMlRuntime(Boolean useMlRuntime) { this.useMlRuntime = useMlRuntime; return this; @@ -596,12 +636,15 @@ public boolean equals(Object o) { && Objects.equals(nodeTypeId, that.nodeTypeId) && Objects.equals(numWorkers, that.numWorkers) && Objects.equals(policyId, that.policyId) + && Objects.equals(remoteShuffleDiskIops, that.remoteShuffleDiskIops) + && Objects.equals(remoteShuffleDiskThroughput, that.remoteShuffleDiskThroughput) && Objects.equals(runtimeEngine, that.runtimeEngine) && Objects.equals(singleUserName, that.singleUserName) && Objects.equals(sparkConf, that.sparkConf) && Objects.equals(sparkEnvVars, that.sparkEnvVars) && Objects.equals(sparkVersion, that.sparkVersion) && Objects.equals(sshPublicKeys, that.sshPublicKeys) + && Objects.equals(totalInitialRemoteShuffleDiskSize, that.totalInitialRemoteShuffleDiskSize) && Objects.equals(useMlRuntime, that.useMlRuntime) && Objects.equals(workloadType, that.workloadType); } @@ -632,12 +675,15 @@ public int hashCode() { nodeTypeId, numWorkers, policyId, + remoteShuffleDiskIops, + remoteShuffleDiskThroughput, runtimeEngine, singleUserName, sparkConf, sparkEnvVars, sparkVersion, sshPublicKeys, + totalInitialRemoteShuffleDiskSize, useMlRuntime, workloadType); } @@ -668,12 +714,15 @@ public String toString() { .add("nodeTypeId", nodeTypeId) .add("numWorkers", numWorkers) .add("policyId", policyId) + .add("remoteShuffleDiskIops", remoteShuffleDiskIops) + .add("remoteShuffleDiskThroughput", remoteShuffleDiskThroughput) .add("runtimeEngine", runtimeEngine) .add("singleUserName", singleUserName) .add("sparkConf", sparkConf) .add("sparkEnvVars", sparkEnvVars) .add("sparkVersion", sparkVersion) .add("sshPublicKeys", sshPublicKeys) + .add("totalInitialRemoteShuffleDiskSize", totalInitialRemoteShuffleDiskSize) .add("useMlRuntime", useMlRuntime) .add("workloadType", workloadType) .toString(); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/EditCluster.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/EditCluster.java index 81c1b7e85..bbf12f00d 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/EditCluster.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/EditCluster.java @@ -218,6 +218,14 @@ public class EditCluster { @JsonProperty("policy_id") private String policyId; + /** If set, what the configurable IOPS for the remote shuffle disk is. 
*/ + @JsonProperty("remote_shuffle_disk_iops") + private Long remoteShuffleDiskIops; + + /** If set, what the configurable throughput (in Mb/s) for the remote shuffle disk is. */ + @JsonProperty("remote_shuffle_disk_throughput") + private Long remoteShuffleDiskThroughput; + /** * Determines the cluster's runtime engine, either standard or Photon. * @@ -273,6 +281,10 @@ public class EditCluster { @JsonProperty("ssh_public_keys") private Collection sshPublicKeys; + /** If set, what the total initial volume size (in GB) of the remote shuffle disks should be. */ + @JsonProperty("total_initial_remote_shuffle_disk_size") + private Long totalInitialRemoteShuffleDiskSize; + /** * This field can only be used when `kind = CLASSIC_PREVIEW`. * @@ -493,6 +505,24 @@ public String getPolicyId() { return policyId; } + public EditCluster setRemoteShuffleDiskIops(Long remoteShuffleDiskIops) { + this.remoteShuffleDiskIops = remoteShuffleDiskIops; + return this; + } + + public Long getRemoteShuffleDiskIops() { + return remoteShuffleDiskIops; + } + + public EditCluster setRemoteShuffleDiskThroughput(Long remoteShuffleDiskThroughput) { + this.remoteShuffleDiskThroughput = remoteShuffleDiskThroughput; + return this; + } + + public Long getRemoteShuffleDiskThroughput() { + return remoteShuffleDiskThroughput; + } + public EditCluster setRuntimeEngine(RuntimeEngine runtimeEngine) { this.runtimeEngine = runtimeEngine; return this; @@ -547,6 +577,15 @@ public Collection getSshPublicKeys() { return sshPublicKeys; } + public EditCluster setTotalInitialRemoteShuffleDiskSize(Long totalInitialRemoteShuffleDiskSize) { + this.totalInitialRemoteShuffleDiskSize = totalInitialRemoteShuffleDiskSize; + return this; + } + + public Long getTotalInitialRemoteShuffleDiskSize() { + return totalInitialRemoteShuffleDiskSize; + } + public EditCluster setUseMlRuntime(Boolean useMlRuntime) { this.useMlRuntime = useMlRuntime; return this; @@ -593,12 +632,15 @@ public boolean equals(Object o) { && Objects.equals(nodeTypeId, that.nodeTypeId) && Objects.equals(numWorkers, that.numWorkers) && Objects.equals(policyId, that.policyId) + && Objects.equals(remoteShuffleDiskIops, that.remoteShuffleDiskIops) + && Objects.equals(remoteShuffleDiskThroughput, that.remoteShuffleDiskThroughput) && Objects.equals(runtimeEngine, that.runtimeEngine) && Objects.equals(singleUserName, that.singleUserName) && Objects.equals(sparkConf, that.sparkConf) && Objects.equals(sparkEnvVars, that.sparkEnvVars) && Objects.equals(sparkVersion, that.sparkVersion) && Objects.equals(sshPublicKeys, that.sshPublicKeys) + && Objects.equals(totalInitialRemoteShuffleDiskSize, that.totalInitialRemoteShuffleDiskSize) && Objects.equals(useMlRuntime, that.useMlRuntime) && Objects.equals(workloadType, that.workloadType); } @@ -629,12 +671,15 @@ public int hashCode() { nodeTypeId, numWorkers, policyId, + remoteShuffleDiskIops, + remoteShuffleDiskThroughput, runtimeEngine, singleUserName, sparkConf, sparkEnvVars, sparkVersion, sshPublicKeys, + totalInitialRemoteShuffleDiskSize, useMlRuntime, workloadType); } @@ -665,12 +710,15 @@ public String toString() { .add("nodeTypeId", nodeTypeId) .add("numWorkers", numWorkers) .add("policyId", policyId) + .add("remoteShuffleDiskIops", remoteShuffleDiskIops) + .add("remoteShuffleDiskThroughput", remoteShuffleDiskThroughput) .add("runtimeEngine", runtimeEngine) .add("singleUserName", singleUserName) .add("sparkConf", sparkConf) .add("sparkEnvVars", sparkEnvVars) .add("sparkVersion", sparkVersion) .add("sshPublicKeys", sshPublicKeys) + 
.add("totalInitialRemoteShuffleDiskSize", totalInitialRemoteShuffleDiskSize) .add("useMlRuntime", useMlRuntime) .add("workloadType", workloadType) .toString(); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/UpdateClusterResource.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/UpdateClusterResource.java index 151d44359..7d3e13c7d 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/UpdateClusterResource.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/UpdateClusterResource.java @@ -207,6 +207,14 @@ public class UpdateClusterResource { @JsonProperty("policy_id") private String policyId; + /** If set, what the configurable IOPS for the remote shuffle disk is. */ + @JsonProperty("remote_shuffle_disk_iops") + private Long remoteShuffleDiskIops; + + /** If set, what the configurable throughput (in Mb/s) for the remote shuffle disk is. */ + @JsonProperty("remote_shuffle_disk_throughput") + private Long remoteShuffleDiskThroughput; + /** * Determines the cluster's runtime engine, either standard or Photon. * @@ -262,6 +270,10 @@ public class UpdateClusterResource { @JsonProperty("ssh_public_keys") private Collection sshPublicKeys; + /** If set, what the total initial volume size (in GB) of the remote shuffle disks should be. */ + @JsonProperty("total_initial_remote_shuffle_disk_size") + private Long totalInitialRemoteShuffleDiskSize; + /** * This field can only be used when `kind = CLASSIC_PREVIEW`. * @@ -464,6 +476,24 @@ public String getPolicyId() { return policyId; } + public UpdateClusterResource setRemoteShuffleDiskIops(Long remoteShuffleDiskIops) { + this.remoteShuffleDiskIops = remoteShuffleDiskIops; + return this; + } + + public Long getRemoteShuffleDiskIops() { + return remoteShuffleDiskIops; + } + + public UpdateClusterResource setRemoteShuffleDiskThroughput(Long remoteShuffleDiskThroughput) { + this.remoteShuffleDiskThroughput = remoteShuffleDiskThroughput; + return this; + } + + public Long getRemoteShuffleDiskThroughput() { + return remoteShuffleDiskThroughput; + } + public UpdateClusterResource setRuntimeEngine(RuntimeEngine runtimeEngine) { this.runtimeEngine = runtimeEngine; return this; @@ -518,6 +548,16 @@ public Collection getSshPublicKeys() { return sshPublicKeys; } + public UpdateClusterResource setTotalInitialRemoteShuffleDiskSize( + Long totalInitialRemoteShuffleDiskSize) { + this.totalInitialRemoteShuffleDiskSize = totalInitialRemoteShuffleDiskSize; + return this; + } + + public Long getTotalInitialRemoteShuffleDiskSize() { + return totalInitialRemoteShuffleDiskSize; + } + public UpdateClusterResource setUseMlRuntime(Boolean useMlRuntime) { this.useMlRuntime = useMlRuntime; return this; @@ -562,12 +602,15 @@ public boolean equals(Object o) { && Objects.equals(nodeTypeId, that.nodeTypeId) && Objects.equals(numWorkers, that.numWorkers) && Objects.equals(policyId, that.policyId) + && Objects.equals(remoteShuffleDiskIops, that.remoteShuffleDiskIops) + && Objects.equals(remoteShuffleDiskThroughput, that.remoteShuffleDiskThroughput) && Objects.equals(runtimeEngine, that.runtimeEngine) && Objects.equals(singleUserName, that.singleUserName) && Objects.equals(sparkConf, that.sparkConf) && Objects.equals(sparkEnvVars, that.sparkEnvVars) && Objects.equals(sparkVersion, that.sparkVersion) && Objects.equals(sshPublicKeys, that.sshPublicKeys) + && Objects.equals(totalInitialRemoteShuffleDiskSize, that.totalInitialRemoteShuffleDiskSize) && Objects.equals(useMlRuntime, 
that.useMlRuntime) && Objects.equals(workloadType, that.workloadType); } @@ -596,12 +639,15 @@ public int hashCode() { nodeTypeId, numWorkers, policyId, + remoteShuffleDiskIops, + remoteShuffleDiskThroughput, runtimeEngine, singleUserName, sparkConf, sparkEnvVars, sparkVersion, sshPublicKeys, + totalInitialRemoteShuffleDiskSize, useMlRuntime, workloadType); } @@ -630,12 +676,15 @@ public String toString() { .add("nodeTypeId", nodeTypeId) .add("numWorkers", numWorkers) .add("policyId", policyId) + .add("remoteShuffleDiskIops", remoteShuffleDiskIops) + .add("remoteShuffleDiskThroughput", remoteShuffleDiskThroughput) .add("runtimeEngine", runtimeEngine) .add("singleUserName", singleUserName) .add("sparkConf", sparkConf) .add("sparkEnvVars", sparkEnvVars) .add("sparkVersion", sparkVersion) .add("sshPublicKeys", sshPublicKeys) + .add("totalInitialRemoteShuffleDiskSize", totalInitialRemoteShuffleDiskSize) .add("useMlRuntime", useMlRuntime) .add("workloadType", workloadType) .toString(); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/CreatePipeline.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/CreatePipeline.java index 7533ab6c4..b81984eca 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/CreatePipeline.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/CreatePipeline.java @@ -134,6 +134,14 @@ public class CreatePipeline { @JsonProperty("storage") private String storage; + /** + * A map of tags associated with the pipeline. These are forwarded to the cluster as cluster tags, + * and are therefore subject to the same limitations. A maximum of 25 tags can be added to the + * pipeline. + */ + @JsonProperty("tags") + private Map tags; + /** * Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target` * must be specified. To publish to Unity Catalog, also specify `catalog`. This legacy field is @@ -380,6 +388,15 @@ public String getStorage() { return storage; } + public CreatePipeline setTags(Map tags) { + this.tags = tags; + return this; + } + + public Map getTags() { + return tags; + } + public CreatePipeline setTarget(String target) { this.target = target; return this; @@ -429,6 +446,7 @@ public boolean equals(Object o) { && Objects.equals(schema, that.schema) && Objects.equals(serverless, that.serverless) && Objects.equals(storage, that.storage) + && Objects.equals(tags, that.tags) && Objects.equals(target, that.target) && Objects.equals(trigger, that.trigger); } @@ -462,6 +480,7 @@ public int hashCode() { schema, serverless, storage, + tags, target, trigger); } @@ -495,6 +514,7 @@ public String toString() { .add("schema", schema) .add("serverless", serverless) .add("storage", storage) + .add("tags", tags) .add("target", target) .add("trigger", trigger) .toString(); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/EditPipeline.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/EditPipeline.java index 444759473..776b17166 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/EditPipeline.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/EditPipeline.java @@ -143,6 +143,14 @@ public class EditPipeline { @JsonProperty("storage") private String storage; + /** + * A map of tags associated with the pipeline. These are forwarded to the cluster as cluster tags, + * and are therefore subject to the same limitations. 
A maximum of 25 tags can be added to the + * pipeline. + */ + @JsonProperty("tags") + private Map tags; + /** * Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target` * must be specified. To publish to Unity Catalog, also specify `catalog`. This legacy field is @@ -398,6 +406,15 @@ public String getStorage() { return storage; } + public EditPipeline setTags(Map tags) { + this.tags = tags; + return this; + } + + public Map getTags() { + return tags; + } + public EditPipeline setTarget(String target) { this.target = target; return this; @@ -448,6 +465,7 @@ public boolean equals(Object o) { && Objects.equals(schema, that.schema) && Objects.equals(serverless, that.serverless) && Objects.equals(storage, that.storage) + && Objects.equals(tags, that.tags) && Objects.equals(target, that.target) && Objects.equals(trigger, that.trigger); } @@ -482,6 +500,7 @@ public int hashCode() { schema, serverless, storage, + tags, target, trigger); } @@ -516,6 +535,7 @@ public String toString() { .add("schema", schema) .add("serverless", serverless) .add("storage", storage) + .add("tags", tags) .add("target", target) .add("trigger", trigger) .toString(); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/IngestionSourceType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/IngestionSourceType.java index c7620bc7f..272a8235d 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/IngestionSourceType.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/IngestionSourceType.java @@ -17,5 +17,6 @@ public enum IngestionSourceType { SERVICENOW, SHAREPOINT, SQLSERVER, + TERADATA, WORKDAY_RAAS, } diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/PipelineSpec.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/PipelineSpec.java index 913972a57..b4c5c4d8e 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/PipelineSpec.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/PipelineSpec.java @@ -115,6 +115,14 @@ public class PipelineSpec { @JsonProperty("storage") private String storage; + /** + * A map of tags associated with the pipeline. These are forwarded to the cluster as cluster tags, + * and are therefore subject to the same limitations. A maximum of 25 tags can be added to the + * pipeline. + */ + @JsonProperty("tags") + private Map tags; + /** * Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target` * must be specified. To publish to Unity Catalog, also specify `catalog`. 
This legacy field is @@ -334,6 +342,15 @@ public String getStorage() { return storage; } + public PipelineSpec setTags(Map tags) { + this.tags = tags; + return this; + } + + public Map getTags() { + return tags; + } + public PipelineSpec setTarget(String target) { this.target = target; return this; @@ -380,6 +397,7 @@ public boolean equals(Object o) { && Objects.equals(schema, that.schema) && Objects.equals(serverless, that.serverless) && Objects.equals(storage, that.storage) + && Objects.equals(tags, that.tags) && Objects.equals(target, that.target) && Objects.equals(trigger, that.trigger); } @@ -410,6 +428,7 @@ public int hashCode() { schema, serverless, storage, + tags, target, trigger); } @@ -440,6 +459,7 @@ public String toString() { .add("schema", schema) .add("serverless", serverless) .add("storage", storage) + .add("tags", tags) .add("target", target) .add("trigger", trigger) .toString(); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedEntityInput.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedEntityInput.java index 9e9593df2..ca9ccf251 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedEntityInput.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedEntityInput.java @@ -48,10 +48,24 @@ public class ServedEntityInput { @JsonProperty("instance_profile_arn") private String instanceProfileArn; + /** + * The maximum provisioned concurrency that the endpoint can scale up to. Do not use if + * workload_size is specified. + */ + @JsonProperty("max_provisioned_concurrency") + private Long maxProvisionedConcurrency; + /** The maximum tokens per second that the endpoint can scale up to. */ @JsonProperty("max_provisioned_throughput") private Long maxProvisionedThroughput; + /** + * The minimum provisioned concurrency that the endpoint can scale down to. Do not use if + * workload_size is specified. + */ + @JsonProperty("min_provisioned_concurrency") + private Long minProvisionedConcurrency; + /** The minimum tokens per second that the endpoint can scale down to. */ @JsonProperty("min_provisioned_throughput") private Long minProvisionedThroughput; @@ -80,7 +94,8 @@ public class ServedEntityInput { * concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned * concurrency). Additional custom workload sizes can also be used when available in the * workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each - * workload size is 0. + * workload size is 0. Do not use if min_provisioned_concurrency and max_provisioned_concurrency + * are specified. 
*/ @JsonProperty("workload_size") private String workloadSize; @@ -142,6 +157,15 @@ public String getInstanceProfileArn() { return instanceProfileArn; } + public ServedEntityInput setMaxProvisionedConcurrency(Long maxProvisionedConcurrency) { + this.maxProvisionedConcurrency = maxProvisionedConcurrency; + return this; + } + + public Long getMaxProvisionedConcurrency() { + return maxProvisionedConcurrency; + } + public ServedEntityInput setMaxProvisionedThroughput(Long maxProvisionedThroughput) { this.maxProvisionedThroughput = maxProvisionedThroughput; return this; @@ -151,6 +175,15 @@ public Long getMaxProvisionedThroughput() { return maxProvisionedThroughput; } + public ServedEntityInput setMinProvisionedConcurrency(Long minProvisionedConcurrency) { + this.minProvisionedConcurrency = minProvisionedConcurrency; + return this; + } + + public Long getMinProvisionedConcurrency() { + return minProvisionedConcurrency; + } + public ServedEntityInput setMinProvisionedThroughput(Long minProvisionedThroughput) { this.minProvisionedThroughput = minProvisionedThroughput; return this; @@ -215,7 +248,9 @@ public boolean equals(Object o) { && Objects.equals(environmentVars, that.environmentVars) && Objects.equals(externalModel, that.externalModel) && Objects.equals(instanceProfileArn, that.instanceProfileArn) + && Objects.equals(maxProvisionedConcurrency, that.maxProvisionedConcurrency) && Objects.equals(maxProvisionedThroughput, that.maxProvisionedThroughput) + && Objects.equals(minProvisionedConcurrency, that.minProvisionedConcurrency) && Objects.equals(minProvisionedThroughput, that.minProvisionedThroughput) && Objects.equals(name, that.name) && Objects.equals(provisionedModelUnits, that.provisionedModelUnits) @@ -232,7 +267,9 @@ public int hashCode() { environmentVars, externalModel, instanceProfileArn, + maxProvisionedConcurrency, maxProvisionedThroughput, + minProvisionedConcurrency, minProvisionedThroughput, name, provisionedModelUnits, @@ -249,7 +286,9 @@ public String toString() { .add("environmentVars", environmentVars) .add("externalModel", externalModel) .add("instanceProfileArn", instanceProfileArn) + .add("maxProvisionedConcurrency", maxProvisionedConcurrency) .add("maxProvisionedThroughput", maxProvisionedThroughput) + .add("minProvisionedConcurrency", minProvisionedConcurrency) .add("minProvisionedThroughput", minProvisionedThroughput) .add("name", name) .add("provisionedModelUnits", provisionedModelUnits) diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedEntityOutput.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedEntityOutput.java index 74b58f742..129841ac9 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedEntityOutput.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedEntityOutput.java @@ -63,10 +63,24 @@ public class ServedEntityOutput { @JsonProperty("instance_profile_arn") private String instanceProfileArn; + /** + * The maximum provisioned concurrency that the endpoint can scale up to. Do not use if + * workload_size is specified. + */ + @JsonProperty("max_provisioned_concurrency") + private Long maxProvisionedConcurrency; + /** The maximum tokens per second that the endpoint can scale up to. */ @JsonProperty("max_provisioned_throughput") private Long maxProvisionedThroughput; + /** + * The minimum provisioned concurrency that the endpoint can scale down to. Do not use if + * workload_size is specified. 
+ */ + @JsonProperty("min_provisioned_concurrency") + private Long minProvisionedConcurrency; + /** The minimum tokens per second that the endpoint can scale down to. */ @JsonProperty("min_provisioned_throughput") private Long minProvisionedThroughput; @@ -99,7 +113,8 @@ public class ServedEntityOutput { * concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned * concurrency). Additional custom workload sizes can also be used when available in the * workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each - * workload size is 0. + * workload size is 0. Do not use if min_provisioned_concurrency and max_provisioned_concurrency + * are specified. */ @JsonProperty("workload_size") private String workloadSize; @@ -188,6 +203,15 @@ public String getInstanceProfileArn() { return instanceProfileArn; } + public ServedEntityOutput setMaxProvisionedConcurrency(Long maxProvisionedConcurrency) { + this.maxProvisionedConcurrency = maxProvisionedConcurrency; + return this; + } + + public Long getMaxProvisionedConcurrency() { + return maxProvisionedConcurrency; + } + public ServedEntityOutput setMaxProvisionedThroughput(Long maxProvisionedThroughput) { this.maxProvisionedThroughput = maxProvisionedThroughput; return this; @@ -197,6 +221,15 @@ public Long getMaxProvisionedThroughput() { return maxProvisionedThroughput; } + public ServedEntityOutput setMinProvisionedConcurrency(Long minProvisionedConcurrency) { + this.minProvisionedConcurrency = minProvisionedConcurrency; + return this; + } + + public Long getMinProvisionedConcurrency() { + return minProvisionedConcurrency; + } + public ServedEntityOutput setMinProvisionedThroughput(Long minProvisionedThroughput) { this.minProvisionedThroughput = minProvisionedThroughput; return this; @@ -273,7 +306,9 @@ public boolean equals(Object o) { && Objects.equals(externalModel, that.externalModel) && Objects.equals(foundationModel, that.foundationModel) && Objects.equals(instanceProfileArn, that.instanceProfileArn) + && Objects.equals(maxProvisionedConcurrency, that.maxProvisionedConcurrency) && Objects.equals(maxProvisionedThroughput, that.maxProvisionedThroughput) + && Objects.equals(minProvisionedConcurrency, that.minProvisionedConcurrency) && Objects.equals(minProvisionedThroughput, that.minProvisionedThroughput) && Objects.equals(name, that.name) && Objects.equals(provisionedModelUnits, that.provisionedModelUnits) @@ -294,7 +329,9 @@ public int hashCode() { externalModel, foundationModel, instanceProfileArn, + maxProvisionedConcurrency, maxProvisionedThroughput, + minProvisionedConcurrency, minProvisionedThroughput, name, provisionedModelUnits, @@ -315,7 +352,9 @@ public String toString() { .add("externalModel", externalModel) .add("foundationModel", foundationModel) .add("instanceProfileArn", instanceProfileArn) + .add("maxProvisionedConcurrency", maxProvisionedConcurrency) .add("maxProvisionedThroughput", maxProvisionedThroughput) + .add("minProvisionedConcurrency", minProvisionedConcurrency) .add("minProvisionedThroughput", minProvisionedThroughput) .add("name", name) .add("provisionedModelUnits", provisionedModelUnits) diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedModelInput.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedModelInput.java index 907d88d17..93b608063 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedModelInput.java +++ 
b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedModelInput.java @@ -23,10 +23,24 @@ public class ServedModelInput { @JsonProperty("instance_profile_arn") private String instanceProfileArn; + /** + * The maximum provisioned concurrency that the endpoint can scale up to. Do not use if + * workload_size is specified. + */ + @JsonProperty("max_provisioned_concurrency") + private Long maxProvisionedConcurrency; + /** The maximum tokens per second that the endpoint can scale up to. */ @JsonProperty("max_provisioned_throughput") private Long maxProvisionedThroughput; + /** + * The minimum provisioned concurrency that the endpoint can scale down to. Do not use if + * workload_size is specified. + */ + @JsonProperty("min_provisioned_concurrency") + private Long minProvisionedConcurrency; + /** The minimum tokens per second that the endpoint can scale down to. */ @JsonProperty("min_provisioned_throughput") private Long minProvisionedThroughput; @@ -63,7 +77,8 @@ public class ServedModelInput { * concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned * concurrency). Additional custom workload sizes can also be used when available in the * workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each - * workload size is 0. + * workload size is 0. Do not use if min_provisioned_concurrency and max_provisioned_concurrency + * are specified. */ @JsonProperty("workload_size") private String workloadSize; @@ -98,6 +113,15 @@ public String getInstanceProfileArn() { return instanceProfileArn; } + public ServedModelInput setMaxProvisionedConcurrency(Long maxProvisionedConcurrency) { + this.maxProvisionedConcurrency = maxProvisionedConcurrency; + return this; + } + + public Long getMaxProvisionedConcurrency() { + return maxProvisionedConcurrency; + } + public ServedModelInput setMaxProvisionedThroughput(Long maxProvisionedThroughput) { this.maxProvisionedThroughput = maxProvisionedThroughput; return this; @@ -107,6 +131,15 @@ public Long getMaxProvisionedThroughput() { return maxProvisionedThroughput; } + public ServedModelInput setMinProvisionedConcurrency(Long minProvisionedConcurrency) { + this.minProvisionedConcurrency = minProvisionedConcurrency; + return this; + } + + public Long getMinProvisionedConcurrency() { + return minProvisionedConcurrency; + } + public ServedModelInput setMinProvisionedThroughput(Long minProvisionedThroughput) { this.minProvisionedThroughput = minProvisionedThroughput; return this; @@ -186,7 +219,9 @@ public boolean equals(Object o) { ServedModelInput that = (ServedModelInput) o; return Objects.equals(environmentVars, that.environmentVars) && Objects.equals(instanceProfileArn, that.instanceProfileArn) + && Objects.equals(maxProvisionedConcurrency, that.maxProvisionedConcurrency) && Objects.equals(maxProvisionedThroughput, that.maxProvisionedThroughput) + && Objects.equals(minProvisionedConcurrency, that.minProvisionedConcurrency) && Objects.equals(minProvisionedThroughput, that.minProvisionedThroughput) && Objects.equals(modelName, that.modelName) && Objects.equals(modelVersion, that.modelVersion) @@ -202,7 +237,9 @@ public int hashCode() { return Objects.hash( environmentVars, instanceProfileArn, + maxProvisionedConcurrency, maxProvisionedThroughput, + minProvisionedConcurrency, minProvisionedThroughput, modelName, modelVersion, @@ -218,7 +255,9 @@ public String toString() { return new ToStringer(ServedModelInput.class) .add("environmentVars", environmentVars) 
.add("instanceProfileArn", instanceProfileArn) + .add("maxProvisionedConcurrency", maxProvisionedConcurrency) .add("maxProvisionedThroughput", maxProvisionedThroughput) + .add("minProvisionedConcurrency", minProvisionedConcurrency) .add("minProvisionedThroughput", minProvisionedThroughput) .add("modelName", modelName) .add("modelVersion", modelVersion) diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedModelOutput.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedModelOutput.java index eabfc4a48..dfdc57241 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedModelOutput.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedModelOutput.java @@ -31,6 +31,20 @@ public class ServedModelOutput { @JsonProperty("instance_profile_arn") private String instanceProfileArn; + /** + * The maximum provisioned concurrency that the endpoint can scale up to. Do not use if + * workload_size is specified. + */ + @JsonProperty("max_provisioned_concurrency") + private Long maxProvisionedConcurrency; + + /** + * The minimum provisioned concurrency that the endpoint can scale down to. Do not use if + * workload_size is specified. + */ + @JsonProperty("min_provisioned_concurrency") + private Long minProvisionedConcurrency; + /** */ @JsonProperty("model_name") private String modelName; @@ -67,7 +81,8 @@ public class ServedModelOutput { * concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned * concurrency). Additional custom workload sizes can also be used when available in the * workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each - * workload size is 0. + * workload size is 0. Do not use if min_provisioned_concurrency and max_provisioned_concurrency + * are specified. 
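(Illustrative usage, not part of the diff above: the new min/max provisioned concurrency fields give explicit bounds instead of a workload size. A minimal sketch; the entity name and version are placeholders, and the object would then be attached to an endpoint config before calling the serving-endpoints API.)

import com.databricks.sdk.service.serving.ServedEntityInput;

public class ProvisionedConcurrencyExample {
  public static void main(String[] args) {
    ServedEntityInput entity =
        new ServedEntityInput()
            .setEntityName("main.models.my_model") // hypothetical UC-registered model
            .setEntityVersion("1")
            .setScaleToZeroEnabled(true)
            .setMinProvisionedConcurrency(0L) // lower bound the endpoint can scale down to
            .setMaxProvisionedConcurrency(8L); // upper bound; omit workloadSize when using these
    System.out.println(entity);
  }
}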
*/ @JsonProperty("workload_size") private String workloadSize; @@ -120,6 +135,24 @@ public String getInstanceProfileArn() { return instanceProfileArn; } + public ServedModelOutput setMaxProvisionedConcurrency(Long maxProvisionedConcurrency) { + this.maxProvisionedConcurrency = maxProvisionedConcurrency; + return this; + } + + public Long getMaxProvisionedConcurrency() { + return maxProvisionedConcurrency; + } + + public ServedModelOutput setMinProvisionedConcurrency(Long minProvisionedConcurrency) { + this.minProvisionedConcurrency = minProvisionedConcurrency; + return this; + } + + public Long getMinProvisionedConcurrency() { + return minProvisionedConcurrency; + } + public ServedModelOutput setModelName(String modelName) { this.modelName = modelName; return this; @@ -201,6 +234,8 @@ public boolean equals(Object o) { && Objects.equals(creator, that.creator) && Objects.equals(environmentVars, that.environmentVars) && Objects.equals(instanceProfileArn, that.instanceProfileArn) + && Objects.equals(maxProvisionedConcurrency, that.maxProvisionedConcurrency) + && Objects.equals(minProvisionedConcurrency, that.minProvisionedConcurrency) && Objects.equals(modelName, that.modelName) && Objects.equals(modelVersion, that.modelVersion) && Objects.equals(name, that.name) @@ -218,6 +253,8 @@ public int hashCode() { creator, environmentVars, instanceProfileArn, + maxProvisionedConcurrency, + minProvisionedConcurrency, modelName, modelVersion, name, @@ -235,6 +272,8 @@ public String toString() { .add("creator", creator) .add("environmentVars", environmentVars) .add("instanceProfileArn", instanceProfileArn) + .add("maxProvisionedConcurrency", maxProvisionedConcurrency) + .add("minProvisionedConcurrency", minProvisionedConcurrency) .add("modelName", modelName) .add("modelVersion", modelVersion) .add("name", name) diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptions.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptions.java new file mode 100755 index 000000000..1ba9dcb49 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptions.java @@ -0,0 +1,86 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.settings; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; + +@Generated +public class DashboardEmailSubscriptions { + /** */ + @JsonProperty("boolean_val") + private BooleanMessage booleanVal; + + /** + * etag used for versioning. The response is at least as fresh as the eTag provided. This is used + * for optimistic concurrency control as a way to help prevent simultaneous writes of a setting + * overwriting each other. It is strongly suggested that systems make use of the etag in the read + * -> update pattern to perform setting updates in order to avoid race conditions. That is, get an + * etag from a GET request, and pass it with the PATCH request to identify the setting version you + * are updating. + */ + @JsonProperty("etag") + private String etag; + + /** + * Name of the corresponding setting. This field is populated in the response, but it will not be + * respected even if it's set in the request body. The setting name in the path parameter will be + * respected instead. 
Setting name is required to be 'default' if the setting only has one + * instance per workspace. + */ + @JsonProperty("setting_name") + private String settingName; + + public DashboardEmailSubscriptions setBooleanVal(BooleanMessage booleanVal) { + this.booleanVal = booleanVal; + return this; + } + + public BooleanMessage getBooleanVal() { + return booleanVal; + } + + public DashboardEmailSubscriptions setEtag(String etag) { + this.etag = etag; + return this; + } + + public String getEtag() { + return etag; + } + + public DashboardEmailSubscriptions setSettingName(String settingName) { + this.settingName = settingName; + return this; + } + + public String getSettingName() { + return settingName; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + DashboardEmailSubscriptions that = (DashboardEmailSubscriptions) o; + return Objects.equals(booleanVal, that.booleanVal) + && Objects.equals(etag, that.etag) + && Objects.equals(settingName, that.settingName); + } + + @Override + public int hashCode() { + return Objects.hash(booleanVal, etag, settingName); + } + + @Override + public String toString() { + return new ToStringer(DashboardEmailSubscriptions.class) + .add("booleanVal", booleanVal) + .add("etag", etag) + .add("settingName", settingName) + .toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsAPI.java new file mode 100755 index 000000000..daf85f77f --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsAPI.java @@ -0,0 +1,70 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +package com.databricks.sdk.service.settings; + +import com.databricks.sdk.core.ApiClient; +import com.databricks.sdk.support.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Controls whether schedules or workload tasks for refreshing AI/BI Dashboards in the workspace can + * send subscription emails containing PDFs and/or images of the dashboard. By default, this setting + * is enabled (set to `true`) + */ +@Generated +public class DashboardEmailSubscriptionsAPI { + private static final Logger LOG = LoggerFactory.getLogger(DashboardEmailSubscriptionsAPI.class); + + private final DashboardEmailSubscriptionsService impl; + + /** Regular-use constructor */ + public DashboardEmailSubscriptionsAPI(ApiClient apiClient) { + impl = new DashboardEmailSubscriptionsImpl(apiClient); + } + + /** Constructor for mocks */ + public DashboardEmailSubscriptionsAPI(DashboardEmailSubscriptionsService mock) { + impl = mock; + } + + /** + * Delete the Dashboard Email Subscriptions setting. + * + *
<p>
Reverts the Dashboard Email Subscriptions setting to its default value. + */ + public DeleteDashboardEmailSubscriptionsResponse delete( + DeleteDashboardEmailSubscriptionsRequest request) { + return impl.delete(request); + } + + /** + * Get the Dashboard Email Subscriptions setting. + * + *
<p>
Gets the Dashboard Email Subscriptions setting. + */ + public DashboardEmailSubscriptions get(GetDashboardEmailSubscriptionsRequest request) { + return impl.get(request); + } + + public DashboardEmailSubscriptions update( + boolean allowMissing, DashboardEmailSubscriptions setting, String fieldMask) { + return update( + new UpdateDashboardEmailSubscriptionsRequest() + .setAllowMissing(allowMissing) + .setSetting(setting) + .setFieldMask(fieldMask)); + } + + /** + * Update the Dashboard Email Subscriptions setting. + * + *
<p>
Updates the Dashboard Email Subscriptions setting. + */ + public DashboardEmailSubscriptions update(UpdateDashboardEmailSubscriptionsRequest request) { + return impl.update(request); + } + + public DashboardEmailSubscriptionsService impl() { + return impl; + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsImpl.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsImpl.java new file mode 100755 index 000000000..767cb5e75 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsImpl.java @@ -0,0 +1,59 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +package com.databricks.sdk.service.settings; + +import com.databricks.sdk.core.ApiClient; +import com.databricks.sdk.core.DatabricksException; +import com.databricks.sdk.core.http.Request; +import com.databricks.sdk.support.Generated; +import java.io.IOException; + +/** Package-local implementation of DashboardEmailSubscriptions */ +@Generated +class DashboardEmailSubscriptionsImpl implements DashboardEmailSubscriptionsService { + private final ApiClient apiClient; + + public DashboardEmailSubscriptionsImpl(ApiClient apiClient) { + this.apiClient = apiClient; + } + + @Override + public DeleteDashboardEmailSubscriptionsResponse delete( + DeleteDashboardEmailSubscriptionsRequest request) { + String path = "/api/2.0/settings/types/dashboard_email_subscriptions/names/default"; + try { + Request req = new Request("DELETE", path); + ApiClient.setQuery(req, request); + req.withHeader("Accept", "application/json"); + return apiClient.execute(req, DeleteDashboardEmailSubscriptionsResponse.class); + } catch (IOException e) { + throw new DatabricksException("IO error: " + e.getMessage(), e); + } + } + + @Override + public DashboardEmailSubscriptions get(GetDashboardEmailSubscriptionsRequest request) { + String path = "/api/2.0/settings/types/dashboard_email_subscriptions/names/default"; + try { + Request req = new Request("GET", path); + ApiClient.setQuery(req, request); + req.withHeader("Accept", "application/json"); + return apiClient.execute(req, DashboardEmailSubscriptions.class); + } catch (IOException e) { + throw new DatabricksException("IO error: " + e.getMessage(), e); + } + } + + @Override + public DashboardEmailSubscriptions update(UpdateDashboardEmailSubscriptionsRequest request) { + String path = "/api/2.0/settings/types/dashboard_email_subscriptions/names/default"; + try { + Request req = new Request("PATCH", path, apiClient.serialize(request)); + ApiClient.setQuery(req, request); + req.withHeader("Accept", "application/json"); + req.withHeader("Content-Type", "application/json"); + return apiClient.execute(req, DashboardEmailSubscriptions.class); + } catch (IOException e) { + throw new DatabricksException("IO error: " + e.getMessage(), e); + } + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsService.java new file mode 100755 index 000000000..1dbc66188 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsService.java @@ -0,0 +1,40 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+package com.databricks.sdk.service.settings; + +import com.databricks.sdk.support.Generated; + +/** + * Controls whether schedules or workload tasks for refreshing AI/BI Dashboards in the workspace can + * send subscription emails containing PDFs and/or images of the dashboard. By default, this setting + * is enabled (set to `true`) + * + *
<p>
This is the high-level interface, that contains generated methods. + * + *
<p>
Evolving: this interface is under development. Method signatures may change. + */ +@Generated +public interface DashboardEmailSubscriptionsService { + /** + * Delete the Dashboard Email Subscriptions setting. + * + *
<p>
Reverts the Dashboard Email Subscriptions setting to its default value. + */ + DeleteDashboardEmailSubscriptionsResponse delete( + DeleteDashboardEmailSubscriptionsRequest deleteDashboardEmailSubscriptionsRequest); + + /** + * Get the Dashboard Email Subscriptions setting. + * + *
<p>
Gets the Dashboard Email Subscriptions setting. + */ + DashboardEmailSubscriptions get( + GetDashboardEmailSubscriptionsRequest getDashboardEmailSubscriptionsRequest); + + /** + * Update the Dashboard Email Subscriptions setting. + * + *
<p>
Updates the Dashboard Email Subscriptions setting. + */ + DashboardEmailSubscriptions update( + UpdateDashboardEmailSubscriptionsRequest updateDashboardEmailSubscriptionsRequest); +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDashboardEmailSubscriptionsRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDashboardEmailSubscriptionsRequest.java new file mode 100755 index 000000000..8d3d36912 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDashboardEmailSubscriptionsRequest.java @@ -0,0 +1,54 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.settings; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.QueryParam; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonIgnore; +import java.util.Objects; + +/** Delete the Dashboard Email Subscriptions setting */ +@Generated +public class DeleteDashboardEmailSubscriptionsRequest { + /** + * etag used for versioning. The response is at least as fresh as the eTag provided. This is used + * for optimistic concurrency control as a way to help prevent simultaneous writes of a setting + * overwriting each other. It is strongly suggested that systems make use of the etag in the read + * -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get + * an etag from a GET request, and pass it with the DELETE request to identify the rule set + * version you are deleting. + */ + @JsonIgnore + @QueryParam("etag") + private String etag; + + public DeleteDashboardEmailSubscriptionsRequest setEtag(String etag) { + this.etag = etag; + return this; + } + + public String getEtag() { + return etag; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + DeleteDashboardEmailSubscriptionsRequest that = (DeleteDashboardEmailSubscriptionsRequest) o; + return Objects.equals(etag, that.etag); + } + + @Override + public int hashCode() { + return Objects.hash(etag); + } + + @Override + public String toString() { + return new ToStringer(DeleteDashboardEmailSubscriptionsRequest.class) + .add("etag", etag) + .toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDashboardEmailSubscriptionsResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDashboardEmailSubscriptionsResponse.java new file mode 100755 index 000000000..1cfa511ae --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDashboardEmailSubscriptionsResponse.java @@ -0,0 +1,52 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.settings; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; + +/** The etag is returned. */ +@Generated +public class DeleteDashboardEmailSubscriptionsResponse { + /** + * etag used for versioning. The response is at least as fresh as the eTag provided. This is used + * for optimistic concurrency control as a way to help prevent simultaneous writes of a setting + * overwriting each other. 
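(Illustrative usage, not part of the diff above: the read -> update pattern the etag documentation describes, using the SettingsAPI accessor and the convenience update overload introduced in this change. It assumes BooleanMessage carries the flag via a fluent setValue and that "boolean_val" is a valid field mask; both are assumptions, not confirmed here.)

import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.settings.BooleanMessage;
import com.databricks.sdk.service.settings.DashboardEmailSubscriptions;
import com.databricks.sdk.service.settings.GetDashboardEmailSubscriptionsRequest;

public class DashboardEmailSubscriptionsExample {
  public static void main(String[] args) {
    WorkspaceClient w = new WorkspaceClient();
    // Read first so the PATCH can carry the current etag (optimistic concurrency control).
    DashboardEmailSubscriptions current =
        w.settings()
            .DashboardEmailSubscriptions()
            .get(new GetDashboardEmailSubscriptionsRequest());
    // Disable dashboard subscription emails for the workspace.
    DashboardEmailSubscriptions updated =
        w.settings()
            .DashboardEmailSubscriptions()
            .update(
                true, // allow_missing: always true for the Settings API
                new DashboardEmailSubscriptions()
                    .setBooleanVal(new BooleanMessage().setValue(false))
                    .setEtag(current.getEtag()),
                "boolean_val"); // assumed field mask
    System.out.println(updated.getBooleanVal());
  }
}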
It is strongly suggested that systems make use of the etag in the read + * -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get + * an etag from a GET request, and pass it with the DELETE request to identify the rule set + * version you are deleting. + */ + @JsonProperty("etag") + private String etag; + + public DeleteDashboardEmailSubscriptionsResponse setEtag(String etag) { + this.etag = etag; + return this; + } + + public String getEtag() { + return etag; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + DeleteDashboardEmailSubscriptionsResponse that = (DeleteDashboardEmailSubscriptionsResponse) o; + return Objects.equals(etag, that.etag); + } + + @Override + public int hashCode() { + return Objects.hash(etag); + } + + @Override + public String toString() { + return new ToStringer(DeleteDashboardEmailSubscriptionsResponse.class) + .add("etag", etag) + .toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteSqlResultsDownloadRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteSqlResultsDownloadRequest.java new file mode 100755 index 000000000..3a5c3214a --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteSqlResultsDownloadRequest.java @@ -0,0 +1,52 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.settings; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.QueryParam; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonIgnore; +import java.util.Objects; + +/** Delete the SQL Results Download setting */ +@Generated +public class DeleteSqlResultsDownloadRequest { + /** + * etag used for versioning. The response is at least as fresh as the eTag provided. This is used + * for optimistic concurrency control as a way to help prevent simultaneous writes of a setting + * overwriting each other. It is strongly suggested that systems make use of the etag in the read + * -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get + * an etag from a GET request, and pass it with the DELETE request to identify the rule set + * version you are deleting. 
+ */ + @JsonIgnore + @QueryParam("etag") + private String etag; + + public DeleteSqlResultsDownloadRequest setEtag(String etag) { + this.etag = etag; + return this; + } + + public String getEtag() { + return etag; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + DeleteSqlResultsDownloadRequest that = (DeleteSqlResultsDownloadRequest) o; + return Objects.equals(etag, that.etag); + } + + @Override + public int hashCode() { + return Objects.hash(etag); + } + + @Override + public String toString() { + return new ToStringer(DeleteSqlResultsDownloadRequest.class).add("etag", etag).toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteSqlResultsDownloadResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteSqlResultsDownloadResponse.java new file mode 100755 index 000000000..bc2957210 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteSqlResultsDownloadResponse.java @@ -0,0 +1,50 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.settings; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; + +/** The etag is returned. */ +@Generated +public class DeleteSqlResultsDownloadResponse { + /** + * etag used for versioning. The response is at least as fresh as the eTag provided. This is used + * for optimistic concurrency control as a way to help prevent simultaneous writes of a setting + * overwriting each other. It is strongly suggested that systems make use of the etag in the read + * -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get + * an etag from a GET request, and pass it with the DELETE request to identify the rule set + * version you are deleting. + */ + @JsonProperty("etag") + private String etag; + + public DeleteSqlResultsDownloadResponse setEtag(String etag) { + this.etag = etag; + return this; + } + + public String getEtag() { + return etag; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + DeleteSqlResultsDownloadResponse that = (DeleteSqlResultsDownloadResponse) o; + return Objects.equals(etag, that.etag); + } + + @Override + public int hashCode() { + return Objects.hash(etag); + } + + @Override + public String toString() { + return new ToStringer(DeleteSqlResultsDownloadResponse.class).add("etag", etag).toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetDashboardEmailSubscriptionsRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetDashboardEmailSubscriptionsRequest.java new file mode 100755 index 000000000..0c545ca9b --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetDashboardEmailSubscriptionsRequest.java @@ -0,0 +1,52 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+ +package com.databricks.sdk.service.settings; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.QueryParam; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonIgnore; +import java.util.Objects; + +/** Get the Dashboard Email Subscriptions setting */ +@Generated +public class GetDashboardEmailSubscriptionsRequest { + /** + * etag used for versioning. The response is at least as fresh as the eTag provided. This is used + * for optimistic concurrency control as a way to help prevent simultaneous writes of a setting + * overwriting each other. It is strongly suggested that systems make use of the etag in the read + * -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get + * an etag from a GET request, and pass it with the DELETE request to identify the rule set + * version you are deleting. + */ + @JsonIgnore + @QueryParam("etag") + private String etag; + + public GetDashboardEmailSubscriptionsRequest setEtag(String etag) { + this.etag = etag; + return this; + } + + public String getEtag() { + return etag; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + GetDashboardEmailSubscriptionsRequest that = (GetDashboardEmailSubscriptionsRequest) o; + return Objects.equals(etag, that.etag); + } + + @Override + public int hashCode() { + return Objects.hash(etag); + } + + @Override + public String toString() { + return new ToStringer(GetDashboardEmailSubscriptionsRequest.class).add("etag", etag).toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetSqlResultsDownloadRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetSqlResultsDownloadRequest.java new file mode 100755 index 000000000..c9cb75cc7 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetSqlResultsDownloadRequest.java @@ -0,0 +1,52 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.settings; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.QueryParam; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonIgnore; +import java.util.Objects; + +/** Get the SQL Results Download setting */ +@Generated +public class GetSqlResultsDownloadRequest { + /** + * etag used for versioning. The response is at least as fresh as the eTag provided. This is used + * for optimistic concurrency control as a way to help prevent simultaneous writes of a setting + * overwriting each other. It is strongly suggested that systems make use of the etag in the read + * -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get + * an etag from a GET request, and pass it with the DELETE request to identify the rule set + * version you are deleting. 
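(Illustrative usage, not part of the diff above: the read -> delete pattern the etag documentation describes, reverting the Dashboard Email Subscriptions setting to its default value. Only classes and methods introduced in this change are used; the WorkspaceClient entry point is assumed.)

import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.settings.DashboardEmailSubscriptions;
import com.databricks.sdk.service.settings.DeleteDashboardEmailSubscriptionsRequest;
import com.databricks.sdk.service.settings.GetDashboardEmailSubscriptionsRequest;

public class RevertDashboardEmailSubscriptionsExample {
  public static void main(String[] args) {
    WorkspaceClient w = new WorkspaceClient();
    // GET first to obtain an etag, then pass it with the DELETE to avoid racing other writers.
    DashboardEmailSubscriptions current =
        w.settings()
            .DashboardEmailSubscriptions()
            .get(new GetDashboardEmailSubscriptionsRequest());
    w.settings()
        .DashboardEmailSubscriptions()
        .delete(new DeleteDashboardEmailSubscriptionsRequest().setEtag(current.getEtag()));
  }
}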
+ */ + @JsonIgnore + @QueryParam("etag") + private String etag; + + public GetSqlResultsDownloadRequest setEtag(String etag) { + this.etag = etag; + return this; + } + + public String getEtag() { + return etag; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + GetSqlResultsDownloadRequest that = (GetSqlResultsDownloadRequest) o; + return Objects.equals(etag, that.etag); + } + + @Override + public int hashCode() { + return Objects.hash(etag); + } + + @Override + public String toString() { + return new ToStringer(GetSqlResultsDownloadRequest.class).add("etag", etag).toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetWorkspaceNetworkOptionRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetWorkspaceNetworkOptionRequest.java index 5344f3325..c3f99bf5d 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetWorkspaceNetworkOptionRequest.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetWorkspaceNetworkOptionRequest.java @@ -7,7 +7,7 @@ import com.fasterxml.jackson.annotation.JsonIgnore; import java.util.Objects; -/** Get workspace network configuration */ +/** Get workspace network option */ @Generated public class GetWorkspaceNetworkOptionRequest { /** The workspace ID. */ diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SettingsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SettingsAPI.java index f5eb3d0a5..16fa226ef 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SettingsAPI.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SettingsAPI.java @@ -21,6 +21,8 @@ public class SettingsAPI { private ComplianceSecurityProfileAPI complianceSecurityProfileAPI; + private DashboardEmailSubscriptionsAPI dashboardEmailSubscriptionsAPI; + private DefaultNamespaceAPI defaultNamespaceAPI; private DisableLegacyAccessAPI disableLegacyAccessAPI; @@ -39,6 +41,8 @@ public class SettingsAPI { private RestrictWorkspaceAdminsAPI restrictWorkspaceAdminsAPI; + private SqlResultsDownloadAPI sqlResultsDownloadAPI; + /** Regular-use constructor */ public SettingsAPI(ApiClient apiClient) { impl = new SettingsImpl(apiClient); @@ -52,6 +56,8 @@ public SettingsAPI(ApiClient apiClient) { complianceSecurityProfileAPI = new ComplianceSecurityProfileAPI(apiClient); + dashboardEmailSubscriptionsAPI = new DashboardEmailSubscriptionsAPI(apiClient); + defaultNamespaceAPI = new DefaultNamespaceAPI(apiClient); disableLegacyAccessAPI = new DisableLegacyAccessAPI(apiClient); @@ -69,6 +75,8 @@ public SettingsAPI(ApiClient apiClient) { llmProxyPartnerPoweredWorkspaceAPI = new LlmProxyPartnerPoweredWorkspaceAPI(apiClient); restrictWorkspaceAdminsAPI = new RestrictWorkspaceAdminsAPI(apiClient); + + sqlResultsDownloadAPI = new SqlResultsDownloadAPI(apiClient); } /** Constructor for mocks */ @@ -99,6 +107,14 @@ public ComplianceSecurityProfileAPI ComplianceSecurityProfile() { return complianceSecurityProfileAPI; } + /** + * Controls whether schedules or workload tasks for refreshing AI/BI Dashboards in the workspace + * can send subscription emails containing PDFs and/or images of the dashboard. 
+ */ + public DashboardEmailSubscriptionsAPI DashboardEmailSubscriptions() { + return dashboardEmailSubscriptionsAPI; + } + /** * The default namespace setting API allows users to configure the default namespace for a * Databricks workspace. @@ -149,6 +165,14 @@ public RestrictWorkspaceAdminsAPI RestrictWorkspaceAdmins() { return restrictWorkspaceAdminsAPI; } + /** + * Controls whether users within the workspace are allowed to download results from the SQL Editor + * and AI/BI Dashboards UIs. + */ + public SqlResultsDownloadAPI SqlResultsDownload() { + return sqlResultsDownloadAPI; + } + public SettingsService impl() { return impl; } diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownload.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownload.java new file mode 100755 index 000000000..b15b7f669 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownload.java @@ -0,0 +1,86 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.settings; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; + +@Generated +public class SqlResultsDownload { + /** */ + @JsonProperty("boolean_val") + private BooleanMessage booleanVal; + + /** + * etag used for versioning. The response is at least as fresh as the eTag provided. This is used + * for optimistic concurrency control as a way to help prevent simultaneous writes of a setting + * overwriting each other. It is strongly suggested that systems make use of the etag in the read + * -> update pattern to perform setting updates in order to avoid race conditions. That is, get an + * etag from a GET request, and pass it with the PATCH request to identify the setting version you + * are updating. + */ + @JsonProperty("etag") + private String etag; + + /** + * Name of the corresponding setting. This field is populated in the response, but it will not be + * respected even if it's set in the request body. The setting name in the path parameter will be + * respected instead. Setting name is required to be 'default' if the setting only has one + * instance per workspace. 
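(Illustrative usage, not part of the diff above: turning off result downloads through the new SqlResultsDownload accessor on SettingsAPI. As in the earlier sketch, the BooleanMessage setter and the "boolean_val" field mask are assumptions.)

import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.settings.BooleanMessage;
import com.databricks.sdk.service.settings.SqlResultsDownload;

public class SqlResultsDownloadExample {
  public static void main(String[] args) {
    WorkspaceClient w = new WorkspaceClient();
    // Disallow downloading results from the SQL Editor and AI/BI Dashboards UIs.
    SqlResultsDownload updated =
        w.settings()
            .SqlResultsDownload()
            .update(
                true, // allow_missing
                new SqlResultsDownload().setBooleanVal(new BooleanMessage().setValue(false)),
                "boolean_val"); // assumed field mask
    System.out.println(updated.getBooleanVal());
  }
}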
+ */ + @JsonProperty("setting_name") + private String settingName; + + public SqlResultsDownload setBooleanVal(BooleanMessage booleanVal) { + this.booleanVal = booleanVal; + return this; + } + + public BooleanMessage getBooleanVal() { + return booleanVal; + } + + public SqlResultsDownload setEtag(String etag) { + this.etag = etag; + return this; + } + + public String getEtag() { + return etag; + } + + public SqlResultsDownload setSettingName(String settingName) { + this.settingName = settingName; + return this; + } + + public String getSettingName() { + return settingName; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SqlResultsDownload that = (SqlResultsDownload) o; + return Objects.equals(booleanVal, that.booleanVal) + && Objects.equals(etag, that.etag) + && Objects.equals(settingName, that.settingName); + } + + @Override + public int hashCode() { + return Objects.hash(booleanVal, etag, settingName); + } + + @Override + public String toString() { + return new ToStringer(SqlResultsDownload.class) + .add("booleanVal", booleanVal) + .add("etag", etag) + .add("settingName", settingName) + .toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadAPI.java new file mode 100755 index 000000000..7bc8f49d1 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadAPI.java @@ -0,0 +1,68 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +package com.databricks.sdk.service.settings; + +import com.databricks.sdk.core.ApiClient; +import com.databricks.sdk.support.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Controls whether users within the workspace are allowed to download results from the SQL Editor + * and AI/BI Dashboards UIs. By default, this setting is enabled (set to `true`) + */ +@Generated +public class SqlResultsDownloadAPI { + private static final Logger LOG = LoggerFactory.getLogger(SqlResultsDownloadAPI.class); + + private final SqlResultsDownloadService impl; + + /** Regular-use constructor */ + public SqlResultsDownloadAPI(ApiClient apiClient) { + impl = new SqlResultsDownloadImpl(apiClient); + } + + /** Constructor for mocks */ + public SqlResultsDownloadAPI(SqlResultsDownloadService mock) { + impl = mock; + } + + /** + * Delete the SQL Results Download setting. + * + *
<p>
Reverts the SQL Results Download setting to its default value. + */ + public DeleteSqlResultsDownloadResponse delete(DeleteSqlResultsDownloadRequest request) { + return impl.delete(request); + } + + /** + * Get the SQL Results Download setting. + * + *
<p>
Gets the SQL Results Download setting. + */ + public SqlResultsDownload get(GetSqlResultsDownloadRequest request) { + return impl.get(request); + } + + public SqlResultsDownload update( + boolean allowMissing, SqlResultsDownload setting, String fieldMask) { + return update( + new UpdateSqlResultsDownloadRequest() + .setAllowMissing(allowMissing) + .setSetting(setting) + .setFieldMask(fieldMask)); + } + + /** + * Update the SQL Results Download setting. + * + *
<p>
Updates the SQL Results Download setting. + */ + public SqlResultsDownload update(UpdateSqlResultsDownloadRequest request) { + return impl.update(request); + } + + public SqlResultsDownloadService impl() { + return impl; + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadImpl.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadImpl.java new file mode 100755 index 000000000..db09dc70e --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadImpl.java @@ -0,0 +1,58 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +package com.databricks.sdk.service.settings; + +import com.databricks.sdk.core.ApiClient; +import com.databricks.sdk.core.DatabricksException; +import com.databricks.sdk.core.http.Request; +import com.databricks.sdk.support.Generated; +import java.io.IOException; + +/** Package-local implementation of SqlResultsDownload */ +@Generated +class SqlResultsDownloadImpl implements SqlResultsDownloadService { + private final ApiClient apiClient; + + public SqlResultsDownloadImpl(ApiClient apiClient) { + this.apiClient = apiClient; + } + + @Override + public DeleteSqlResultsDownloadResponse delete(DeleteSqlResultsDownloadRequest request) { + String path = "/api/2.0/settings/types/sql_results_download/names/default"; + try { + Request req = new Request("DELETE", path); + ApiClient.setQuery(req, request); + req.withHeader("Accept", "application/json"); + return apiClient.execute(req, DeleteSqlResultsDownloadResponse.class); + } catch (IOException e) { + throw new DatabricksException("IO error: " + e.getMessage(), e); + } + } + + @Override + public SqlResultsDownload get(GetSqlResultsDownloadRequest request) { + String path = "/api/2.0/settings/types/sql_results_download/names/default"; + try { + Request req = new Request("GET", path); + ApiClient.setQuery(req, request); + req.withHeader("Accept", "application/json"); + return apiClient.execute(req, SqlResultsDownload.class); + } catch (IOException e) { + throw new DatabricksException("IO error: " + e.getMessage(), e); + } + } + + @Override + public SqlResultsDownload update(UpdateSqlResultsDownloadRequest request) { + String path = "/api/2.0/settings/types/sql_results_download/names/default"; + try { + Request req = new Request("PATCH", path, apiClient.serialize(request)); + ApiClient.setQuery(req, request); + req.withHeader("Accept", "application/json"); + req.withHeader("Content-Type", "application/json"); + return apiClient.execute(req, SqlResultsDownload.class); + } catch (IOException e) { + throw new DatabricksException("IO error: " + e.getMessage(), e); + } + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadService.java new file mode 100755 index 000000000..0929fba03 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadService.java @@ -0,0 +1,37 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +package com.databricks.sdk.service.settings; + +import com.databricks.sdk.support.Generated; + +/** + * Controls whether users within the workspace are allowed to download results from the SQL Editor + * and AI/BI Dashboards UIs. By default, this setting is enabled (set to `true`) + * + *
<p>
This is the high-level interface, that contains generated methods. + * + *
<p>
Evolving: this interface is under development. Method signatures may change. + */ +@Generated +public interface SqlResultsDownloadService { + /** + * Delete the SQL Results Download setting. + * + *
<p>
Reverts the SQL Results Download setting to its default value. + */ + DeleteSqlResultsDownloadResponse delete( + DeleteSqlResultsDownloadRequest deleteSqlResultsDownloadRequest); + + /** + * Get the SQL Results Download setting. + * + *
<p>
Gets the SQL Results Download setting. + */ + SqlResultsDownload get(GetSqlResultsDownloadRequest getSqlResultsDownloadRequest); + + /** + * Update the SQL Results Download setting. + * + *
<p>
Updates the SQL Results Download setting. + */ + SqlResultsDownload update(UpdateSqlResultsDownloadRequest updateSqlResultsDownloadRequest); +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateDashboardEmailSubscriptionsRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateDashboardEmailSubscriptionsRequest.java new file mode 100755 index 000000000..37613e037 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateDashboardEmailSubscriptionsRequest.java @@ -0,0 +1,85 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.settings; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; + +/** Details required to update a setting. */ +@Generated +public class UpdateDashboardEmailSubscriptionsRequest { + /** This should always be set to true for Settings API. Added for AIP compliance. */ + @JsonProperty("allow_missing") + private Boolean allowMissing; + + /** + * The field mask must be a single string, with multiple fields separated by commas (no spaces). + * The field path is relative to the resource object, using a dot (`.`) to navigate sub-fields + * (e.g., `author.given_name`). Specification of elements in sequence or map fields is not + * allowed, as only the entire collection field can be specified. Field names must exactly match + * the resource field names. + * + *
<p>
A field mask of `*` indicates full replacement. It’s recommended to always explicitly list + * the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if + * the API changes in the future. + */ + @JsonProperty("field_mask") + private String fieldMask; + + /** */ + @JsonProperty("setting") + private DashboardEmailSubscriptions setting; + + public UpdateDashboardEmailSubscriptionsRequest setAllowMissing(Boolean allowMissing) { + this.allowMissing = allowMissing; + return this; + } + + public Boolean getAllowMissing() { + return allowMissing; + } + + public UpdateDashboardEmailSubscriptionsRequest setFieldMask(String fieldMask) { + this.fieldMask = fieldMask; + return this; + } + + public String getFieldMask() { + return fieldMask; + } + + public UpdateDashboardEmailSubscriptionsRequest setSetting(DashboardEmailSubscriptions setting) { + this.setting = setting; + return this; + } + + public DashboardEmailSubscriptions getSetting() { + return setting; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + UpdateDashboardEmailSubscriptionsRequest that = (UpdateDashboardEmailSubscriptionsRequest) o; + return Objects.equals(allowMissing, that.allowMissing) + && Objects.equals(fieldMask, that.fieldMask) + && Objects.equals(setting, that.setting); + } + + @Override + public int hashCode() { + return Objects.hash(allowMissing, fieldMask, setting); + } + + @Override + public String toString() { + return new ToStringer(UpdateDashboardEmailSubscriptionsRequest.class) + .add("allowMissing", allowMissing) + .add("fieldMask", fieldMask) + .add("setting", setting) + .toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateSqlResultsDownloadRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateSqlResultsDownloadRequest.java new file mode 100755 index 000000000..a0d263a52 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateSqlResultsDownloadRequest.java @@ -0,0 +1,85 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.settings; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; + +/** Details required to update a setting. */ +@Generated +public class UpdateSqlResultsDownloadRequest { + /** This should always be set to true for Settings API. Added for AIP compliance. */ + @JsonProperty("allow_missing") + private Boolean allowMissing; + + /** + * The field mask must be a single string, with multiple fields separated by commas (no spaces). + * The field path is relative to the resource object, using a dot (`.`) to navigate sub-fields + * (e.g., `author.given_name`). Specification of elements in sequence or map fields is not + * allowed, as only the entire collection field can be specified. Field names must exactly match + * the resource field names. + * + *
<p>
A field mask of `*` indicates full replacement. It’s recommended to always explicitly list + * the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if + * the API changes in the future. + */ + @JsonProperty("field_mask") + private String fieldMask; + + /** */ + @JsonProperty("setting") + private SqlResultsDownload setting; + + public UpdateSqlResultsDownloadRequest setAllowMissing(Boolean allowMissing) { + this.allowMissing = allowMissing; + return this; + } + + public Boolean getAllowMissing() { + return allowMissing; + } + + public UpdateSqlResultsDownloadRequest setFieldMask(String fieldMask) { + this.fieldMask = fieldMask; + return this; + } + + public String getFieldMask() { + return fieldMask; + } + + public UpdateSqlResultsDownloadRequest setSetting(SqlResultsDownload setting) { + this.setting = setting; + return this; + } + + public SqlResultsDownload getSetting() { + return setting; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + UpdateSqlResultsDownloadRequest that = (UpdateSqlResultsDownloadRequest) o; + return Objects.equals(allowMissing, that.allowMissing) + && Objects.equals(fieldMask, that.fieldMask) + && Objects.equals(setting, that.setting); + } + + @Override + public int hashCode() { + return Objects.hash(allowMissing, fieldMask, setting); + } + + @Override + public String toString() { + return new ToStringer(UpdateSqlResultsDownloadRequest.class) + .add("allowMissing", allowMissing) + .add("fieldMask", fieldMask) + .add("setting", setting) + .toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateWorkspaceNetworkOptionRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateWorkspaceNetworkOptionRequest.java index 84c39c6b0..3dbcb2ba5 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateWorkspaceNetworkOptionRequest.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateWorkspaceNetworkOptionRequest.java @@ -8,7 +8,7 @@ import com.fasterxml.jackson.annotation.JsonProperty; import java.util.Objects; -/** Update workspace network configuration */ +/** Update workspace network option */ @Generated public class UpdateWorkspaceNetworkOptionRequest { /** The workspace ID. */ diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/WorkspaceNetworkConfigurationAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/WorkspaceNetworkConfigurationAPI.java index 90fe8feba..825cf4f15 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/WorkspaceNetworkConfigurationAPI.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/WorkspaceNetworkConfigurationAPI.java @@ -7,11 +7,12 @@ import org.slf4j.LoggerFactory; /** - * These APIs allow configuration of network settings for Databricks workspaces. Each workspace is - * always associated with exactly one network policy that controls which network destinations can be - * accessed from the Databricks environment. By default, workspaces are associated with the - * 'default-policy' network policy. You cannot create or delete a workspace's network configuration, - * only update it to associate the workspace with a different policy. 
+ * These APIs allow configuration of network settings for Databricks workspaces by selecting which + * network policy to associate with the workspace. Each workspace is always associated with exactly + * one network policy that controls which network destinations can be accessed from the Databricks + * environment. By default, workspaces are associated with the 'default-policy' network policy. You + * cannot create or delete a workspace's network option, only update it to associate the workspace + * with a different policy */ @Generated public class WorkspaceNetworkConfigurationAPI { @@ -35,10 +36,10 @@ public WorkspaceNetworkOption getWorkspaceNetworkOptionRpc(long workspaceId) { } /** - * Get workspace network configuration. + * Get workspace network option. * - *

<p>Gets the network configuration for a workspace. Every workspace has exactly one network - * policy binding, with 'default-policy' used if no explicit assignment exists. + *
<p>
Gets the network option for a workspace. Every workspace has exactly one network policy + * binding, with 'default-policy' used if no explicit assignment exists. */ public WorkspaceNetworkOption getWorkspaceNetworkOptionRpc( GetWorkspaceNetworkOptionRequest request) { @@ -54,11 +55,11 @@ public WorkspaceNetworkOption updateWorkspaceNetworkOptionRpc( } /** - * Update workspace network configuration. + * Update workspace network option. * - *

<p>Updates the network configuration for a workspace. This operation associates the workspace - * with the specified network policy. To revert to the default policy, specify 'default-policy' as - * the network_policy_id. + *
<p>
Updates the network option for a workspace. This operation associates the workspace with the + * specified network policy. To revert to the default policy, specify 'default-policy' as the + * network_policy_id. */ public WorkspaceNetworkOption updateWorkspaceNetworkOptionRpc( UpdateWorkspaceNetworkOptionRequest request) { diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/WorkspaceNetworkConfigurationService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/WorkspaceNetworkConfigurationService.java index 7c414aa6d..0a45ac324 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/WorkspaceNetworkConfigurationService.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/WorkspaceNetworkConfigurationService.java @@ -4,11 +4,12 @@ import com.databricks.sdk.support.Generated; /** - * These APIs allow configuration of network settings for Databricks workspaces. Each workspace is - * always associated with exactly one network policy that controls which network destinations can be - * accessed from the Databricks environment. By default, workspaces are associated with the - * 'default-policy' network policy. You cannot create or delete a workspace's network configuration, - * only update it to associate the workspace with a different policy. + * These APIs allow configuration of network settings for Databricks workspaces by selecting which + * network policy to associate with the workspace. Each workspace is always associated with exactly + * one network policy that controls which network destinations can be accessed from the Databricks + * environment. By default, workspaces are associated with the 'default-policy' network policy. You + * cannot create or delete a workspace's network option, only update it to associate the workspace + * with a different policy * *

<p>This is the high-level interface, that contains generated methods. * @@ -17,20 +18,20 @@ @Generated public interface WorkspaceNetworkConfigurationService { /** - * Get workspace network configuration. * - *

<p>Gets the network configuration for a workspace. Every workspace has exactly one network - * policy binding, with 'default-policy' used if no explicit assignment exists. + *
<p>
Gets the network option for a workspace. Every workspace has exactly one network policy + * binding, with 'default-policy' used if no explicit assignment exists. */ WorkspaceNetworkOption getWorkspaceNetworkOptionRpc( GetWorkspaceNetworkOptionRequest getWorkspaceNetworkOptionRequest); /** - * Update workspace network configuration. + * Update workspace network option. * - *

<p>Updates the network configuration for a workspace. This operation associates the workspace - * with the specified network policy. To revert to the default policy, specify 'default-policy' as - * the network_policy_id. + *
<p>
Updates the network option for a workspace. This operation associates the workspace with the + * specified network policy. To revert to the default policy, specify 'default-policy' as the + * network_policy_id. */ WorkspaceNetworkOption updateWorkspaceNetworkOptionRpc( UpdateWorkspaceNetworkOptionRequest updateWorkspaceNetworkOptionRequest); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sharing/AuthenticationType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sharing/AuthenticationType.java index 6b48f36b2..05d2bd9c4 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sharing/AuthenticationType.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sharing/AuthenticationType.java @@ -9,5 +9,6 @@ public enum AuthenticationType { DATABRICKS, OAUTH_CLIENT_CREDENTIALS, + OIDC_FEDERATION, TOKEN, } diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/AlertsV2API.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/AlertsV2API.java index 756895733..e868999eb 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/AlertsV2API.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/AlertsV2API.java @@ -7,7 +7,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -/** TODO: Add description */ +/** New version of SQL Alerts */ @Generated public class AlertsV2API { private static final Logger LOG = LoggerFactory.getLogger(AlertsV2API.class); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/AlertsV2Service.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/AlertsV2Service.java index f8740fa39..cb9ec351a 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/AlertsV2Service.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/AlertsV2Service.java @@ -4,7 +4,7 @@ import com.databricks.sdk.support.Generated; /** - * TODO: Add description + * New version of SQL Alerts * *

<p>This is the high-level interface, that contains generated methods. *
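To round out the workspace network option changes above, here is a minimal sketch of reading a workspace's network option and re-associating it with 'default-policy'. The getWorkspaceNetworkOptionRpc and updateWorkspaceNetworkOptionRpc signatures appear in this diff; the AccountClient accessor name workspaceNetworkConfiguration() and the setters on WorkspaceNetworkOption and UpdateWorkspaceNetworkOptionRequest are assumptions for illustration.

import com.databricks.sdk.AccountClient;
import com.databricks.sdk.service.settings.UpdateWorkspaceNetworkOptionRequest;
import com.databricks.sdk.service.settings.WorkspaceNetworkOption;

public class WorkspaceNetworkOptionExample {
  public static void main(String[] args) {
    AccountClient a = new AccountClient();
    long workspaceId = 1234567890L; // placeholder workspace ID

    // Every workspace has exactly one policy binding; 'default-policy' is used
    // when no explicit assignment exists.
    WorkspaceNetworkOption current =
        a.workspaceNetworkConfiguration().getWorkspaceNetworkOptionRpc(workspaceId);
    System.out.println(current);

    // Re-associate the workspace with 'default-policy' (or any other policy ID).
    // setWorkspaceId / setWorkspaceNetworkOption / setNetworkPolicyId are assumed
    // builder-style setters on the generated classes.
    WorkspaceNetworkOption reverted =
        a.workspaceNetworkConfiguration()
            .updateWorkspaceNetworkOptionRpc(
                new UpdateWorkspaceNetworkOptionRequest()
                    .setWorkspaceId(workspaceId)
                    .setWorkspaceNetworkOption(
                        new WorkspaceNetworkOption().setNetworkPolicyId("default-policy")));
    System.out.println(reverted);
  }
}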