From d7fc060b781c367102a5e4af6f963f8affa333bd Mon Sep 17 00:00:00 2001 From: Aimery Methena <159072740+aimethed@users.noreply.github.com> Date: Mon, 22 Jul 2024 15:58:42 -0400 Subject: [PATCH 01/87] V2 migration secrets (#2051) --- .../aws/cmdb/AwsCmdbMetadataHandler.java | 4 +- .../aws/cmdb/AwsCmdbRecordHandler.java | 4 +- .../aws/cmdb/AwsCmdbMetadataHandlerTest.java | 4 +- .../aws/cmdb/AwsCmdbRecordHandlerTest.java | 4 +- .../clickhouse/ClickHouseMetadataHandler.java | 4 +- .../ClickHouseMuxMetadataHandler.java | 4 +- .../ClickHouseMuxRecordHandler.java | 4 +- .../clickhouse/ClickHouseRecordHandler.java | 7 +-- .../ClickHouseMetadataHandlerTest.java | 12 ++-- .../ClickHouseMuxJdbcMetadataHandlerTest.java | 6 +- .../ClickHouseMuxJdbcRecordHandlerTest.java | 6 +- .../cloudera/HiveMetadataHandler.java | 4 +- .../cloudera/HiveMuxMetadataHandler.java | 4 +- .../cloudera/HiveMuxRecordHandler.java | 4 +- .../cloudera/HiveRecordHandler.java | 7 +-- .../cloudera/HiveMetadataHandlerTest.java | 13 ++--- .../cloudera/HiveMuxMetadataHandlerTest.java | 6 +- .../cloudera/HiveMuxRecordHandlerTest.java | 6 +- .../cloudera/HiveRecordHandlerTest.java | 12 ++-- .../ImpalaFederationExpressionParser.java | 2 +- .../cloudera/ImpalaMetadataHandler.java | 4 +- .../cloudera/ImpalaMuxMetadataHandler.java | 4 +- .../cloudera/ImpalaMuxRecordHandler.java | 4 +- .../cloudera/ImpalaRecordHandler.java | 8 +-- .../cloudera/ImpalaMetadataHandlerTest.java | 12 ++-- .../ImpalaMuxMetadataHandlerTest.java | 6 +- .../cloudera/ImpalaMuxRecordHandlerTest.java | 6 +- .../ImpalaQueryStringBuilderTest.java | 1 - .../cloudera/ImpalaRecordHandlerTest.java | 13 ++--- .../metrics/MetricsMetadataHandler.java | 4 +- .../metrics/MetricsRecordHandler.java | 7 +-- .../metrics/MetricsMetadataHandlerTest.java | 4 +- .../metrics/MetricsRecordHandlerTest.java | 4 +- .../cloudwatch/CloudwatchMetadataHandler.java | 4 +- .../cloudwatch/CloudwatchRecordHandler.java | 7 +-- .../CloudwatchMetadataHandlerTest.java | 4 +- 
.../CloudwatchRecordHandlerTest.java | 4 +- .../DataLakeGen2MetadataHandler.java | 4 +- .../DataLakeGen2MuxMetadataHandler.java | 4 +- .../DataLakeGen2MuxRecordHandler.java | 4 +- .../DataLakeGen2RecordHandler.java | 7 +-- .../DataLakeGen2MetadataHandlerTest.java | 12 ++-- .../DataLakeGen2MuxMetadataHandlerTest.java | 6 +- .../DataLakeGen2MuxRecordHandlerTest.java | 6 +- .../DataLakeRecordHandlerTest.java | 6 +- .../db2as400/Db2As400MetadataHandler.java | 4 +- .../db2as400/Db2As400MuxMetadataHandler.java | 4 +- .../db2as400/Db2As400MuxRecordHandler.java | 4 +- .../db2as400/Db2As400RecordHandler.java | 7 +-- .../db2as400/Db2As400MetadataHandlerTest.java | 12 ++-- .../db2as400/Db2As400RecordHandlerTest.java | 6 +- .../connectors/db2/Db2MetadataHandler.java | 4 +- .../connectors/db2/Db2MuxMetadataHandler.java | 4 +- .../connectors/db2/Db2MuxRecordHandler.java | 4 +- .../connectors/db2/Db2RecordHandler.java | 7 +-- .../db2/Db2MetadataHandlerTest.java | 12 ++-- .../connectors/db2/Db2RecordHandlerTest.java | 6 +- .../docdb/DocDBMetadataHandler.java | 4 +- .../connectors/docdb/DocDBRecordHandler.java | 7 +-- .../docdb/DocDBMetadataHandlerTest.java | 4 +- .../docdb/DocDBRecordHandlerTest.java | 6 +- athena-dynamodb/pom.xml | 8 +-- .../dynamodb/DynamoDBMetadataHandler.java | 4 +- .../dynamodb/DynamoDBRecordHandler.java | 4 +- .../dynamodb/DynamoDBMetadataHandlerTest.java | 4 +- .../dynamodb/DynamoDBRecordHandlerTest.java | 6 +- .../ElasticsearchMetadataHandler.java | 4 +- .../ElasticsearchRecordHandler.java | 7 +-- .../ElasticsearchMetadataHandlerTest.java | 4 +- .../ElasticsearchRecordHandlerTest.java | 4 +- .../example/ExampleMetadataHandler.java | 4 +- .../example/ExampleRecordHandler.java | 7 +-- .../example/ExampleMetadataHandlerTest.java | 4 +- .../example/ExampleRecordHandlerTest.java | 6 +- athena-federation-integ-test/pom.xml | 11 ++-- .../SecretsManagerCredentialsProvider.java | 21 +++---- athena-federation-sdk/pom.xml | 6 +- 
.../lambda/handlers/GlueMetadataHandler.java | 6 +- .../lambda/handlers/MetadataHandler.java | 7 +-- .../lambda/handlers/RecordHandler.java | 7 +-- .../security/CachableSecretsManager.java | 17 +++--- .../handlers/GlueMetadataHandlerTest.java | 6 +- .../security/CacheableSecretsManagerTest.java | 21 +++---- .../connectors/gcs/GcsMetadataHandler.java | 4 +- .../connectors/gcs/GcsRecordHandler.java | 7 +-- .../athena/connectors/gcs/GcsUtil.java | 4 +- .../gcs/GcsCompositeHandlerTest.java | 19 ++++--- .../gcs/GcsMetadataHandlerTest.java | 21 ++++--- .../connectors/gcs/GcsRecordHandlerTest.java | 9 ++- .../athena/connectors/gcs/GenericGcsTest.java | 6 +- .../bigquery/BigQueryRecordHandler.java | 7 +-- .../google/bigquery/BigQueryUtils.java | 18 +++--- .../BigQueryCompositeHandlerTest.java | 33 ++++++----- .../bigquery/BigQueryRecordHandlerTest.java | 4 +- athena-hbase/pom.xml | 56 ------------------- .../hbase/HbaseMetadataHandler.java | 4 +- .../connectors/hbase/HbaseRecordHandler.java | 7 +-- .../hbase/HbaseMetadataHandlerTest.java | 4 +- .../hbase/HbaseRecordHandlerTest.java | 4 +- .../hortonworks/HiveMetadataHandler.java | 4 +- .../hortonworks/HiveMuxMetadataHandler.java | 4 +- .../hortonworks/HiveMuxRecordHandler.java | 4 +- .../hortonworks/HiveRecordHandler.java | 7 +-- .../hortonworks/HiveMetadataHandlerTest.java | 12 ++-- .../HiveMuxMetadataHandlerTest.java | 6 +- .../hortonworks/HiveMuxRecordHandlerTest.java | 6 +- .../hortonworks/HiveRecordHandlerTest.java | 12 ++-- .../jdbc/MultiplexingJdbcMetadataHandler.java | 4 +- .../jdbc/MultiplexingJdbcRecordHandler.java | 4 +- .../jdbc/manager/JdbcMetadataHandler.java | 4 +- .../jdbc/manager/JdbcRecordHandler.java | 4 +- .../MultiplexingJdbcMetadataHandlerTest.java | 6 +- .../MultiplexingJdbcRecordHandlerTest.java | 6 +- .../jdbc/manager/JdbcMetadataHandlerTest.java | 12 ++-- .../jdbc/manager/JdbcRecordHandlerTest.java | 12 ++-- .../connectors/kafka/KafkaRecordHandler.java | 7 +-- 
.../athena/connectors/kafka/KafkaUtils.java | 18 +++--- .../kafka/KafkaCompositeHandlerTest.java | 12 ++-- .../kafka/KafkaRecordHandlerTest.java | 6 +- .../connectors/kafka/KafkaUtilsTest.java | 22 ++++---- .../msk/AmazonMskRecordHandler.java | 7 +-- .../athena/connectors/msk/AmazonMskUtils.java | 18 +++--- .../msk/AmazonMskCompositeHandlerTest.java | 9 +-- .../msk/AmazonMskRecordHandlerTest.java | 4 +- .../connectors/msk/AmazonMskUtilsTest.java | 21 ++++--- .../mysql/MySqlMetadataHandler.java | 4 +- .../mysql/MySqlMuxMetadataHandler.java | 4 +- .../mysql/MySqlMuxRecordHandler.java | 4 +- .../connectors/mysql/MySqlRecordHandler.java | 7 +-- .../mysql/MySqlMetadataHandlerTest.java | 12 ++-- .../MySqlMuxJdbcMetadataHandlerTest.java | 6 +- .../mysql/MySqlMuxJdbcRecordHandlerTest.java | 6 +- .../mysql/MySqlRecordHandlerTest.java | 6 +- .../neptune/NeptuneMetadataHandler.java | 4 +- .../neptune/NeptuneRecordHandler.java | 7 +-- .../neptune/NeptuneMetadataHandlerTest.java | 5 +- .../neptune/NeptuneRecordHandlerTest.java | 6 +- .../oracle/OracleMetadataHandler.java | 4 +- .../oracle/OracleMuxMetadataHandler.java | 4 +- .../oracle/OracleMuxRecordHandler.java | 4 +- .../oracle/OracleRecordHandler.java | 7 +-- .../oracle/OracleMetadataHandlerTest.java | 12 ++-- .../OracleMuxJdbcMetadataHandlerTest.java | 6 +- .../OracleMuxJdbcRecordHandlerTest.java | 6 +- .../oracle/OracleRecordHandlerTest.java | 6 +- .../postgresql/PostGreSqlMetadataHandler.java | 4 +- .../PostGreSqlMuxMetadataHandler.java | 4 +- .../PostGreSqlMuxRecordHandler.java | 4 +- .../postgresql/PostGreSqlRecordHandler.java | 7 +-- .../PostGreSqlMetadataHandlerTest.java | 12 ++-- .../PostGreSqlMuxJdbcMetadataHandlerTest.java | 6 +- .../PostGreSqlMuxJdbcRecordHandlerTest.java | 6 +- .../PostGreSqlRecordHandlerTest.java | 6 +- .../redis/RedisMetadataHandler.java | 4 +- .../connectors/redis/RedisRecordHandler.java | 7 +-- .../redis/RedisMetadataHandlerTest.java | 14 ++--- .../redis/RedisRecordHandlerTest.java | 14 
++--- .../redshift/RedshiftMetadataHandler.java | 4 +- .../redshift/RedshiftMuxMetadataHandler.java | 4 +- .../redshift/RedshiftMuxRecordHandler.java | 4 +- .../redshift/RedshiftRecordHandler.java | 7 +-- .../redshift/RedshiftMetadataHandlerTest.java | 12 ++-- .../RedshiftMuxJdbcMetadataHandlerTest.java | 6 +- .../RedshiftMuxJdbcRecordHandlerTest.java | 6 +- .../redshift/RedshiftRecordHandlerTest.java | 6 +- .../saphana/SaphanaMetadataHandler.java | 4 +- .../saphana/SaphanaMuxMetadataHandler.java | 4 +- .../saphana/SaphanaMuxRecordHandler.java | 4 +- .../saphana/SaphanaRecordHandler.java | 7 +-- .../saphana/SaphanaMetadataHandlerTest.java | 12 ++-- .../SaphanaMuxJdbcMetadataHandlerTest.java | 6 +- .../SaphanaMuxJdbcRecordHandlerTest.java | 6 +- .../saphana/SaphanaRecordHandlerTest.java | 6 +- .../snowflake/SnowflakeMetadataHandler.java | 4 +- .../SnowflakeMuxMetadataHandler.java | 4 +- .../snowflake/SnowflakeMuxRecordHandler.java | 4 +- .../snowflake/SnowflakeRecordHandler.java | 7 +-- .../SnowflakeMetadataHandlerTest.java | 12 ++-- .../SnowflakeMuxJdbcMetadataHandlerTest.java | 6 +- .../SnowflakeMuxJdbcRecordHandlerTest.java | 6 +- .../snowflake/SnowflakeRecordHandlerTest.java | 6 +- .../sqlserver/SqlServerMetadataHandler.java | 4 +- .../SqlServerMuxMetadataHandler.java | 4 +- .../sqlserver/SqlServerMuxRecordHandler.java | 4 +- .../sqlserver/SqlServerRecordHandler.java | 7 +-- .../SqlServerMetadataHandlerTest.java | 12 ++-- .../SqlServerMuxMetadataHandlerTest.java | 6 +- .../SqlServerMuxRecordHandlerTest.java | 6 +- .../sqlserver/SqlServerRecordHandlerTest.java | 6 +- .../synapse/SynapseMetadataHandler.java | 4 +- .../synapse/SynapseMuxMetadataHandler.java | 4 +- .../synapse/SynapseMuxRecordHandler.java | 4 +- .../synapse/SynapseRecordHandler.java | 7 +-- .../synapse/SynapseMetadataHandlerTest.java | 12 ++-- .../SynapseMuxMetadataHandlerTest.java | 6 +- .../synapse/SynapseMuxRecordHandlerTest.java | 6 +- .../synapse/SynapseRecordHandlerTest.java | 6 +- 
.../teradata/TeradataMetadataHandler.java | 4 +- .../teradata/TeradataMuxMetadataHandler.java | 4 +- .../teradata/TeradataMuxRecordHandler.java | 4 +- .../teradata/TeradataRecordHandler.java | 7 +-- .../teradata/TeradataMetadataHandlerTest.java | 12 ++-- .../TeradataMuxJdbcMetadataHandlerTest.java | 6 +- .../TeradataMuxJdbcRecordHandlerTest.java | 6 +- .../teradata/TeradataRecordHandlerTest.java | 6 +- .../timestream/TimestreamMetadataHandler.java | 4 +- .../timestream/TimestreamRecordHandler.java | 7 +-- .../TimestreamMetadataHandlerTest.java | 4 +- .../TimestreamRecordHandlerTest.java | 4 +- .../tpcds/TPCDSMetadataHandler.java | 4 +- .../connectors/tpcds/TPCDSRecordHandler.java | 7 +-- .../tpcds/TPCDSMetadataHandlerTest.java | 4 +- .../tpcds/TPCDSRecordHandlerTest.java | 4 +- .../connectors/udfs/AthenaUDFHandler.java | 4 +- .../vertica/VerticaRecordHandler.java | 7 +-- .../vertica/VerticaMetadataHandlerTest.java | 14 ++--- pom.xml | 1 + 217 files changed, 716 insertions(+), 813 deletions(-) diff --git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbMetadataHandler.java b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbMetadataHandler.java index 4a4b61f694..70bbe81f29 100644 --- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbMetadataHandler.java +++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbMetadataHandler.java @@ -40,8 +40,8 @@ import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory; import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.util.VisibleForTesting; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.List; import java.util.Map; @@ -77,7 +77,7 @@ public AwsCmdbMetadataHandler(java.util.Map configOptions) 
protected AwsCmdbMetadataHandler( TableProviderFactory tableProviderFactory, EncryptionKeyFactory keyFactory, - AWSSecretsManager secretsManager, + SecretsManagerClient secretsManager, AmazonAthena athena, String spillBucket, String spillPrefix, diff --git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbRecordHandler.java b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbRecordHandler.java index 9dcfe3ffe6..ae851996a8 100644 --- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbRecordHandler.java +++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbRecordHandler.java @@ -27,8 +27,8 @@ import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.util.VisibleForTesting; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -56,7 +56,7 @@ public AwsCmdbRecordHandler(java.util.Map configOptions) } @VisibleForTesting - protected AwsCmdbRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, TableProviderFactory tableProviderFactory, java.util.Map configOptions) + protected AwsCmdbRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, TableProviderFactory tableProviderFactory, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, SOURCE_TYPE, configOptions); tableProviders = tableProviderFactory.getTableProviders(); diff --git a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbMetadataHandlerTest.java b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbMetadataHandlerTest.java index ba8f6f815e..7233ee95d9 100644 --- 
a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbMetadataHandlerTest.java +++ b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbMetadataHandlerTest.java @@ -40,13 +40,13 @@ import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.junit.After; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.ArrayList; import java.util.Collections; @@ -98,7 +98,7 @@ public class AwsCmdbMetadataHandlerTest private Block mockBlock; @Mock - private AWSSecretsManager mockSecretsManager; + private SecretsManagerClient mockSecretsManager; @Mock private AmazonAthena mockAthena; diff --git a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbRecordHandlerTest.java b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbRecordHandlerTest.java index 9c78bb1ab8..d55c23fccf 100644 --- a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbRecordHandlerTest.java +++ b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbRecordHandlerTest.java @@ -34,13 +34,13 @@ import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.Mockito; import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import 
java.util.Collections; import java.util.UUID; @@ -77,7 +77,7 @@ public class AwsCmdbRecordHandlerTest private TableProvider mockTableProvider; @Mock - private AWSSecretsManager mockSecretsManager; + private SecretsManagerClient mockSecretsManager; @Mock private AmazonAthena mockAthena; diff --git a/athena-clickhouse/src/main/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseMetadataHandler.java b/athena-clickhouse/src/main/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseMetadataHandler.java index 54174bce6a..fd53fee4b6 100644 --- a/athena-clickhouse/src/main/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseMetadataHandler.java +++ b/athena-clickhouse/src/main/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseMetadataHandler.java @@ -41,7 +41,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil; import com.amazonaws.athena.connectors.mysql.MySqlMetadataHandler; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableSet; import org.apache.arrow.vector.complex.reader.FieldReader; @@ -49,6 +48,7 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.PreparedStatement; @@ -101,7 +101,7 @@ public ClickHouseMetadataHandler(DatabaseConnectionConfig databaseConnectionConf @VisibleForTesting protected ClickHouseMetadataHandler( DatabaseConnectionConfig databaseConnectionConfig, - AWSSecretsManager secretsManager, + SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) diff --git a/athena-clickhouse/src/main/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseMuxMetadataHandler.java 
b/athena-clickhouse/src/main/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseMuxMetadataHandler.java index f062b46e13..84a567f9fb 100644 --- a/athena-clickhouse/src/main/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseMuxMetadataHandler.java +++ b/athena-clickhouse/src/main/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseMuxMetadataHandler.java @@ -26,8 +26,8 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandlerFactory; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.util.VisibleForTesting; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -59,7 +59,7 @@ public ClickHouseMuxMetadataHandler(java.util.Map configOptions) } @VisibleForTesting - protected ClickHouseMuxMetadataHandler(AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + protected ClickHouseMuxMetadataHandler(SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, Map metadataHandlerMap, DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { super(secretsManager, athena, jdbcConnectionFactory, metadataHandlerMap, databaseConnectionConfig, configOptions); diff --git a/athena-clickhouse/src/main/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseMuxRecordHandler.java b/athena-clickhouse/src/main/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseMuxRecordHandler.java index 047864557a..00eefe9fbf 100644 --- a/athena-clickhouse/src/main/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseMuxRecordHandler.java +++ b/athena-clickhouse/src/main/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseMuxRecordHandler.java @@ -27,8 +27,8 @@ import 
com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.annotations.VisibleForTesting; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -58,7 +58,7 @@ public ClickHouseMuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - ClickHouseMuxRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + ClickHouseMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-clickhouse/src/main/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseRecordHandler.java b/athena-clickhouse/src/main/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseRecordHandler.java index d5e485f503..f4e3ea6937 100644 --- a/athena-clickhouse/src/main/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseRecordHandler.java +++ b/athena-clickhouse/src/main/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseRecordHandler.java @@ -35,13 +35,12 @@ import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.commons.lang3.Validate; import org.slf4j.Logger; import 
org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.PreparedStatement; @@ -77,12 +76,12 @@ public ClickHouseRecordHandler(DatabaseConnectionConfig databaseConnectionConfig public ClickHouseRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { - this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), AWSSecretsManagerClientBuilder.defaultClient(), AmazonAthenaClientBuilder.defaultClient(), + this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), jdbcConnectionFactory, new MySqlQueryStringBuilder(MYSQL_QUOTE_CHARACTER, new MySqlFederationExpressionParser(MYSQL_QUOTE_CHARACTER)), configOptions); } @VisibleForTesting - ClickHouseRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, final AmazonS3 amazonS3, final AWSSecretsManager secretsManager, + ClickHouseRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, final AmazonS3 amazonS3, final SecretsManagerClient secretsManager, final AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); diff --git a/athena-clickhouse/src/test/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseMetadataHandlerTest.java b/athena-clickhouse/src/test/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseMetadataHandlerTest.java index 783650c302..d99100492c 100644 --- a/athena-clickhouse/src/test/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseMetadataHandlerTest.java +++ b/athena-clickhouse/src/test/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseMetadataHandlerTest.java @@ -41,9 +41,6 @@ import 
com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest; -import com.amazonaws.services.secretsmanager.model.GetSecretValueResult; import net.jqwik.api.Table; import org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.types.pojo.Schema; @@ -51,6 +48,9 @@ import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; import java.sql.Connection; import java.sql.PreparedStatement; @@ -79,7 +79,7 @@ public class ClickHouseMetadataHandlerTest private JdbcConnectionFactory jdbcConnectionFactory; private Connection connection; private FederatedIdentity federatedIdentity; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private BlockAllocator blockAllocator; @@ -90,9 +90,9 @@ public void setup() this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); this.connection = Mockito.mock(Connection.class, Mockito.RETURNS_DEEP_STUBS); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); - Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(new GetSecretValueRequest().withSecretId("testSecret")))).thenReturn(new GetSecretValueResult().withSecretString("{\"username\": \"testUser\", 
\"password\": \"testPassword\"}")); + Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}").build()); this.metadataHandler = new ClickHouseMetadataHandler(databaseConnectionConfig, this.secretsManager, this.athena, this.jdbcConnectionFactory, com.google.common.collect.ImmutableMap.of()); this.federatedIdentity = Mockito.mock(FederatedIdentity.class); this.blockAllocator = Mockito.mock(BlockAllocator.class); diff --git a/athena-clickhouse/src/test/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseMuxJdbcMetadataHandlerTest.java b/athena-clickhouse/src/test/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseMuxJdbcMetadataHandlerTest.java index afc3e2a1ae..337b5ddff0 100644 --- a/athena-clickhouse/src/test/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseMuxJdbcMetadataHandlerTest.java +++ b/athena-clickhouse/src/test/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseMuxJdbcMetadataHandlerTest.java @@ -33,10 +33,10 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Collections; import java.util.Map; @@ -49,7 +49,7 @@ public class ClickHouseMuxJdbcMetadataHandlerTest private ClickHouseMetadataHandler metadataHandler; private JdbcMetadataHandler jdbcMetadataHandler; private BlockAllocator allocator; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private QueryStatusChecker queryStatusChecker; 
private JdbcConnectionFactory jdbcConnectionFactory; @@ -62,7 +62,7 @@ public void setup() //Mockito.when(this.allocator.createBlock(nullable(Schema.class))).thenReturn(Mockito.mock(Block.class)); this.metadataHandler = Mockito.mock(ClickHouseMetadataHandler.class); this.metadataHandlerMap = Collections.singletonMap("fakedatabase", this.metadataHandler); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-clickhouse/src/test/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseMuxJdbcRecordHandlerTest.java b/athena-clickhouse/src/test/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseMuxJdbcRecordHandlerTest.java index e7ff91da7b..f61dcc2748 100644 --- a/athena-clickhouse/src/test/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseMuxJdbcRecordHandlerTest.java +++ b/athena-clickhouse/src/test/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseMuxJdbcRecordHandlerTest.java @@ -30,11 +30,11 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.SQLException; @@ -47,7 +47,7 @@ public class ClickHouseMuxJdbcRecordHandlerTest private ClickHouseRecordHandler recordHandler; private JdbcRecordHandler jdbcRecordHandler; private AmazonS3 amazonS3; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private 
AmazonAthena athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -58,7 +58,7 @@ public void setup() this.recordHandler = Mockito.mock(ClickHouseRecordHandler.class); this.recordHandlerMap = Collections.singletonMap(ClickHouseConstants.NAME, this.recordHandler); this.amazonS3 = Mockito.mock(AmazonS3.class); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveMetadataHandler.java b/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveMetadataHandler.java index 6f6b38b4bc..a59aeea43a 100644 --- a/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveMetadataHandler.java +++ b/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveMetadataHandler.java @@ -50,7 +50,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcArrowTypeConverter; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; @@ -61,6 +60,7 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.DatabaseMetaData; @@ -96,7 +96,7 @@ public HiveMetadataHandler(DatabaseConnectionConfig databaseConnectionConfig, ja @VisibleForTesting protected HiveMetadataHandler( 
DatabaseConnectionConfig databaseConnectionConfiguration, - AWSSecretsManager secretManager, + SecretsManagerClient secretManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) diff --git a/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveMuxMetadataHandler.java b/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveMuxMetadataHandler.java index 6110d8cc25..ed2e1655c4 100644 --- a/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveMuxMetadataHandler.java +++ b/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveMuxMetadataHandler.java @@ -26,8 +26,8 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandlerFactory; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.util.VisibleForTesting; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -56,7 +56,7 @@ public HiveMuxMetadataHandler(java.util.Map configOptions) } @VisibleForTesting - protected HiveMuxMetadataHandler(AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + protected HiveMuxMetadataHandler(SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, Map metadataHandlerMap, DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { super(secretsManager, athena, jdbcConnectionFactory, metadataHandlerMap, databaseConnectionConfig, configOptions); diff --git a/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveMuxRecordHandler.java b/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveMuxRecordHandler.java index f87ee06bef..0168d612d2 100644 --- 
a/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveMuxRecordHandler.java +++ b/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveMuxRecordHandler.java @@ -27,8 +27,8 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.annotations.VisibleForTesting; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -57,7 +57,7 @@ public HiveMuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - HiveMuxRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + HiveMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveRecordHandler.java b/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveRecordHandler.java index ed5af5284d..cfb28e56a2 100644 --- a/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveRecordHandler.java +++ b/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveRecordHandler.java @@ -32,11 +32,10 @@ import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import 
com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.commons.lang3.Validate; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.PreparedStatement; @@ -62,11 +61,11 @@ public HiveRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, java } public HiveRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { - this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), AWSSecretsManagerClientBuilder.defaultClient(), AmazonAthenaClientBuilder.defaultClient(), + this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), jdbcConnectionFactory, new HiveQueryStringBuilder(HIVE_QUOTE_CHARACTER, new HiveFederationExpressionParser(HIVE_QUOTE_CHARACTER)), configOptions); } @VisibleForTesting - HiveRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) + HiveRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); this.jdbcSplitQueryBuilder = Validate.notNull(jdbcSplitQueryBuilder, "query builder must not be null"); diff --git a/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveMetadataHandlerTest.java 
b/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveMetadataHandlerTest.java index b1520d4a3d..2669581df9 100644 --- a/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveMetadataHandlerTest.java +++ b/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveMetadataHandlerTest.java @@ -29,9 +29,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest; -import com.amazonaws.services.secretsmanager.model.GetSecretValueResult; import org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Assert; @@ -39,7 +36,9 @@ import org.junit.BeforeClass; import org.junit.Test; import org.mockito.Mockito; - +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; import java.sql.*; import java.util.*; @@ -58,7 +57,7 @@ public class HiveMetadataHandlerTest private JdbcConnectionFactory jdbcConnectionFactory; private Connection connection; private FederatedIdentity federatedIdentity; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private BlockAllocator blockAllocator; @@ -75,9 +74,9 @@ public void setup() this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class, Mockito.RETURNS_DEEP_STUBS); this.connection = Mockito.mock(Connection.class, Mockito.RETURNS_DEEP_STUBS); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); - 
this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); - Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(new GetSecretValueRequest().withSecretId("testSecret")))).thenReturn(new GetSecretValueResult().withSecretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}")); + Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}").build()); this.hiveMetadataHandler = new HiveMetadataHandler(databaseConnectionConfig, this.secretsManager, this.athena, this.jdbcConnectionFactory, com.google.common.collect.ImmutableMap.of()); this.federatedIdentity = Mockito.mock(FederatedIdentity.class); diff --git a/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveMuxMetadataHandlerTest.java b/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveMuxMetadataHandlerTest.java index 8f0f47fc63..019d29674e 100644 --- a/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveMuxMetadataHandlerTest.java +++ b/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveMuxMetadataHandlerTest.java @@ -44,7 +44,7 @@ import com.amazonaws.athena.connector.lambda.metadata.ListSchemasRequest; import com.amazonaws.athena.connector.lambda.metadata.ListTablesRequest; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import static org.mockito.ArgumentMatchers.nullable; @@ -54,7 +54,7 @@ public class HiveMuxMetadataHandlerTest private HiveMetadataHandler hiveMetadataHandler; private JdbcMetadataHandler jdbcMetadataHandler; private 
BlockAllocator allocator; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -68,7 +68,7 @@ public void setup() this.allocator = new BlockAllocatorImpl(); this.hiveMetadataHandler = Mockito.mock(HiveMetadataHandler.class); this.metadataHandlerMap = Collections.singletonMap("metaHive", this.hiveMetadataHandler); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveMuxRecordHandlerTest.java b/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveMuxRecordHandlerTest.java index d3fb2d0ee3..d93d415458 100644 --- a/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveMuxRecordHandlerTest.java +++ b/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveMuxRecordHandlerTest.java @@ -31,13 +31,13 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; import org.mockito.Mockito; import org.testng.Assert; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.SQLException; @@ -51,7 +51,7 @@ public class HiveMuxRecordHandlerTest private HiveRecordHandler hiveRecordHandler; private JdbcRecordHandler jdbcRecordHandler; 
private AmazonS3 amazonS3; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -65,7 +65,7 @@ public void setup() this.hiveRecordHandler = Mockito.mock(HiveRecordHandler.class); this.recordHandlerMap = Collections.singletonMap("recordHive", this.hiveRecordHandler); this.amazonS3 = Mockito.mock(AmazonS3.class); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveRecordHandlerTest.java b/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveRecordHandlerTest.java index 8cfce879a6..9c25145064 100644 --- a/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveRecordHandlerTest.java +++ b/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveRecordHandlerTest.java @@ -34,9 +34,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest; -import com.amazonaws.services.secretsmanager.model.GetSecretValueResult; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.types.Types; @@ -45,6 +42,9 @@ import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import 
software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; import java.sql.Connection; import java.sql.Date; @@ -63,7 +63,7 @@ public class HiveRecordHandlerTest private JdbcConnectionFactory jdbcConnectionFactory; private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; private AmazonS3 amazonS3; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; @Before @@ -71,9 +71,9 @@ public void setup() throws Exception { this.amazonS3 = Mockito.mock(AmazonS3.class); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); - Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(new GetSecretValueRequest().withSecretId("testSecret")))).thenReturn(new GetSecretValueResult().withSecretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}")); + Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}").build()); this.connection = Mockito.mock(Connection.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); diff --git a/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaFederationExpressionParser.java b/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaFederationExpressionParser.java index 74469e70fb..3abc39655e 100644 --- a/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaFederationExpressionParser.java +++ 
b/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaFederationExpressionParser.java @@ -17,7 +17,7 @@ * limitations under the License. * #L% */ -package com.amazonaws.athena.connectors.hortonworks; +package com.amazonaws.athena.connectors.cloudera; import com.amazonaws.athena.connectors.jdbc.manager.JdbcFederationExpressionParser; import com.google.common.base.Joiner; diff --git a/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaMetadataHandler.java b/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaMetadataHandler.java index 5d75bff3cd..b58debf907 100644 --- a/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaMetadataHandler.java +++ b/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaMetadataHandler.java @@ -50,7 +50,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcArrowTypeConverter; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.complex.reader.FieldReader; @@ -60,6 +59,7 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.DatabaseMetaData; @@ -92,7 +92,7 @@ public ImpalaMetadataHandler(DatabaseConnectionConfig databaseConnectionConfig, @VisibleForTesting protected ImpalaMetadataHandler( DatabaseConnectionConfig databaseConnectionConfiguration, - AWSSecretsManager secretManager, + SecretsManagerClient secretManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) diff --git 
a/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxMetadataHandler.java b/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxMetadataHandler.java index dbe810912f..d0f54c4327 100644 --- a/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxMetadataHandler.java +++ b/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxMetadataHandler.java @@ -26,8 +26,8 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandlerFactory; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.util.VisibleForTesting; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -56,7 +56,7 @@ public ImpalaMuxMetadataHandler(java.util.Map configOptions) } @VisibleForTesting - protected ImpalaMuxMetadataHandler(AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + protected ImpalaMuxMetadataHandler(SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, Map metadataHandlerMap, DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { super(secretsManager, athena, jdbcConnectionFactory, metadataHandlerMap, databaseConnectionConfig, configOptions); diff --git a/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxRecordHandler.java b/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxRecordHandler.java index d1461b523e..29b688686e 100644 --- a/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxRecordHandler.java +++ 
b/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxRecordHandler.java @@ -27,8 +27,8 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.annotations.VisibleForTesting; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -57,7 +57,7 @@ public ImpalaMuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - ImpalaMuxRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + ImpalaMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaRecordHandler.java b/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaRecordHandler.java index 8a336a0b5f..d1690d4f8a 100644 --- a/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaRecordHandler.java +++ b/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaRecordHandler.java @@ -22,7 +22,6 @@ import com.amazonaws.athena.connector.lambda.domain.Split; import com.amazonaws.athena.connector.lambda.domain.TableName; import com.amazonaws.athena.connector.lambda.domain.predicate.Constraints; -import com.amazonaws.athena.connectors.hortonworks.ImpalaFederationExpressionParser; import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import 
com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionInfo; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; @@ -33,11 +32,10 @@ import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.commons.lang3.Validate; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.PreparedStatement; @@ -63,11 +61,11 @@ public ImpalaRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, ja } public ImpalaRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { - this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), AWSSecretsManagerClientBuilder.defaultClient(), AmazonAthenaClientBuilder.defaultClient(), + this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), jdbcConnectionFactory, new ImpalaQueryStringBuilder(IMPALA_QUOTE_CHARACTER, new ImpalaFederationExpressionParser(IMPALA_QUOTE_CHARACTER)), configOptions); } @VisibleForTesting - ImpalaRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) + ImpalaRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder 
jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); this.jdbcSplitQueryBuilder = Validate.notNull(jdbcSplitQueryBuilder, "query builder must not be null"); diff --git a/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaMetadataHandlerTest.java b/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaMetadataHandlerTest.java index 09746df6da..fe8f036558 100644 --- a/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaMetadataHandlerTest.java +++ b/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaMetadataHandlerTest.java @@ -29,9 +29,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest; -import com.amazonaws.services.secretsmanager.model.GetSecretValueResult; import org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Assert; @@ -39,6 +36,9 @@ import org.junit.BeforeClass; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; import java.sql.*; import java.util.*; @@ -58,7 +58,7 @@ public class ImpalaMetadataHandlerTest private JdbcConnectionFactory jdbcConnectionFactory; private Connection connection; private FederatedIdentity federatedIdentity; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private 
AmazonAthena athena; private BlockAllocator blockAllocator; @BeforeClass @@ -73,9 +73,9 @@ public void setup() this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class, Mockito.RETURNS_DEEP_STUBS); this.connection = Mockito.mock(Connection.class, Mockito.RETURNS_DEEP_STUBS); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); - Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(new GetSecretValueRequest().withSecretId("testSecret")))).thenReturn(new GetSecretValueResult().withSecretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}")); + Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}").build()); this.impalaMetadataHandler = new ImpalaMetadataHandler(databaseConnectionConfig, this.secretsManager, this.athena, this.jdbcConnectionFactory, com.google.common.collect.ImmutableMap.of()); this.federatedIdentity = Mockito.mock(FederatedIdentity.class); diff --git a/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxMetadataHandlerTest.java b/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxMetadataHandlerTest.java index 8fe338fcb8..b6193c3853 100644 --- a/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxMetadataHandlerTest.java +++ b/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxMetadataHandlerTest.java @@ -44,7 +44,7 @@ import com.amazonaws.athena.connector.lambda.metadata.ListSchemasRequest; import 
com.amazonaws.athena.connector.lambda.metadata.ListTablesRequest; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import static org.mockito.ArgumentMatchers.nullable; @@ -54,7 +54,7 @@ public class ImpalaMuxMetadataHandlerTest private ImpalaMetadataHandler impalaMetadataHandler; private JdbcMetadataHandler jdbcMetadataHandler; private BlockAllocator allocator; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -68,7 +68,7 @@ public void setup() this.allocator = new BlockAllocatorImpl(); this.impalaMetadataHandler = Mockito.mock(ImpalaMetadataHandler.class); this.metadataHandlerMap = Collections.singletonMap("metaImpala", this.impalaMetadataHandler); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxRecordHandlerTest.java b/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxRecordHandlerTest.java index ec84d0ed0c..0a59eefaa6 100644 --- a/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxRecordHandlerTest.java +++ b/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxRecordHandlerTest.java @@ -31,13 +31,13 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import 
com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; import org.mockito.Mockito; import org.testng.Assert; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.SQLException; @@ -51,7 +51,7 @@ public class ImpalaMuxRecordHandlerTest private ImpalaRecordHandler impalaRecordHandler; private JdbcRecordHandler jdbcRecordHandler; private AmazonS3 amazonS3; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -65,7 +65,7 @@ public void setup() this.impalaRecordHandler = Mockito.mock(ImpalaRecordHandler.class); this.recordHandlerMap = Collections.singletonMap("recordImpala", this.impalaRecordHandler); this.amazonS3 = Mockito.mock(AmazonS3.class); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaQueryStringBuilderTest.java b/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaQueryStringBuilderTest.java index d87cc871c6..0b08ecfe45 100644 --- a/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaQueryStringBuilderTest.java +++ b/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaQueryStringBuilderTest.java @@ -20,7 +20,6 @@ package com.amazonaws.athena.connectors.cloudera; import com.amazonaws.athena.connector.lambda.domain.Split; -import 
com.amazonaws.athena.connectors.hortonworks.ImpalaFederationExpressionParser; import org.junit.Assert; import org.junit.Test; import org.junit.runner.RunWith; diff --git a/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaRecordHandlerTest.java b/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaRecordHandlerTest.java index 5e222fd508..35efd0edc9 100644 --- a/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaRecordHandlerTest.java +++ b/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaRecordHandlerTest.java @@ -28,16 +28,15 @@ import com.amazonaws.athena.connector.lambda.domain.predicate.Range; import com.amazonaws.athena.connector.lambda.domain.predicate.SortedRangeSet; import com.amazonaws.athena.connector.lambda.domain.predicate.ValueSet; -import com.amazonaws.athena.connectors.hortonworks.ImpalaFederationExpressionParser; import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest; -import com.amazonaws.services.secretsmanager.model.GetSecretValueResult; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.types.Types; @@ -65,7 +64,7 @@ 
public class ImpalaRecordHandlerTest private JdbcConnectionFactory jdbcConnectionFactory; private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; private AmazonS3 amazonS3; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; @Before @@ -73,9 +72,9 @@ public void setup() throws Exception { this.amazonS3 = Mockito.mock(AmazonS3.class); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); - Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(new GetSecretValueRequest().withSecretId("testSecret")))).thenReturn(new GetSecretValueResult().withSecretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}")); + Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}").build()); this.connection = Mockito.mock(Connection.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); diff --git a/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsMetadataHandler.java b/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsMetadataHandler.java index 9d17cc8616..e720783632 100644 --- a/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsMetadataHandler.java +++ b/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsMetadataHandler.java @@ -49,12 +49,12 @@ import com.amazonaws.services.cloudwatch.model.ListMetricsResult; import 
com.amazonaws.services.cloudwatch.model.Metric; import com.amazonaws.services.cloudwatch.model.MetricStat; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.amazonaws.util.CollectionUtils; import com.google.common.collect.Lists; import org.apache.arrow.util.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.ArrayList; import java.util.Collections; @@ -141,7 +141,7 @@ public MetricsMetadataHandler(java.util.Map configOptions) protected MetricsMetadataHandler( AmazonCloudWatch metrics, EncryptionKeyFactory keyFactory, - AWSSecretsManager secretsManager, + SecretsManagerClient secretsManager, AmazonAthena athena, String spillBucket, String spillPrefix, diff --git a/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsRecordHandler.java b/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsRecordHandler.java index 93b18c62d3..c14ab536ae 100644 --- a/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsRecordHandler.java +++ b/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsRecordHandler.java @@ -44,12 +44,11 @@ import com.amazonaws.services.cloudwatch.model.MetricStat; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import org.apache.arrow.util.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Field; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Date; import java.util.HashMap; @@ -105,13 +104,13 @@ public class MetricsRecordHandler public 
MetricsRecordHandler(java.util.Map configOptions) { this(AmazonS3ClientBuilder.defaultClient(), - AWSSecretsManagerClientBuilder.defaultClient(), + SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), AmazonCloudWatchClientBuilder.standard().build(), configOptions); } @VisibleForTesting - protected MetricsRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, AmazonCloudWatch metrics, java.util.Map configOptions) + protected MetricsRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, AmazonCloudWatch metrics, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, SOURCE_TYPE, configOptions); this.amazonS3 = amazonS3; diff --git a/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsMetadataHandlerTest.java b/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsMetadataHandlerTest.java index 2e1992da0e..2dec95895f 100644 --- a/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsMetadataHandlerTest.java +++ b/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsMetadataHandlerTest.java @@ -48,7 +48,6 @@ import com.amazonaws.services.cloudwatch.model.ListMetricsRequest; import com.amazonaws.services.cloudwatch.model.ListMetricsResult; import com.amazonaws.services.cloudwatch.model.Metric; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.After; @@ -60,6 +59,7 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.ArrayList; import java.util.Collections; @@ -94,7 +94,7 @@ public class MetricsMetadataHandlerTest private 
AmazonCloudWatch mockMetrics; @Mock - private AWSSecretsManager mockSecretsManager; + private SecretsManagerClient mockSecretsManager; @Mock private AmazonAthena mockAthena; diff --git a/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsRecordHandlerTest.java b/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsRecordHandlerTest.java index bf90e3134a..bb2db9e79d 100644 --- a/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsRecordHandlerTest.java +++ b/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsRecordHandlerTest.java @@ -53,7 +53,6 @@ import com.amazonaws.services.s3.model.PutObjectResult; import com.amazonaws.services.s3.model.S3Object; import com.amazonaws.services.s3.model.S3ObjectInputStream; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.io.ByteStreams; import org.junit.After; import org.junit.Before; @@ -65,6 +64,7 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.ByteArrayInputStream; import java.io.InputStream; @@ -118,7 +118,7 @@ public class MetricsRecordHandlerTest private AmazonS3 mockS3; @Mock - private AWSSecretsManager mockSecretsManager; + private SecretsManagerClient mockSecretsManager; @Mock private AmazonAthena mockAthena; diff --git a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchMetadataHandler.java b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchMetadataHandler.java index cd52e12683..2288265de4 100644 --- a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchMetadataHandler.java +++ 
b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchMetadataHandler.java @@ -53,7 +53,6 @@ import com.amazonaws.services.logs.model.GetQueryResultsResult; import com.amazonaws.services.logs.model.LogStream; import com.amazonaws.services.logs.model.ResultField; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.collect.ImmutableMap; import org.apache.arrow.util.VisibleForTesting; import org.apache.arrow.vector.complex.reader.FieldReader; @@ -62,6 +61,7 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.ArrayList; import java.util.Collections; @@ -140,7 +140,7 @@ public CloudwatchMetadataHandler(java.util.Map configOptions) protected CloudwatchMetadataHandler( AWSLogs awsLogs, EncryptionKeyFactory keyFactory, - AWSSecretsManager secretsManager, + SecretsManagerClient secretsManager, AmazonAthena athena, String spillBucket, String spillPrefix, diff --git a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandler.java b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandler.java index a5d29f0f9b..52d4859c27 100644 --- a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandler.java +++ b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandler.java @@ -43,11 +43,10 @@ import com.amazonaws.services.logs.model.ResultField; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import org.apache.arrow.util.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import 
software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.List; import java.util.Map; @@ -85,14 +84,14 @@ public CloudwatchRecordHandler(java.util.Map configOptions) { this( AmazonS3ClientBuilder.defaultClient(), - AWSSecretsManagerClientBuilder.defaultClient(), + SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), AWSLogsClientBuilder.defaultClient(), configOptions); } @VisibleForTesting - protected CloudwatchRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, AWSLogs awsLogs, java.util.Map configOptions) + protected CloudwatchRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, AWSLogs awsLogs, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, SOURCE_TYPE, configOptions); this.awsLogs = awsLogs; diff --git a/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchMetadataHandlerTest.java b/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchMetadataHandlerTest.java index 22a876dbae..434e1784e7 100644 --- a/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchMetadataHandlerTest.java +++ b/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchMetadataHandlerTest.java @@ -51,7 +51,6 @@ import com.amazonaws.services.logs.model.DescribeLogStreamsResult; import com.amazonaws.services.logs.model.LogGroup; import com.amazonaws.services.logs.model.LogStream; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.ArrowType; import org.apache.arrow.vector.types.pojo.Schema; @@ -65,6 +64,7 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.ArrayList; import 
java.util.Collections; @@ -95,7 +95,7 @@ public class CloudwatchMetadataHandlerTest private AWSLogs mockAwsLogs; @Mock - private AWSSecretsManager mockSecretsManager; + private SecretsManagerClient mockSecretsManager; @Mock private AmazonAthena mockAthena; diff --git a/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandlerTest.java b/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandlerTest.java index 6e3ec73623..e39da6b81d 100644 --- a/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandlerTest.java +++ b/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandlerTest.java @@ -49,7 +49,6 @@ import com.amazonaws.services.s3.model.PutObjectResult; import com.amazonaws.services.s3.model.S3Object; import com.amazonaws.services.s3.model.S3ObjectInputStream; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.collect.ImmutableList; import com.google.common.io.ByteStreams; import org.apache.arrow.vector.types.Types; @@ -63,6 +62,7 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.ByteArrayInputStream; import java.io.InputStream; @@ -100,7 +100,7 @@ public class CloudwatchRecordHandlerTest private AmazonS3 mockS3; @Mock - private AWSSecretsManager mockSecretsManager; + private SecretsManagerClient mockSecretsManager; @Mock private AmazonAthena mockAthena; diff --git a/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MetadataHandler.java b/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MetadataHandler.java index 53fd9386fe..5751ecc590 100644 --- 
a/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MetadataHandler.java +++ b/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MetadataHandler.java @@ -48,7 +48,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcArrowTypeConverter; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; @@ -58,6 +57,7 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.DatabaseMetaData; @@ -111,7 +111,7 @@ public DataLakeGen2MetadataHandler( @VisibleForTesting protected DataLakeGen2MetadataHandler( DatabaseConnectionConfig databaseConnectionConfig, - AWSSecretsManager secretsManager, + SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) diff --git a/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxMetadataHandler.java b/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxMetadataHandler.java index 0132af948d..917575042d 100644 --- a/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxMetadataHandler.java +++ b/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxMetadataHandler.java @@ -25,8 +25,8 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandlerFactory; import 
com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.util.VisibleForTesting; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -55,7 +55,7 @@ public DataLakeGen2MuxMetadataHandler(java.util.Map configOption } @VisibleForTesting - protected DataLakeGen2MuxMetadataHandler(AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + protected DataLakeGen2MuxMetadataHandler(SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, Map metadataHandlerMap, DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { super(secretsManager, athena, jdbcConnectionFactory, metadataHandlerMap, databaseConnectionConfig, configOptions); diff --git a/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxRecordHandler.java b/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxRecordHandler.java index f637195150..d03751fde1 100644 --- a/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxRecordHandler.java +++ b/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxRecordHandler.java @@ -26,8 +26,8 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.annotations.VisibleForTesting; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -54,7 +54,7 @@ public DataLakeGen2MuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - DataLakeGen2MuxRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena 
athena, JdbcConnectionFactory jdbcConnectionFactory, + DataLakeGen2MuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2RecordHandler.java b/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2RecordHandler.java index 16b3e5b584..8370014987 100644 --- a/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2RecordHandler.java +++ b/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2RecordHandler.java @@ -32,11 +32,10 @@ import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.commons.lang3.Validate; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.PreparedStatement; @@ -54,12 +53,12 @@ public DataLakeGen2RecordHandler(java.util.Map configOptions) } public DataLakeGen2RecordHandler(DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { - this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), AWSSecretsManagerClientBuilder.defaultClient(), AmazonAthenaClientBuilder.defaultClient(), + this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), 
SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), new GenericJdbcConnectionFactory(databaseConnectionConfig, DataLakeGen2MetadataHandler.JDBC_PROPERTIES, new DatabaseConnectionInfo(DataLakeGen2Constants.DRIVER_CLASS, DataLakeGen2Constants.DEFAULT_PORT)), new DataLakeGen2QueryStringBuilder(QUOTE_CHARACTER, new DataLakeGen2FederationExpressionParser(QUOTE_CHARACTER)), configOptions); } @VisibleForTesting - DataLakeGen2RecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) + DataLakeGen2RecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); this.jdbcSplitQueryBuilder = Validate.notNull(jdbcSplitQueryBuilder, "query builder must not be null"); diff --git a/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MetadataHandlerTest.java b/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MetadataHandlerTest.java index c37359bab8..86ca3edf8b 100644 --- a/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MetadataHandlerTest.java +++ b/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MetadataHandlerTest.java @@ -39,9 +39,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.services.athena.AmazonAthena; -import 
com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest; -import com.amazonaws.services.secretsmanager.model.GetSecretValueResult; import org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Assert; @@ -50,6 +47,9 @@ import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; import java.sql.Connection; import java.sql.ResultSet; @@ -77,7 +77,7 @@ public class DataLakeGen2MetadataHandlerTest private JdbcConnectionFactory jdbcConnectionFactory; private Connection connection; private FederatedIdentity federatedIdentity; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; @Before @@ -89,9 +89,9 @@ public void setup() this.connection = Mockito.mock(Connection.class, Mockito.RETURNS_DEEP_STUBS); logger.info(" this.connection.."+ this.connection); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); - Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(new GetSecretValueRequest().withSecretId("testSecret")))).thenReturn(new GetSecretValueResult().withSecretString("{\"user\": \"testUser\", \"password\": \"testPassword\"}")); + Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"user\": \"testUser\", \"password\": 
\"testPassword\"}").build()); this.dataLakeGen2MetadataHandler = new DataLakeGen2MetadataHandler(databaseConnectionConfig, this.secretsManager, this.athena, this.jdbcConnectionFactory, com.google.common.collect.ImmutableMap.of()); this.federatedIdentity = Mockito.mock(FederatedIdentity.class); } diff --git a/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxMetadataHandlerTest.java b/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxMetadataHandlerTest.java index 0608abdec3..abe0a52465 100644 --- a/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxMetadataHandlerTest.java +++ b/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxMetadataHandlerTest.java @@ -33,10 +33,10 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Collections; import java.util.Map; @@ -49,7 +49,7 @@ public class DataLakeGen2MuxMetadataHandlerTest private DataLakeGen2MetadataHandler dataLakeGen2MetadataHandler; private JdbcMetadataHandler jdbcMetadataHandler; private BlockAllocator allocator; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -60,7 +60,7 @@ public void setup() this.allocator = new BlockAllocatorImpl(); this.dataLakeGen2MetadataHandler = Mockito.mock(DataLakeGen2MetadataHandler.class); this.metadataHandlerMap = Collections.singletonMap("fakedatabase", 
this.dataLakeGen2MetadataHandler); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxRecordHandlerTest.java b/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxRecordHandlerTest.java index 6b7f491bd0..437706fd7b 100644 --- a/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxRecordHandlerTest.java +++ b/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxRecordHandlerTest.java @@ -30,11 +30,11 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.SQLException; @@ -47,7 +47,7 @@ public class DataLakeGen2MuxRecordHandlerTest private DataLakeGen2RecordHandler dataLakeGen2RecordHandler; private JdbcRecordHandler jdbcRecordHandler; private AmazonS3 amazonS3; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -58,7 +58,7 @@ public void setup() this.dataLakeGen2RecordHandler = Mockito.mock(DataLakeGen2RecordHandler.class); this.recordHandlerMap = 
Collections.singletonMap(DataLakeGen2Constants.NAME, this.dataLakeGen2RecordHandler); this.amazonS3 = Mockito.mock(AmazonS3.class); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeRecordHandlerTest.java b/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeRecordHandlerTest.java index 1dd198ae89..2bfdabd1e9 100644 --- a/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeRecordHandlerTest.java +++ b/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeRecordHandlerTest.java @@ -33,7 +33,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Schema; @@ -41,6 +40,7 @@ import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.PreparedStatement; @@ -57,7 +57,7 @@ public class DataLakeRecordHandlerTest private JdbcConnectionFactory jdbcConnectionFactory; private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; private AmazonS3 amazonS3; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; @Before @@ -66,7 +66,7 @@ public void setup() { System.setProperty("aws.region", "us-east-1"); this.amazonS3 = 
Mockito.mock(AmazonS3.class); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.connection = Mockito.mock(Connection.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400MetadataHandler.java b/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400MetadataHandler.java index b083ceecb5..49fbe8da24 100644 --- a/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400MetadataHandler.java +++ b/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400MetadataHandler.java @@ -50,7 +50,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.PreparedStatementBuilder; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.complex.reader.FieldReader; @@ -60,6 +59,7 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.DatabaseMetaData; @@ -121,7 +121,7 @@ public Db2As400MetadataHandler( @VisibleForTesting protected Db2As400MetadataHandler( DatabaseConnectionConfig databaseConnectionConfig, - AWSSecretsManager secretsManager, + SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) diff --git a/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400MuxMetadataHandler.java 
b/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400MuxMetadataHandler.java index 490a72696b..360be18dc5 100644 --- a/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400MuxMetadataHandler.java +++ b/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400MuxMetadataHandler.java @@ -25,8 +25,8 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandlerFactory; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.util.VisibleForTesting; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -55,7 +55,7 @@ public Db2As400MuxMetadataHandler(java.util.Map configOptions) } @VisibleForTesting - protected Db2As400MuxMetadataHandler(AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + protected Db2As400MuxMetadataHandler(SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, Map metadataHandlerMap, DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { super(secretsManager, athena, jdbcConnectionFactory, metadataHandlerMap, databaseConnectionConfig, configOptions); diff --git a/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400MuxRecordHandler.java b/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400MuxRecordHandler.java index c2c19cc5d5..f59942a693 100644 --- a/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400MuxRecordHandler.java +++ b/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400MuxRecordHandler.java @@ -26,8 +26,8 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; import 
com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.annotations.VisibleForTesting; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -54,7 +54,7 @@ public Db2As400MuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - Db2As400MuxRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + Db2As400MuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400RecordHandler.java b/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400RecordHandler.java index e78ae1964b..37e04aa9e1 100644 --- a/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400RecordHandler.java +++ b/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400RecordHandler.java @@ -33,11 +33,10 @@ import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.commons.lang3.Validate; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import 
java.sql.PreparedStatement; @@ -60,13 +59,13 @@ public Db2As400RecordHandler(java.util.Map configOptions) */ public Db2As400RecordHandler(DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { - this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), AWSSecretsManagerClientBuilder.defaultClient(), AmazonAthenaClientBuilder.defaultClient(), + this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), new GenericJdbcConnectionFactory(databaseConnectionConfig, null, new DatabaseConnectionInfo(Db2As400Constants.DRIVER_CLASS, Db2As400Constants.DEFAULT_PORT)), new Db2As400QueryStringBuilder(QUOTE_CHARACTER), configOptions); } @VisibleForTesting - Db2As400RecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) + Db2As400RecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); this.jdbcSplitQueryBuilder = Validate.notNull(jdbcSplitQueryBuilder, "query builder must not be null"); diff --git a/athena-db2-as400/src/test/java/com/amazonaws/athena/connectors/db2as400/Db2As400MetadataHandlerTest.java b/athena-db2-as400/src/test/java/com/amazonaws/athena/connectors/db2as400/Db2As400MetadataHandlerTest.java index 5f16236d1a..57c8c7b9ec 100644 --- a/athena-db2-as400/src/test/java/com/amazonaws/athena/connectors/db2as400/Db2As400MetadataHandlerTest.java +++ 
b/athena-db2-as400/src/test/java/com/amazonaws/athena/connectors/db2as400/Db2As400MetadataHandlerTest.java @@ -42,9 +42,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest; -import com.amazonaws.services.secretsmanager.model.GetSecretValueResult; import org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Assert; @@ -53,6 +50,9 @@ import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; import java.sql.Connection; import java.sql.PreparedStatement; @@ -80,7 +80,7 @@ public class Db2As400MetadataHandlerTest extends TestBase { private JdbcConnectionFactory jdbcConnectionFactory; private Connection connection; private FederatedIdentity federatedIdentity; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private BlockAllocator blockAllocator; private AmazonAthena athena; @@ -91,9 +91,9 @@ public void setup() throws Exception { this.connection = Mockito.mock(Connection.class, Mockito.RETURNS_DEEP_STUBS); logger.info(" this.connection.."+ this.connection); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); - 
Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(new GetSecretValueRequest().withSecretId("testSecret")))).thenReturn(new GetSecretValueResult().withSecretString("{\"user\": \"testUser\", \"password\": \"testPassword\"}")); + Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"user\": \"testUser\", \"password\": \"testPassword\"}").build()); this.db2As400MetadataHandler = new Db2As400MetadataHandler(databaseConnectionConfig, this.secretsManager, this.athena, this.jdbcConnectionFactory, com.google.common.collect.ImmutableMap.of()); this.federatedIdentity = Mockito.mock(FederatedIdentity.class); this.blockAllocator = new BlockAllocatorImpl(); diff --git a/athena-db2-as400/src/test/java/com/amazonaws/athena/connectors/db2as400/Db2As400RecordHandlerTest.java b/athena-db2-as400/src/test/java/com/amazonaws/athena/connectors/db2as400/Db2As400RecordHandlerTest.java index fa2314b253..286c2564ec 100644 --- a/athena-db2-as400/src/test/java/com/amazonaws/athena/connectors/db2as400/Db2As400RecordHandlerTest.java +++ b/athena-db2-as400/src/test/java/com/amazonaws/athena/connectors/db2as400/Db2As400RecordHandlerTest.java @@ -33,7 +33,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Schema; @@ -41,6 +40,7 @@ import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.PreparedStatement; @@ -55,14 +55,14 @@ public class Db2As400RecordHandlerTest { private JdbcConnectionFactory 
jdbcConnectionFactory; private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; private AmazonS3 amazonS3; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; @Before public void setup() throws Exception { System.setProperty("aws.region", "us-east-1"); this.amazonS3 = Mockito.mock(AmazonS3.class); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.connection = Mockito.mock(Connection.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2MetadataHandler.java b/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2MetadataHandler.java index d5dec08242..0020ff3f59 100644 --- a/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2MetadataHandler.java +++ b/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2MetadataHandler.java @@ -56,7 +56,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.PreparedStatementBuilder; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; @@ -67,6 +66,7 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.DatabaseMetaData; @@ -130,7 +130,7 @@ public Db2MetadataHandler( @VisibleForTesting protected Db2MetadataHandler( DatabaseConnectionConfig databaseConnectionConfig, - AWSSecretsManager secretsManager, + SecretsManagerClient secretsManager, AmazonAthena 
athena, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) diff --git a/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2MuxMetadataHandler.java b/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2MuxMetadataHandler.java index ab596649ab..a2f60230fb 100644 --- a/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2MuxMetadataHandler.java +++ b/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2MuxMetadataHandler.java @@ -25,8 +25,8 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandlerFactory; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.util.VisibleForTesting; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -55,7 +55,7 @@ public Db2MuxMetadataHandler(java.util.Map configOptions) } @VisibleForTesting - protected Db2MuxMetadataHandler(AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + protected Db2MuxMetadataHandler(SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, Map metadataHandlerMap, DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { super(secretsManager, athena, jdbcConnectionFactory, metadataHandlerMap, databaseConnectionConfig, configOptions); diff --git a/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2MuxRecordHandler.java b/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2MuxRecordHandler.java index 1919316e39..c98fac2178 100644 --- a/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2MuxRecordHandler.java +++ b/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2MuxRecordHandler.java @@ -26,8 +26,8 @@ import 
com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.annotations.VisibleForTesting; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -54,7 +54,7 @@ public Db2MuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - Db2MuxRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + Db2MuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2RecordHandler.java b/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2RecordHandler.java index 442d19fee3..f8f40ad018 100644 --- a/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2RecordHandler.java +++ b/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2RecordHandler.java @@ -33,11 +33,10 @@ import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.commons.lang3.Validate; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import 
java.sql.PreparedStatement; @@ -61,13 +60,13 @@ public Db2RecordHandler(java.util.Map configOptions) */ public Db2RecordHandler(DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { - this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), AWSSecretsManagerClientBuilder.defaultClient(), AmazonAthenaClientBuilder.defaultClient(), + this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), new GenericJdbcConnectionFactory(databaseConnectionConfig, null, new DatabaseConnectionInfo(Db2Constants.DRIVER_CLASS, Db2Constants.DEFAULT_PORT)), new Db2QueryStringBuilder(QUOTE_CHARACTER, new Db2FederationExpressionParser(QUOTE_CHARACTER)), configOptions); } @VisibleForTesting - Db2RecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) + Db2RecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); this.jdbcSplitQueryBuilder = Validate.notNull(jdbcSplitQueryBuilder, "query builder must not be null"); diff --git a/athena-db2/src/test/java/com/amazonaws/athena/connectors/db2/Db2MetadataHandlerTest.java b/athena-db2/src/test/java/com/amazonaws/athena/connectors/db2/Db2MetadataHandlerTest.java index 02ff20fa93..c95c114801 100644 --- a/athena-db2/src/test/java/com/amazonaws/athena/connectors/db2/Db2MetadataHandlerTest.java +++ b/athena-db2/src/test/java/com/amazonaws/athena/connectors/db2/Db2MetadataHandlerTest.java @@ -42,9 +42,6 @@ import 
com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest; -import com.amazonaws.services.secretsmanager.model.GetSecretValueResult; import org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Assert; @@ -53,6 +50,9 @@ import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; import java.sql.Connection; import java.sql.PreparedStatement; @@ -81,7 +81,7 @@ public class Db2MetadataHandlerTest extends TestBase { private JdbcConnectionFactory jdbcConnectionFactory; private Connection connection; private FederatedIdentity federatedIdentity; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private BlockAllocator blockAllocator; private AmazonAthena athena; @@ -92,9 +92,9 @@ public void setup() throws Exception { this.connection = Mockito.mock(Connection.class, Mockito.RETURNS_DEEP_STUBS); logger.info(" this.connection.."+ this.connection); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); - Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(new GetSecretValueRequest().withSecretId("testSecret")))).thenReturn(new GetSecretValueResult().withSecretString("{\"user\": \"testUser\", 
\"password\": \"testPassword\"}")); + Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"user\": \"testUser\", \"password\": \"testPassword\"}").build()); this.db2MetadataHandler = new Db2MetadataHandler(databaseConnectionConfig, this.secretsManager, this.athena, this.jdbcConnectionFactory, com.google.common.collect.ImmutableMap.of()); this.federatedIdentity = Mockito.mock(FederatedIdentity.class); this.blockAllocator = new BlockAllocatorImpl(); diff --git a/athena-db2/src/test/java/com/amazonaws/athena/connectors/db2/Db2RecordHandlerTest.java b/athena-db2/src/test/java/com/amazonaws/athena/connectors/db2/Db2RecordHandlerTest.java index 801db06233..fc4c301c88 100644 --- a/athena-db2/src/test/java/com/amazonaws/athena/connectors/db2/Db2RecordHandlerTest.java +++ b/athena-db2/src/test/java/com/amazonaws/athena/connectors/db2/Db2RecordHandlerTest.java @@ -33,7 +33,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Schema; @@ -41,6 +40,7 @@ import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.PreparedStatement; @@ -56,14 +56,14 @@ public class Db2RecordHandlerTest { private JdbcConnectionFactory jdbcConnectionFactory; private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; private AmazonS3 amazonS3; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; @Before public void setup() throws Exception { 
System.setProperty("aws.region", "us-east-1"); this.amazonS3 = Mockito.mock(AmazonS3.class); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.connection = Mockito.mock(Connection.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBMetadataHandler.java b/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBMetadataHandler.java index 5a25b6f50c..381186bf45 100644 --- a/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBMetadataHandler.java +++ b/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBMetadataHandler.java @@ -46,7 +46,6 @@ import com.amazonaws.services.glue.AWSGlue; import com.amazonaws.services.glue.model.Database; import com.amazonaws.services.glue.model.Table; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.base.Strings; import com.google.common.collect.ImmutableMap; import com.mongodb.client.MongoClient; @@ -58,6 +57,7 @@ import org.bson.Document; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.ArrayList; import java.util.LinkedHashSet; @@ -117,7 +117,7 @@ protected DocDBMetadataHandler( AWSGlue glue, DocDBConnectionFactory connectionFactory, EncryptionKeyFactory keyFactory, - AWSSecretsManager secretsManager, + SecretsManagerClient secretsManager, AmazonAthena athena, String spillBucket, String spillPrefix, diff --git a/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandler.java b/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandler.java index ecba05bc18..0131d81cda 100644 --- 
a/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandler.java +++ b/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandler.java @@ -32,8 +32,6 @@ import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import com.mongodb.client.MongoClient; import com.mongodb.client.MongoCollection; import com.mongodb.client.MongoCursor; @@ -44,6 +42,7 @@ import org.bson.Document; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; import java.util.TreeMap; @@ -82,14 +81,14 @@ public DocDBRecordHandler(java.util.Map configOptions) { this( AmazonS3ClientBuilder.defaultClient(), - AWSSecretsManagerClientBuilder.defaultClient(), + SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), new DocDBConnectionFactory(), configOptions); } @VisibleForTesting - protected DocDBRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, DocDBConnectionFactory connectionFactory, java.util.Map configOptions) + protected DocDBRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, DocDBConnectionFactory connectionFactory, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, SOURCE_TYPE, configOptions); this.connectionFactory = connectionFactory; diff --git a/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/DocDBMetadataHandlerTest.java b/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/DocDBMetadataHandlerTest.java index 866ecf164b..9a65cb275d 100644 --- a/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/DocDBMetadataHandlerTest.java +++ 
b/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/DocDBMetadataHandlerTest.java @@ -41,7 +41,6 @@ import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.glue.AWSGlue; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.collect.ImmutableList; import com.mongodb.client.FindIterable; import com.mongodb.client.MongoClient; @@ -63,6 +62,7 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.ArrayList; import java.util.Arrays; @@ -103,7 +103,7 @@ public class DocDBMetadataHandlerTest private AWSGlue awsGlue; @Mock - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; @Mock private AmazonAthena mockAthena; diff --git a/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandlerTest.java b/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandlerTest.java index 18a1947c79..ea2a5da993 100644 --- a/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandlerTest.java +++ b/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandlerTest.java @@ -47,7 +47,6 @@ import com.amazonaws.services.s3.model.PutObjectResult; import com.amazonaws.services.s3.model.S3Object; import com.amazonaws.services.s3.model.S3ObjectInputStream; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.collect.ImmutableList; import com.google.common.io.ByteStreams; import com.mongodb.client.FindIterable; @@ -71,6 +70,7 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.ByteArrayInputStream; import 
java.io.InputStream; @@ -116,7 +116,7 @@ public class DocDBRecordHandlerTest private MongoClient mockClient; @Mock - private AWSSecretsManager mockSecretsManager; + private SecretsManagerClient mockSecretsManager; @Mock private AmazonAthena mockAthena; @@ -125,7 +125,7 @@ public class DocDBRecordHandlerTest private AWSGlue awsGlue; @Mock - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; @Mock MongoDatabase mockDatabase; diff --git a/athena-dynamodb/pom.xml b/athena-dynamodb/pom.xml index cb4d6d262b..c3e91e6e74 100644 --- a/athena-dynamodb/pom.xml +++ b/athena-dynamodb/pom.xml @@ -41,10 +41,12 @@ software.amazon.awssdk dynamodb + ${aws-sdk-v2.version} software.amazon.awssdk dynamodb-enhanced + ${aws-sdk-v2.version} com.amazonaws @@ -55,6 +57,7 @@ software.amazon.awssdk url-connection-client + ${aws-sdk-v2.version} test @@ -111,13 +114,10 @@ test-jar test - - software.amazon.awssdk - sdk-core - software.amazon.awssdk sts + ${aws-sdk-v2.version} diff --git a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBMetadataHandler.java b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBMetadataHandler.java index 4036d4176c..a82e3c78bd 100644 --- a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBMetadataHandler.java +++ b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBMetadataHandler.java @@ -60,7 +60,6 @@ import com.amazonaws.services.glue.AWSGlue; import com.amazonaws.services.glue.model.Database; import com.amazonaws.services.glue.model.Table; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.amazonaws.util.json.Jackson; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableMap; @@ -75,6 +74,7 @@ import software.amazon.awssdk.services.dynamodb.model.AttributeValue; import software.amazon.awssdk.services.dynamodb.model.ExecuteStatementRequest; 
import software.amazon.awssdk.services.dynamodb.model.ExecuteStatementResponse; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.ArrayList; import java.util.Collections; @@ -159,7 +159,7 @@ public DynamoDBMetadataHandler(java.util.Map configOptions) @VisibleForTesting DynamoDBMetadataHandler( EncryptionKeyFactory keyFactory, - AWSSecretsManager secretsManager, + SecretsManagerClient secretsManager, AmazonAthena athena, String spillBucket, String spillPrefix, diff --git a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBRecordHandler.java b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBRecordHandler.java index b06bb5aa1e..919e2b6c67 100644 --- a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBRecordHandler.java +++ b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBRecordHandler.java @@ -37,7 +37,6 @@ import com.amazonaws.athena.connectors.dynamodb.util.DDBTypeUtils; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.amazonaws.util.json.Jackson; import com.fasterxml.jackson.core.type.TypeReference; import com.google.common.annotations.VisibleForTesting; @@ -57,6 +56,7 @@ import software.amazon.awssdk.services.dynamodb.model.QueryResponse; import software.amazon.awssdk.services.dynamodb.model.ScanRequest; import software.amazon.awssdk.services.dynamodb.model.ScanResponse; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.IOException; import java.util.ArrayList; @@ -128,7 +128,7 @@ public ThrottlingInvoker load(String tableName) } @VisibleForTesting - DynamoDBRecordHandler(DynamoDbClient ddbClient, AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, String sourceType, java.util.Map configOptions) + 
DynamoDBRecordHandler(DynamoDbClient ddbClient, AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, String sourceType, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, sourceType, configOptions); this.ddbClient = ddbClient; diff --git a/athena-dynamodb/src/test/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBMetadataHandlerTest.java b/athena-dynamodb/src/test/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBMetadataHandlerTest.java index bff76405e8..df7935a8e2 100644 --- a/athena-dynamodb/src/test/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBMetadataHandlerTest.java +++ b/athena-dynamodb/src/test/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBMetadataHandlerTest.java @@ -55,7 +55,6 @@ import com.amazonaws.services.glue.model.GetTablesResult; import com.amazonaws.services.glue.model.StorageDescriptor; import com.amazonaws.services.glue.model.Table; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.amazonaws.util.json.Jackson; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; @@ -75,6 +74,7 @@ import org.slf4j.LoggerFactory; import software.amazon.awssdk.enhanced.dynamodb.document.EnhancedDocument; import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.time.Instant; import java.time.LocalDateTime; @@ -132,7 +132,7 @@ public class DynamoDBMetadataHandlerTest private AWSGlue glueClient; @Mock - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; @Mock private AmazonAthena athena; diff --git a/athena-dynamodb/src/test/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBRecordHandlerTest.java b/athena-dynamodb/src/test/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBRecordHandlerTest.java index d9f3f421b1..68362c087d 100644 --- 
a/athena-dynamodb/src/test/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBRecordHandlerTest.java +++ b/athena-dynamodb/src/test/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBRecordHandlerTest.java @@ -46,7 +46,6 @@ import com.amazonaws.services.glue.model.StorageDescriptor; import com.amazonaws.services.glue.model.Table; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.complex.impl.UnionListReader; @@ -69,6 +68,7 @@ import org.slf4j.LoggerFactory; import software.amazon.awssdk.enhanced.dynamodb.document.EnhancedDocument; import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.time.LocalDate; import java.time.LocalDateTime; @@ -124,7 +124,7 @@ public class DynamoDBRecordHandlerTest private AWSGlue glueClient; @Mock - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; @Mock private AmazonAthena athena; @@ -138,7 +138,7 @@ public void setup() logger.info("{}: enter", testName.getMethodName()); allocator = new BlockAllocatorImpl(); - handler = new DynamoDBRecordHandler(ddbClient, mock(AmazonS3.class), mock(AWSSecretsManager.class), mock(AmazonAthena.class), "source_type", com.google.common.collect.ImmutableMap.of()); + handler = new DynamoDBRecordHandler(ddbClient, mock(AmazonS3.class), mock(SecretsManagerClient.class), mock(AmazonAthena.class), "source_type", com.google.common.collect.ImmutableMap.of()); metadataHandler = new DynamoDBMetadataHandler(new LocalKeyFactory(), secretsManager, athena, "spillBucket", "spillPrefix", ddbClient, glueClient, com.google.common.collect.ImmutableMap.of()); } diff --git a/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchMetadataHandler.java 
b/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchMetadataHandler.java index c51e32c100..eb781ef5b9 100644 --- a/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchMetadataHandler.java +++ b/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchMetadataHandler.java @@ -43,7 +43,6 @@ import com.amazonaws.athena.connectors.elasticsearch.qpt.ElasticsearchQueryPassthrough; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.glue.AWSGlue; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.collect.ImmutableMap; import org.apache.arrow.util.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Field; @@ -54,6 +53,7 @@ import org.elasticsearch.client.indices.GetIndexResponse; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.IOException; import java.util.Arrays; @@ -132,7 +132,7 @@ public ElasticsearchMetadataHandler(Map configOptions) protected ElasticsearchMetadataHandler( AWSGlue awsGlue, EncryptionKeyFactory keyFactory, - AWSSecretsManager awsSecretsManager, + SecretsManagerClient awsSecretsManager, AmazonAthena athena, String spillBucket, String spillPrefix, diff --git a/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchRecordHandler.java b/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchRecordHandler.java index 2307ddcd46..7a76a2608f 100644 --- a/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchRecordHandler.java +++ b/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchRecordHandler.java @@ -31,8 +31,6 @@ import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import 
com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import org.apache.arrow.util.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Field; import org.elasticsearch.action.search.ClearScrollRequest; @@ -48,6 +46,7 @@ import org.elasticsearch.search.builder.SearchSourceBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.IOException; import java.util.Iterator; @@ -91,7 +90,7 @@ public class ElasticsearchRecordHandler public ElasticsearchRecordHandler(Map configOptions) { - super(AmazonS3ClientBuilder.defaultClient(), AWSSecretsManagerClientBuilder.defaultClient(), + super(AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), SOURCE_TYPE, configOptions); this.typeUtils = new ElasticsearchTypeUtils(); @@ -103,7 +102,7 @@ public ElasticsearchRecordHandler(Map configOptions) @VisibleForTesting protected ElasticsearchRecordHandler( AmazonS3 amazonS3, - AWSSecretsManager secretsManager, + SecretsManagerClient secretsManager, AmazonAthena amazonAthena, AwsRestHighLevelClientFactory clientFactory, long queryTimeout, diff --git a/athena-elasticsearch/src/test/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchMetadataHandlerTest.java b/athena-elasticsearch/src/test/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchMetadataHandlerTest.java index ce3c50ce6f..9478de2c5f 100644 --- a/athena-elasticsearch/src/test/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchMetadataHandlerTest.java +++ b/athena-elasticsearch/src/test/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchMetadataHandlerTest.java @@ -30,7 +30,6 @@ import 
com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.glue.AWSGlue; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; @@ -54,6 +53,7 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.IOException; import java.util.ArrayList; @@ -93,7 +93,7 @@ public class ElasticsearchMetadataHandlerTest private AWSGlue awsGlue; @Mock - private AWSSecretsManager awsSecretsManager; + private SecretsManagerClient awsSecretsManager; @Mock private AmazonAthena amazonAthena; diff --git a/athena-elasticsearch/src/test/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchRecordHandlerTest.java b/athena-elasticsearch/src/test/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchRecordHandlerTest.java index f7db1f124c..862b62c49c 100644 --- a/athena-elasticsearch/src/test/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchRecordHandlerTest.java +++ b/athena-elasticsearch/src/test/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchRecordHandlerTest.java @@ -43,7 +43,6 @@ import com.amazonaws.services.s3.model.PutObjectResult; import com.amazonaws.services.s3.model.S3Object; import com.amazonaws.services.s3.model.S3ObjectInputStream; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; @@ -67,6 +66,7 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import 
java.io.ByteArrayInputStream; import java.io.IOException; @@ -119,7 +119,7 @@ public class ElasticsearchRecordHandlerTest private AmazonS3 amazonS3; @Mock - private AWSSecretsManager awsSecretsManager; + private SecretsManagerClient awsSecretsManager; @Mock private AmazonAthena athena; diff --git a/athena-example/src/main/java/com/amazonaws/athena/connectors/example/ExampleMetadataHandler.java b/athena-example/src/main/java/com/amazonaws/athena/connectors/example/ExampleMetadataHandler.java index 21d2594da7..afa4f706fc 100644 --- a/athena-example/src/main/java/com/amazonaws/athena/connectors/example/ExampleMetadataHandler.java +++ b/athena-example/src/main/java/com/amazonaws/athena/connectors/example/ExampleMetadataHandler.java @@ -41,7 +41,6 @@ import com.amazonaws.athena.connector.lambda.metadata.optimizations.OptimizationSubType; import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.util.VisibleForTesting; import org.apache.arrow.vector.complex.reader.FieldReader; //DO NOT REMOVE - this will not be _unused_ when customers go through the tutorial and uncomment @@ -49,6 +48,7 @@ import org.apache.arrow.vector.types.Types; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.ArrayList; import java.util.Comparator; @@ -92,7 +92,7 @@ public ExampleMetadataHandler(java.util.Map configOptions) @VisibleForTesting protected ExampleMetadataHandler( EncryptionKeyFactory keyFactory, - AWSSecretsManager awsSecretsManager, + SecretsManagerClient awsSecretsManager, AmazonAthena athena, String spillBucket, String spillPrefix, diff --git a/athena-example/src/main/java/com/amazonaws/athena/connectors/example/ExampleRecordHandler.java 
b/athena-example/src/main/java/com/amazonaws/athena/connectors/example/ExampleRecordHandler.java index cc895cade2..02797b696d 100644 --- a/athena-example/src/main/java/com/amazonaws/athena/connectors/example/ExampleRecordHandler.java +++ b/athena-example/src/main/java/com/amazonaws/athena/connectors/example/ExampleRecordHandler.java @@ -39,13 +39,12 @@ import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.amazonaws.services.s3.model.S3Object; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import org.apache.arrow.util.VisibleForTesting; import org.apache.arrow.vector.FieldVector; import org.apache.arrow.vector.holders.NullableIntHolder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.BufferedReader; import java.io.IOException; @@ -81,11 +80,11 @@ public class ExampleRecordHandler public ExampleRecordHandler(java.util.Map configOptions) { - this(AmazonS3ClientBuilder.defaultClient(), AWSSecretsManagerClientBuilder.defaultClient(), AmazonAthenaClientBuilder.defaultClient(), configOptions); + this(AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), configOptions); } @VisibleForTesting - protected ExampleRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena amazonAthena, java.util.Map configOptions) + protected ExampleRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena amazonAthena, java.util.Map configOptions) { super(amazonS3, secretsManager, amazonAthena, SOURCE_TYPE, configOptions); this.amazonS3 = amazonS3; diff --git a/athena-example/src/test/java/com/amazonaws/athena/connectors/example/ExampleMetadataHandlerTest.java 
b/athena-example/src/test/java/com/amazonaws/athena/connectors/example/ExampleMetadataHandlerTest.java index 5f8ff32501..e78af2ccc5 100644 --- a/athena-example/src/test/java/com/amazonaws/athena/connectors/example/ExampleMetadataHandlerTest.java +++ b/athena-example/src/test/java/com/amazonaws/athena/connectors/example/ExampleMetadataHandlerTest.java @@ -44,7 +44,6 @@ import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.collect.ImmutableList; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Schema; @@ -53,6 +52,7 @@ import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.ArrayList; import java.util.Collections; @@ -72,7 +72,7 @@ public class ExampleMetadataHandlerTest private static final Logger logger = LoggerFactory.getLogger(ExampleMetadataHandlerTest.class); private ExampleMetadataHandler handler = new ExampleMetadataHandler(new LocalKeyFactory(), - mock(AWSSecretsManager.class), + mock(SecretsManagerClient.class), mock(AmazonAthena.class), "spill-bucket", "spill-prefix", diff --git a/athena-example/src/test/java/com/amazonaws/athena/connectors/example/ExampleRecordHandlerTest.java b/athena-example/src/test/java/com/amazonaws/athena/connectors/example/ExampleRecordHandlerTest.java index 2d597c4632..7bddf4bfcd 100644 --- a/athena-example/src/test/java/com/amazonaws/athena/connectors/example/ExampleRecordHandlerTest.java +++ b/athena-example/src/test/java/com/amazonaws/athena/connectors/example/ExampleRecordHandlerTest.java @@ -37,7 +37,6 @@ import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.model.S3Object; import 
com.amazonaws.services.s3.model.S3ObjectInputStream; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.After; @@ -49,6 +48,7 @@ import org.mockito.stubbing.Answer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.ByteArrayInputStream; import java.io.UnsupportedEncodingException; @@ -73,7 +73,7 @@ public class ExampleRecordHandlerTest private BlockAllocatorImpl allocator; private Schema schemaForRead; private AmazonS3 amazonS3; - private AWSSecretsManager awsSecretsManager; + private SecretsManagerClient awsSecretsManager; private AmazonAthena athena; private S3BlockSpillReader spillReader; @@ -106,7 +106,7 @@ public void setUp() allocator = new BlockAllocatorImpl(); amazonS3 = mock(AmazonS3.class); - awsSecretsManager = mock(AWSSecretsManager.class); + awsSecretsManager = mock(SecretsManagerClient.class); athena = mock(AmazonAthena.class); when(amazonS3.doesObjectExist(nullable(String.class), nullable(String.class))).thenReturn(true); diff --git a/athena-federation-integ-test/pom.xml b/athena-federation-integ-test/pom.xml index dc5db5c2d1..3bf267087b 100644 --- a/athena-federation-integ-test/pom.xml +++ b/athena-federation-integ-test/pom.xml @@ -136,6 +136,11 @@ guava ${guava.version} + + software.amazon.awssdk + secretsmanager + ${aws-sdk-v2.version} + org.testng @@ -154,12 +159,6 @@ aws-java-sdk-cloudformation ${aws-sdk.version} - - - com.amazonaws - aws-java-sdk-secretsmanager - ${aws-sdk.version} - software.amazon.awscdk diff --git a/athena-federation-integ-test/src/main/java/com/amazonaws/athena/connector/integ/providers/SecretsManagerCredentialsProvider.java b/athena-federation-integ-test/src/main/java/com/amazonaws/athena/connector/integ/providers/SecretsManagerCredentialsProvider.java index cabc1afa66..c2b5f4af13 100644 --- 
a/athena-federation-integ-test/src/main/java/com/amazonaws/athena/connector/integ/providers/SecretsManagerCredentialsProvider.java +++ b/athena-federation-integ-test/src/main/java/com/amazonaws/athena/connector/integ/providers/SecretsManagerCredentialsProvider.java @@ -21,11 +21,10 @@ import com.amazonaws.athena.connector.integ.data.SecretsManagerCredentials; import com.amazonaws.athena.connector.integ.data.TestConfig; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; -import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest; -import com.amazonaws.services.secretsmanager.model.GetSecretValueResult; import com.fasterxml.jackson.databind.ObjectMapper; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; import java.io.IOException; import java.util.HashMap; @@ -55,23 +54,21 @@ public static Optional getCredentials(TestConfig test if (secretsManagerSecret.isPresent()) { String secret = secretsManagerSecret.get(); - AWSSecretsManager secretsManager = AWSSecretsManagerClientBuilder.defaultClient(); + SecretsManagerClient secretsManager = SecretsManagerClient.create(); try { - GetSecretValueResult secretValueResult = secretsManager.getSecretValue(new GetSecretValueRequest() - .withSecretId(secret)); + GetSecretValueResponse secretValueResult = secretsManager.getSecretValue(GetSecretValueRequest.builder() + .secretId(secret) + .build()); ObjectMapper objectMapper = new ObjectMapper(); - Map credentials = objectMapper.readValue(secretValueResult.getSecretString(), + Map credentials = objectMapper.readValue(secretValueResult.secretString(), HashMap.class); return Optional.of(new SecretsManagerCredentials(secret, credentials.get("username"), - credentials.get("password"), 
secretValueResult.getARN())); + credentials.get("password"), secretValueResult.arn())); } catch (IOException e) { throw new RuntimeException(String.format("Unable to parse SecretsManager secret (%s): %s", secret, e.getMessage()), e); } - finally { - secretsManager.shutdown(); - } } return Optional.empty(); diff --git a/athena-federation-sdk/pom.xml b/athena-federation-sdk/pom.xml index 4db6670cc7..932614edbb 100644 --- a/athena-federation-sdk/pom.xml +++ b/athena-federation-sdk/pom.xml @@ -108,9 +108,9 @@ - com.amazonaws - aws-java-sdk-secretsmanager - ${aws-sdk.version} + software.amazon.awssdk + secretsmanager + ${aws-sdk-v2.version} diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/GlueMetadataHandler.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/GlueMetadataHandler.java index e33a639ea7..c639f4f197 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/GlueMetadataHandler.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/GlueMetadataHandler.java @@ -44,7 +44,6 @@ import com.amazonaws.services.glue.model.GetTablesRequest; import com.amazonaws.services.glue.model.GetTablesResult; import com.amazonaws.services.glue.model.Table; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.base.Splitter; import com.google.common.base.Strings; import com.google.common.collect.ImmutableMap; @@ -54,6 +53,7 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.ArrayList; import java.util.HashMap; @@ -179,7 +179,7 @@ public GlueMetadataHandler(AWSGlue awsGlue, String sourceType, java.util.Map configOp logger.debug("ENABLE_SPILL_ENCRYPTION with encryption factory: " + encryptionKeyFactory.getClass().getSimpleName()); } - 
this.secretsManager = new CachableSecretsManager(AWSSecretsManagerClientBuilder.defaultClient()); + this.secretsManager = new CachableSecretsManager(SecretsManagerClient.create()); this.athena = AmazonAthenaClientBuilder.defaultClient(); this.verifier = new SpillLocationVerifier(AmazonS3ClientBuilder.standard().build()); this.athenaInvoker = ThrottlingInvoker.newDefaultBuilder(ATHENA_EXCEPTION_FILTER, configOptions).build(); @@ -162,7 +161,7 @@ public MetadataHandler(String sourceType, java.util.Map configOp */ public MetadataHandler( EncryptionKeyFactory encryptionKeyFactory, - AWSSecretsManager secretsManager, + SecretsManagerClient secretsManager, AmazonAthena athena, String sourceType, String spillBucket, diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/RecordHandler.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/RecordHandler.java index f3b47a1a45..8644615f18 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/RecordHandler.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/RecordHandler.java @@ -46,11 +46,10 @@ import com.amazonaws.services.lambda.runtime.RequestStreamHandler; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import com.fasterxml.jackson.databind.ObjectMapper; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.IOException; import java.io.InputStream; @@ -84,7 +83,7 @@ public RecordHandler(String sourceType, java.util.Map configOpti { this.sourceType = sourceType; this.amazonS3 = AmazonS3ClientBuilder.defaultClient(); - this.secretsManager = new 
CachableSecretsManager(AWSSecretsManagerClientBuilder.defaultClient()); + this.secretsManager = new CachableSecretsManager(SecretsManagerClient.create()); this.athena = AmazonAthenaClientBuilder.defaultClient(); this.configOptions = configOptions; this.athenaInvoker = ThrottlingInvoker.newDefaultBuilder(ATHENA_EXCEPTION_FILTER, configOptions).build(); @@ -93,7 +92,7 @@ public RecordHandler(String sourceType, java.util.Map configOpti /** * @param sourceType Used to aid in logging diagnostic info when raising a support case. */ - public RecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, String sourceType, java.util.Map configOptions) + public RecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, String sourceType, java.util.Map configOptions) { this.sourceType = sourceType; this.amazonS3 = amazonS3; diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/security/CachableSecretsManager.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/security/CachableSecretsManager.java index e557210bff..00ccf90900 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/security/CachableSecretsManager.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/security/CachableSecretsManager.java @@ -20,12 +20,12 @@ * #L% */ -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest; -import com.amazonaws.services.secretsmanager.model.GetSecretValueResult; import org.apache.arrow.util.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; import 
java.util.Iterator; import java.util.LinkedHashMap; @@ -52,9 +52,9 @@ public class CachableSecretsManager private static final Pattern NAME_PATTERN = Pattern.compile(SECRET_NAME_PATTERN); private final LinkedHashMap cache = new LinkedHashMap<>(); - private final AWSSecretsManager secretsManager; + private final SecretsManagerClient secretsManager; - public CachableSecretsManager(AWSSecretsManager secretsManager) + public CachableSecretsManager(SecretsManagerClient secretsManager) { this.secretsManager = secretsManager; } @@ -97,9 +97,10 @@ public String getSecret(String secretName) if (cacheEntry == null || cacheEntry.getAge() > MAX_CACHE_AGE_MS) { logger.info("getSecret: Resolving secret[{}].", secretName); - GetSecretValueResult secretValueResult = secretsManager.getSecretValue(new GetSecretValueRequest() - .withSecretId(secretName)); - cacheEntry = new CacheEntry(secretName, secretValueResult.getSecretString()); + GetSecretValueResponse secretValueResult = secretsManager.getSecretValue(GetSecretValueRequest.builder() + .secretId(secretName) + .build()); + cacheEntry = new CacheEntry(secretName, secretValueResult.secretString()); evictCache(cache.size() >= MAX_CACHE_SIZE); cache.put(secretName, cacheEntry); } diff --git a/athena-federation-sdk/src/test/java/com/amazonaws/athena/connector/lambda/handlers/GlueMetadataHandlerTest.java b/athena-federation-sdk/src/test/java/com/amazonaws/athena/connector/lambda/handlers/GlueMetadataHandlerTest.java index 4c8409877c..20c0e5819d 100644 --- a/athena-federation-sdk/src/test/java/com/amazonaws/athena/connector/lambda/handlers/GlueMetadataHandlerTest.java +++ b/athena-federation-sdk/src/test/java/com/amazonaws/athena/connector/lambda/handlers/GlueMetadataHandlerTest.java @@ -53,8 +53,10 @@ import com.amazonaws.services.glue.model.StorageDescriptor; import com.amazonaws.services.glue.model.Table; import com.amazonaws.services.lambda.runtime.Context; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import 
com.google.common.collect.ImmutableList; + +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; + import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.After; @@ -149,7 +151,7 @@ public void setUp() handler = new GlueMetadataHandler(mockGlue, new LocalKeyFactory(), - mock(AWSSecretsManager.class), + mock(SecretsManagerClient.class), mock(AmazonAthena.class), "glue-test", "spill-bucket", diff --git a/athena-federation-sdk/src/test/java/com/amazonaws/athena/connector/lambda/security/CacheableSecretsManagerTest.java b/athena-federation-sdk/src/test/java/com/amazonaws/athena/connector/lambda/security/CacheableSecretsManagerTest.java index 181945f55f..3749e6edd2 100644 --- a/athena-federation-sdk/src/test/java/com/amazonaws/athena/connector/lambda/security/CacheableSecretsManagerTest.java +++ b/athena-federation-sdk/src/test/java/com/amazonaws/athena/connector/lambda/security/CacheableSecretsManagerTest.java @@ -20,9 +20,10 @@ * #L% */ -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest; -import com.amazonaws.services.secretsmanager.model.GetSecretValueResult; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; + import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -39,14 +40,14 @@ public class CacheableSecretsManagerTest { - private AWSSecretsManager mockSecretsManager; + private SecretsManagerClient mockSecretsManager; private CachableSecretsManager cachableSecretsManager; @Before public void setup() { - mockSecretsManager = mock(AWSSecretsManager.class); + mockSecretsManager = mock(SecretsManagerClient.class); cachableSecretsManager = new CachableSecretsManager(mockSecretsManager); } @@ 
-67,8 +68,8 @@ public void expirationTest() when(mockSecretsManager.getSecretValue(nullable(GetSecretValueRequest.class))) .thenAnswer((InvocationOnMock invocation) -> { GetSecretValueRequest request = invocation.getArgument(0, GetSecretValueRequest.class); - if (request.getSecretId().equalsIgnoreCase("test")) { - return new GetSecretValueResult().withSecretString("value2"); + if (request.secretId().equalsIgnoreCase("test")) { + return GetSecretValueResponse.builder().secretString("value2").build(); } throw new RuntimeException(); }); @@ -86,7 +87,7 @@ public void evictionTest() when(mockSecretsManager.getSecretValue(nullable(GetSecretValueRequest.class))) .thenAnswer((InvocationOnMock invocation) -> { GetSecretValueRequest request = invocation.getArgument(0, GetSecretValueRequest.class); - return new GetSecretValueResult().withSecretString(request.getSecretId() + "_value"); + return GetSecretValueResponse.builder().secretString(request.secretId() + "_value").build(); }); assertEquals("test_value", cachableSecretsManager.getSecret("test")); @@ -101,11 +102,11 @@ public void resolveSecrets() when(mockSecretsManager.getSecretValue(nullable(GetSecretValueRequest.class))) .thenAnswer((InvocationOnMock invocation) -> { GetSecretValueRequest request = invocation.getArgument(0, GetSecretValueRequest.class); - String result = request.getSecretId(); + String result = request.secretId(); if (result.equalsIgnoreCase("unknown")) { throw new RuntimeException("Unknown secret!"); } - return new GetSecretValueResult().withSecretString(result); + return GetSecretValueResponse.builder().secretString(result).build(); }); String oneSecret = "${OneSecret}"; diff --git a/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsMetadataHandler.java b/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsMetadataHandler.java index 231b5763a3..1fcb52c7b1 100644 --- a/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsMetadataHandler.java +++ 
b/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsMetadataHandler.java @@ -44,7 +44,6 @@ import com.amazonaws.services.glue.model.Column; import com.amazonaws.services.glue.model.Database; import com.amazonaws.services.glue.model.Table; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.arrow.memory.BufferAllocator; import org.apache.arrow.util.VisibleForTesting; @@ -52,6 +51,7 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.IOException; import java.net.URI; @@ -100,7 +100,7 @@ public GcsMetadataHandler(BufferAllocator allocator, java.util.Map configOptions) { this(AmazonS3ClientBuilder.defaultClient(), - AWSSecretsManagerClientBuilder.defaultClient(), + SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), configOptions); this.allocator = allocator; } @@ -91,7 +90,7 @@ public GcsRecordHandler(BufferAllocator allocator, java.util.Map * @param amazonAthena An instance of AmazonAthena */ @VisibleForTesting - protected GcsRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena amazonAthena, java.util.Map configOptions) + protected GcsRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena amazonAthena, java.util.Map configOptions) { super(amazonS3, secretsManager, amazonAthena, SOURCE_TYPE, configOptions); this.invoker = ThrottlingInvoker.newDefaultBuilder(EXCEPTION_FILTER, configOptions).build(); diff --git a/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsUtil.java b/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsUtil.java index 7a273585e2..121ea96a15 100644 --- a/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsUtil.java +++ b/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsUtil.java 
@@ -26,12 +26,12 @@ import com.amazonaws.services.glue.model.GetTableRequest; import com.amazonaws.services.glue.model.GetTableResult; import com.amazonaws.services.glue.model.Table; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import com.sun.jna.platform.unix.LibC; import org.apache.arrow.vector.FieldVector; import org.apache.arrow.vector.types.pojo.ArrowType; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import javax.net.ssl.TrustManager; import javax.net.ssl.TrustManagerFactory; @@ -100,7 +100,7 @@ public static void installCaCertificate() throws IOException, NoSuchAlgorithmExc */ public static void installGoogleCredentialsJsonFile(java.util.Map configOptions) throws IOException { - CachableSecretsManager secretsManager = new CachableSecretsManager(AWSSecretsManagerClientBuilder.defaultClient()); + CachableSecretsManager secretsManager = new CachableSecretsManager(SecretsManagerClient.create()); String gcsCredentialsJsonString = secretsManager.getSecret(configOptions.get(GCS_SECRET_KEY_ENV_VAR)); File destination = new File(GOOGLE_SERVICE_ACCOUNT_JSON_TEMP_FILE_LOCATION_VALUE); boolean destinationDirExists = new File(destination.getParent()).mkdirs(); diff --git a/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsCompositeHandlerTest.java b/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsCompositeHandlerTest.java index 1fa7d68b61..5a6d3e0fc8 100644 --- a/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsCompositeHandlerTest.java +++ b/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsCompositeHandlerTest.java @@ -21,9 +21,6 @@ import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; -import 
com.amazonaws.services.secretsmanager.model.GetSecretValueResult; import com.google.auth.oauth2.GoogleCredentials; import com.google.auth.oauth2.ServiceAccountCredentials; import org.junit.jupiter.api.AfterAll; @@ -31,6 +28,9 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestInstance; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; import java.io.IOException; import java.security.KeyStoreException; @@ -46,15 +46,15 @@ @TestInstance(PER_CLASS) public class GcsCompositeHandlerTest extends GenericGcsTest { - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private ServiceAccountCredentials serviceAccountCredentials; private GoogleCredentials credentials; @BeforeAll public void init() { super.initCommonMockedStatic(); - secretsManager = Mockito.mock(AWSSecretsManager.class); - mockedSecretManagerBuilder.when(AWSSecretsManagerClientBuilder::defaultClient).thenReturn(secretsManager); + secretsManager = Mockito.mock(SecretsManagerClient.class); + mockedSecretManagerBuilder.when(SecretsManagerClient::create).thenReturn(secretsManager); serviceAccountCredentials = Mockito.mock(ServiceAccountCredentials.class); mockedServiceAccountCredentials.when(() -> ServiceAccountCredentials.fromStream(Mockito.any())).thenReturn(serviceAccountCredentials); credentials = Mockito.mock(GoogleCredentials.class); @@ -74,8 +74,11 @@ public void cleanUp() { @Test public void testGcsCompositeHandler() throws IOException, CertificateEncodingException, NoSuchAlgorithmException, KeyStoreException { - GetSecretValueResult getSecretValueResult = new GetSecretValueResult().withVersionStages(com.google.common.collect.ImmutableList.of("v1")).withSecretString("{\"gcs_credential_keys\": \"test\"}"); - 
when(secretsManager.getSecretValue(Mockito.any())).thenReturn(getSecretValueResult); + GetSecretValueResponse getSecretValueResponse = GetSecretValueResponse.builder() + .versionStages(com.google.common.collect.ImmutableList.of("v1")) + .secretString("{\"gcs_credential_keys\": \"test\"}") + .build(); + when(secretsManager.getSecretValue(Mockito.isA(GetSecretValueRequest.class))).thenReturn(getSecretValueResponse); when(ServiceAccountCredentials.fromStream(Mockito.any())).thenReturn(serviceAccountCredentials); when(credentials.createScoped((Collection) any())).thenReturn(credentials); GcsCompositeHandler gcsCompositeHandler = new GcsCompositeHandler(); diff --git a/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsMetadataHandlerTest.java b/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsMetadataHandlerTest.java index 84005f8d02..3f9e4ab632 100644 --- a/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsMetadataHandlerTest.java +++ b/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsMetadataHandlerTest.java @@ -50,9 +50,6 @@ import com.amazonaws.services.glue.model.GetTablesResult; import com.amazonaws.services.glue.model.StorageDescriptor; import com.amazonaws.services.glue.model.Table; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; -import com.amazonaws.services.secretsmanager.model.GetSecretValueResult; import com.google.api.gax.paging.Page; import com.google.auth.oauth2.GoogleCredentials; import com.google.auth.oauth2.ServiceAccountCredentials; @@ -77,6 +74,9 @@ import org.mockito.Mockito; import org.mockito.MockitoAnnotations; import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; +import 
software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; import java.util.ArrayList; import java.util.Collection; @@ -130,7 +130,7 @@ public class GcsMetadataHandlerTest @Mock private AWSGlue awsGlue; @Mock - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; @Mock private ServiceAccountCredentials serviceAccountCredentials; @Mock @@ -139,7 +139,7 @@ public class GcsMetadataHandlerTest private MockedStatic mockedStorageOptions; private MockedStatic mockedServiceAccountCredentials; private MockedStatic mockedServiceGoogleCredentials; - private MockedStatic mockedAWSSecretsManagerClientBuilder; + private MockedStatic mockedAWSSecretsManagerClientBuilder; private MockedStatic mockedAWSGlueClientBuilder; @Before @@ -149,7 +149,7 @@ public void setUp() throws Exception mockedStorageOptions = mockStatic(StorageOptions.class); mockedServiceAccountCredentials = mockStatic(ServiceAccountCredentials.class); mockedServiceGoogleCredentials = mockStatic(GoogleCredentials.class); - mockedAWSSecretsManagerClientBuilder = mockStatic(AWSSecretsManagerClientBuilder.class); + mockedAWSSecretsManagerClientBuilder = mockStatic(SecretsManagerClient.class); mockedAWSGlueClientBuilder = mockStatic(AWSGlueClientBuilder.class); Storage storage = mock(Storage.class); @@ -170,9 +170,12 @@ public void setUp() throws Exception Mockito.when(GoogleCredentials.fromStream(Mockito.any())).thenReturn(credentials); Mockito.when(credentials.createScoped((Collection) any())).thenReturn(credentials); - Mockito.when(AWSSecretsManagerClientBuilder.defaultClient()).thenReturn(secretsManager); - GetSecretValueResult getSecretValueResult = new GetSecretValueResult().withVersionStages(ImmutableList.of("v1")).withSecretString("{\"gcs_credential_keys\": \"test\"}"); - Mockito.when(secretsManager.getSecretValue(Mockito.any())).thenReturn(getSecretValueResult); + Mockito.when(SecretsManagerClient.create()).thenReturn(secretsManager); + 
GetSecretValueResponse getSecretValueResponse = GetSecretValueResponse.builder() + .versionStages(ImmutableList.of("v1")) + .secretString("{\"gcs_credential_keys\": \"test\"}") + .build(); + Mockito.when(secretsManager.getSecretValue(Mockito.isA(GetSecretValueRequest.class))).thenReturn(getSecretValueResponse); Mockito.when(AWSGlueClientBuilder.defaultClient()).thenReturn(awsGlue); gcsMetadataHandler = new GcsMetadataHandler(new LocalKeyFactory(), secretsManager, athena, "spillBucket", "spillPrefix", awsGlue, allocator, ImmutableMap.of()); blockAllocator = new BlockAllocatorImpl(); diff --git a/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsRecordHandlerTest.java b/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsRecordHandlerTest.java index 60274ad4f7..ee011e00ef 100644 --- a/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsRecordHandlerTest.java +++ b/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsRecordHandlerTest.java @@ -38,8 +38,6 @@ import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import com.google.auth.oauth2.GoogleCredentials; import org.apache.arrow.memory.BufferAllocator; import org.apache.arrow.memory.RootAllocator; @@ -52,6 +50,7 @@ import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.File; import java.util.Collections; @@ -73,7 +72,7 @@ public class GcsRecordHandlerTest extends GenericGcsTest private static final Logger LOGGER = LoggerFactory.getLogger(GcsRecordHandlerTest.class); @Mock - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; @Mock private AmazonAthena athena; @@ 
-126,8 +125,8 @@ public void initCommonMockedStatic() .build(); // To mock AmazonS3 via AmazonS3ClientBuilder mockedS3Builder.when(AmazonS3ClientBuilder::defaultClient).thenReturn(amazonS3); - // To mock AWSSecretsManager via AWSSecretsManagerClientBuilder - mockedSecretManagerBuilder.when(AWSSecretsManagerClientBuilder::defaultClient).thenReturn(secretsManager); + // To mock SecretsManagerClient via SecretsManagerClient + mockedSecretManagerBuilder.when(SecretsManagerClient::create).thenReturn(secretsManager); // To mock AmazonAthena via AmazonAthenaClientBuilder mockedAthenaClientBuilder.when(AmazonAthenaClientBuilder::defaultClient).thenReturn(athena); mockedGoogleCredentials.when(() -> GoogleCredentials.fromStream(any())).thenReturn(credentials); diff --git a/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GenericGcsTest.java b/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GenericGcsTest.java index bd8853cb23..35826badda 100644 --- a/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GenericGcsTest.java +++ b/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GenericGcsTest.java @@ -21,18 +21,18 @@ import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import com.google.auth.oauth2.GoogleCredentials; import com.google.auth.oauth2.ServiceAccountCredentials; import org.mockito.MockedStatic; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.lang.reflect.Field; public class GenericGcsTest { protected MockedStatic mockedS3Builder; - protected MockedStatic mockedSecretManagerBuilder; + protected MockedStatic mockedSecretManagerBuilder; protected MockedStatic mockedAthenaClientBuilder; protected MockedStatic mockedGoogleCredentials; protected MockedStatic mockedGcsUtil; @@ -42,7 +42,7 @@ public class GenericGcsTest 
protected void initCommonMockedStatic() { mockedS3Builder = Mockito.mockStatic(AmazonS3ClientBuilder.class); - mockedSecretManagerBuilder = Mockito.mockStatic(AWSSecretsManagerClientBuilder.class); + mockedSecretManagerBuilder = Mockito.mockStatic(SecretsManagerClient.class); mockedAthenaClientBuilder = Mockito.mockStatic(AmazonAthenaClientBuilder.class); mockedGoogleCredentials = Mockito.mockStatic(GoogleCredentials.class); mockedGcsUtil = Mockito.mockStatic(GcsUtil.class); diff --git a/athena-google-bigquery/src/main/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryRecordHandler.java b/athena-google-bigquery/src/main/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryRecordHandler.java index c2fde107cf..7db7915cd3 100644 --- a/athena-google-bigquery/src/main/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryRecordHandler.java +++ b/athena-google-bigquery/src/main/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryRecordHandler.java @@ -33,8 +33,6 @@ import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import com.google.api.gax.rpc.ServerStream; import com.google.cloud.bigquery.BigQuery; import com.google.cloud.bigquery.BigQueryException; @@ -62,6 +60,7 @@ import org.apache.arrow.vector.types.pojo.Field; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.IOException; import java.util.ArrayList; @@ -94,12 +93,12 @@ public class BigQueryRecordHandler BigQueryRecordHandler(java.util.Map configOptions, BufferAllocator allocator) { this(AmazonS3ClientBuilder.defaultClient(), - AWSSecretsManagerClientBuilder.defaultClient(), + SecretsManagerClient.create(), 
AmazonAthenaClientBuilder.defaultClient(), configOptions, allocator); } @VisibleForTesting - public BigQueryRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, java.util.Map configOptions, BufferAllocator allocator) + public BigQueryRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, java.util.Map configOptions, BufferAllocator allocator) { super(amazonS3, secretsManager, athena, BigQueryConstants.SOURCE_TYPE, configOptions); this.invoker = ThrottlingInvoker.newDefaultBuilder(EXCEPTION_FILTER, configOptions).build(); diff --git a/athena-google-bigquery/src/main/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryUtils.java b/athena-google-bigquery/src/main/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryUtils.java index 6f1cd97870..36d3e8d51e 100644 --- a/athena-google-bigquery/src/main/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryUtils.java +++ b/athena-google-bigquery/src/main/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryUtils.java @@ -22,10 +22,6 @@ import com.amazonaws.athena.connector.lambda.data.DateTimeFormatterUtil; import com.amazonaws.athena.connector.lambda.security.CachableSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; -import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest; -import com.amazonaws.services.secretsmanager.model.GetSecretValueResult; import com.google.api.gax.paging.Page; import com.google.auth.Credentials; import com.google.auth.oauth2.ServiceAccountCredentials; @@ -48,6 +44,9 @@ import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; +import 
software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; import java.io.ByteArrayInputStream; import java.io.File; @@ -81,11 +80,10 @@ private BigQueryUtils() public static Credentials getCredentialsFromSecretsManager(java.util.Map configOptions) throws IOException { - AWSSecretsManager secretsManager = AWSSecretsManagerClientBuilder.defaultClient(); - GetSecretValueRequest getSecretValueRequest = new GetSecretValueRequest(); - getSecretValueRequest.setSecretId(getEnvBigQueryCredsSmId(configOptions)); - GetSecretValueResult response = secretsManager.getSecretValue(getSecretValueRequest); - return ServiceAccountCredentials.fromStream(new ByteArrayInputStream(response.getSecretString().getBytes())).createScoped( + SecretsManagerClient secretsManager = SecretsManagerClient.create(); + GetSecretValueRequest getSecretValueRequest = GetSecretValueRequest.builder().secretId(getEnvBigQueryCredsSmId(configOptions)).build(); + GetSecretValueResponse response = secretsManager.getSecretValue(getSecretValueRequest); + return ServiceAccountCredentials.fromStream(new ByteArrayInputStream(response.secretString().getBytes())).createScoped( ImmutableSet.of( "https://www.googleapis.com/auth/bigquery", "https://www.googleapis.com/auth/drive")); @@ -213,7 +211,7 @@ else if (subField.getType().getStandardType().name().equalsIgnoreCase("Struct")) */ public static void installGoogleCredentialsJsonFile(java.util.Map configOptions) throws IOException { - CachableSecretsManager secretsManager = new CachableSecretsManager(AWSSecretsManagerClientBuilder.defaultClient()); + CachableSecretsManager secretsManager = new CachableSecretsManager(SecretsManagerClient.create()); String gcsCredentialsJsonString = secretsManager.getSecret(configOptions.get(BigQueryConstants.ENV_BIG_QUERY_CREDS_SM_ID)); File destination = new File(TMP_SERVICE_ACCOUNT_JSON); boolean destinationDirExists = new File(destination.getParent()).mkdirs(); diff --git 
a/athena-google-bigquery/src/test/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryCompositeHandlerTest.java b/athena-google-bigquery/src/test/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryCompositeHandlerTest.java index 266598fe38..047c4118bc 100644 --- a/athena-google-bigquery/src/test/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryCompositeHandlerTest.java +++ b/athena-google-bigquery/src/test/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryCompositeHandlerTest.java @@ -19,9 +19,6 @@ */ package com.amazonaws.athena.connectors.google.bigquery; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; -import com.amazonaws.services.secretsmanager.model.GetSecretValueResult; import com.google.auth.oauth2.ServiceAccountCredentials; import org.junit.After; import org.junit.Before; @@ -31,6 +28,9 @@ import org.mockito.MockedStatic; import org.mockito.Mockito; import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; import java.io.IOException; import java.util.Arrays; @@ -47,12 +47,12 @@ public class BigQueryCompositeHandlerTest System.setProperty("aws.region", "us-east-1"); } - MockedStatic awsSecretManagerClient; + MockedStatic awsSecretManagerClient; MockedStatic serviceAccountCredentialsStatic; MockedStatic bigQueryUtils; private BigQueryCompositeHandler bigQueryCompositeHandler; @Mock - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; @Mock private ServiceAccountCredentials serviceAccountCredentials; @@ -61,7 +61,7 @@ public void setUp() { bigQueryUtils = mockStatic(BigQueryUtils.class); serviceAccountCredentialsStatic = 
mockStatic(ServiceAccountCredentials.class); - awsSecretManagerClient = mockStatic(AWSSecretsManagerClientBuilder.class); + awsSecretManagerClient = mockStatic(SecretsManagerClient.class); } @After @@ -77,15 +77,18 @@ public void bigQueryCompositeHandlerTest() throws IOException { Exception ex = null; - Mockito.when(AWSSecretsManagerClientBuilder.defaultClient()).thenReturn(secretsManager); - GetSecretValueResult getSecretValueResult = new GetSecretValueResult().withVersionStages(Arrays.asList("v1")).withSecretString("{\n" + - " \"type\": \"service_account\",\n" + - " \"project_id\": \"mockProjectId\",\n" + - " \"private_key_id\": \"mockPrivateKeyId\",\n" + - " \"private_key\": \"-----BEGIN PRIVATE KEY-----\\nmockPrivateKeydsfhdskfhjdfjkdhgfdjkghfdngvfkvfnjvfdjkg\\n-----END PRIVATE KEY-----\\n\",\n" + - " \"client_email\": \"mockabc@mockprojectid.iam.gserviceaccount.com\",\n" + - " \"client_id\": \"000000000000000000000\"\n" + - "}"); + Mockito.when(SecretsManagerClient.create()).thenReturn(secretsManager); + GetSecretValueResponse getSecretValueResponse = GetSecretValueResponse.builder() + .versionStages(Arrays.asList("v1")) + .secretString("{\n" + + " \"type\": \"service_account\",\n" + + " \"project_id\": \"mockProjectId\",\n" + + " \"private_key_id\": \"mockPrivateKeyId\",\n" + + " \"private_key\": \"-----BEGIN PRIVATE KEY-----\\nmockPrivateKeydsfhdskfhjdfjkdhgfdjkghfdngvfkvfnjvfdjkg\\n-----END PRIVATE KEY-----\\n\",\n" + + " \"client_email\": \"mockabc@mockprojectid.iam.gserviceaccount.com\",\n" + + " \"client_id\": \"000000000000000000000\"\n" + + "}") + .build(); Mockito.when(ServiceAccountCredentials.fromStream(any())).thenReturn(serviceAccountCredentials); bigQueryCompositeHandler = new BigQueryCompositeHandler(); diff --git a/athena-google-bigquery/src/test/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryRecordHandlerTest.java b/athena-google-bigquery/src/test/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryRecordHandlerTest.java 
index 74f97587bd..8294adc6a1 100644 --- a/athena-google-bigquery/src/test/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryRecordHandlerTest.java +++ b/athena-google-bigquery/src/test/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryRecordHandlerTest.java @@ -37,7 +37,6 @@ import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.api.gax.rpc.ServerStream; import com.google.api.gax.rpc.ServerStreamingCallable; import com.google.cloud.bigquery.BigQuery; @@ -79,6 +78,7 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.nio.charset.StandardCharsets; import java.util.ArrayList; @@ -107,7 +107,7 @@ public class BigQueryRecordHandlerTest BigQuery bigQuery; @Mock - AWSSecretsManager awsSecretsManager; + SecretsManagerClient awsSecretsManager; private String bucket = "bucket"; private String prefix = "prefix"; diff --git a/athena-hbase/pom.xml b/athena-hbase/pom.xml index 3c4914d4b8..3f802a0914 100644 --- a/athena-hbase/pom.xml +++ b/athena-hbase/pom.xml @@ -13,11 +13,6 @@ 2.6.0-hadoop3 - - org.slf4j - slf4j-simple - ${slf4j-log4j.version} - com.amazonaws aws-athena-federation-sdk @@ -52,32 +47,11 @@ ${aws-sdk.version} test - - org.apache.directory.server - apacheds-kerberos-codec - 2.0.0.AM27 - - - org.apache.directory.api - api-ldap-model - - - org.apache.avro avro 1.11.3 - - com.fasterxml.jackson.module - jackson-module-jaxb-annotations - ${fasterxml.jackson.version} - - - org.codehaus.jettison - jettison - 1.5.4 - com.google.protobuf protobuf-java @@ -88,36 +62,6 @@ api-ldap-model 2.1.6 - - org.eclipse.jetty - jetty-server - ${jetty.version} - - - org.eclipse.jetty - jetty-xml - ${jetty.version} - - - org.eclipse.jetty 
- jetty-webapp - ${jetty.version} - - - org.eclipse.jetty - jetty-servlet - ${jetty.version} - - - org.eclipse.jetty - jetty-io - ${jetty.version} - - - org.apache.zookeeper - zookeeper - 3.9.2 - org.apache.hadoop hadoop-common diff --git a/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseMetadataHandler.java b/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseMetadataHandler.java index 6be56ee456..ab3cca488e 100644 --- a/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseMetadataHandler.java +++ b/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseMetadataHandler.java @@ -47,7 +47,6 @@ import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.glue.AWSGlue; import com.amazonaws.services.glue.model.Table; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.collect.ImmutableMap; import org.apache.arrow.util.VisibleForTesting; import org.apache.arrow.vector.types.Types; @@ -58,6 +57,7 @@ import org.apache.hadoop.hbase.TableName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.IOException; import java.util.ArrayList; @@ -122,7 +122,7 @@ public HbaseMetadataHandler(java.util.Map configOptions) protected HbaseMetadataHandler( AWSGlue awsGlue, EncryptionKeyFactory keyFactory, - AWSSecretsManager secretsManager, + SecretsManagerClient secretsManager, AmazonAthena athena, HbaseConnectionFactory connectionFactory, String spillBucket, diff --git a/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseRecordHandler.java b/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseRecordHandler.java index 9e10b503df..7da3700652 100644 --- a/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseRecordHandler.java +++ 
b/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseRecordHandler.java @@ -35,8 +35,6 @@ import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import org.apache.arrow.util.VisibleForTesting; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.ArrowType; @@ -55,6 +53,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.IOException; import java.nio.charset.CharacterCodingException; @@ -92,14 +91,14 @@ public HbaseRecordHandler(java.util.Map configOptions) { this( AmazonS3ClientBuilder.defaultClient(), - AWSSecretsManagerClientBuilder.defaultClient(), + SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), new HbaseConnectionFactory(), configOptions); } @VisibleForTesting - protected HbaseRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, HbaseConnectionFactory connectionFactory, java.util.Map configOptions) + protected HbaseRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, HbaseConnectionFactory connectionFactory, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, SOURCE_TYPE, configOptions); this.amazonS3 = amazonS3; diff --git a/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/HbaseMetadataHandlerTest.java b/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/HbaseMetadataHandlerTest.java index eb43f48841..04593b066c 100644 --- a/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/HbaseMetadataHandlerTest.java +++ 
b/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/HbaseMetadataHandlerTest.java @@ -44,7 +44,6 @@ import com.amazonaws.athena.connectors.hbase.connection.ResultProcessor; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.glue.AWSGlue; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.hadoop.hbase.HRegionInfo; @@ -63,6 +62,7 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.IOException; import java.util.ArrayList; @@ -102,7 +102,7 @@ public class HbaseMetadataHandlerTest private AWSGlue awsGlue; @Mock - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; @Mock private AmazonAthena athena; diff --git a/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/HbaseRecordHandlerTest.java b/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/HbaseRecordHandlerTest.java index 9933043e5a..ac9c06c6ee 100644 --- a/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/HbaseRecordHandlerTest.java +++ b/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/HbaseRecordHandlerTest.java @@ -53,7 +53,6 @@ import com.amazonaws.services.s3.model.PutObjectResult; import com.amazonaws.services.s3.model.S3Object; import com.amazonaws.services.s3.model.S3ObjectInputStream; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.collect.ImmutableList; import com.google.common.io.ByteStreams; import org.apache.arrow.vector.types.Types; @@ -74,6 +73,7 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import 
java.io.ByteArrayInputStream; import java.io.IOException; @@ -124,7 +124,7 @@ public class HbaseRecordHandlerTest private HbaseConnectionFactory mockConnFactory; @Mock - private AWSSecretsManager mockSecretsManager; + private SecretsManagerClient mockSecretsManager; @Mock private AmazonAthena mockAthena; diff --git a/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveMetadataHandler.java b/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveMetadataHandler.java index 4a42dca536..56016626e2 100644 --- a/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveMetadataHandler.java +++ b/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveMetadataHandler.java @@ -49,7 +49,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcArrowTypeConverter; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; @@ -60,6 +59,7 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.DatabaseMetaData; @@ -96,7 +96,7 @@ public HiveMetadataHandler(DatabaseConnectionConfig databaseConnectionConfig, ja @VisibleForTesting protected HiveMetadataHandler( DatabaseConnectionConfig databaseConnectionConfiguration, - AWSSecretsManager secretManager, + SecretsManagerClient secretManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) diff --git a/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxMetadataHandler.java 
b/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxMetadataHandler.java index fa13a931c4..13b93a3f80 100644 --- a/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxMetadataHandler.java +++ b/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxMetadataHandler.java @@ -26,8 +26,8 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandlerFactory; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.util.VisibleForTesting; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -56,7 +56,7 @@ public HiveMuxMetadataHandler(java.util.Map configOptions) } @VisibleForTesting - protected HiveMuxMetadataHandler(AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + protected HiveMuxMetadataHandler(SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, Map metadataHandlerMap, DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { super(secretsManager, athena, jdbcConnectionFactory, metadataHandlerMap, databaseConnectionConfig, configOptions); diff --git a/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxRecordHandler.java b/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxRecordHandler.java index 6bc47f1687..a0aba7271b 100644 --- a/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxRecordHandler.java +++ b/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxRecordHandler.java @@ -27,8 +27,8 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; 
import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.annotations.VisibleForTesting; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -57,7 +57,7 @@ public HiveMuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - HiveMuxRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + HiveMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveRecordHandler.java b/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveRecordHandler.java index 47a7b235fd..ff1f8e3a65 100644 --- a/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveRecordHandler.java +++ b/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveRecordHandler.java @@ -32,11 +32,10 @@ import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.commons.lang3.Validate; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import 
java.sql.PreparedStatement; @@ -62,11 +61,11 @@ public HiveRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, java } public HiveRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { - this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), AWSSecretsManagerClientBuilder.defaultClient(), AmazonAthenaClientBuilder.defaultClient(), + this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), jdbcConnectionFactory, new HiveQueryStringBuilder(HIVE_QUOTE_CHARACTER, new HiveFederationExpressionParser(HIVE_QUOTE_CHARACTER)), configOptions); } @VisibleForTesting - HiveRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) + HiveRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); this.jdbcSplitQueryBuilder = Validate.notNull(jdbcSplitQueryBuilder, "query builder must not be null"); diff --git a/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveMetadataHandlerTest.java b/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveMetadataHandlerTest.java index ff9065debe..988ffbfe1d 100644 --- a/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveMetadataHandlerTest.java +++ 
b/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveMetadataHandlerTest.java @@ -29,9 +29,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest; -import com.amazonaws.services.secretsmanager.model.GetSecretValueResult; import org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Assert; @@ -39,6 +36,9 @@ import org.junit.BeforeClass; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; import java.sql.*; import java.util.*; @@ -57,7 +57,7 @@ public class HiveMetadataHandlerTest private JdbcConnectionFactory jdbcConnectionFactory; private Connection connection; private FederatedIdentity federatedIdentity; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private BlockAllocator blockAllocator; @@ -74,9 +74,9 @@ public void setup() this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class, Mockito.RETURNS_DEEP_STUBS); this.connection = Mockito.mock(Connection.class, Mockito.RETURNS_DEEP_STUBS); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); - Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(new 
GetSecretValueRequest().withSecretId("testSecret")))).thenReturn(new GetSecretValueResult().withSecretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}")); + Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}").build()); this.hiveMetadataHandler = new HiveMetadataHandler(databaseConnectionConfig, this.secretsManager, this.athena, this.jdbcConnectionFactory, com.google.common.collect.ImmutableMap.of()); this.federatedIdentity = Mockito.mock(FederatedIdentity.class); diff --git a/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxMetadataHandlerTest.java b/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxMetadataHandlerTest.java index 5d93ab7427..51aec4c5ab 100644 --- a/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxMetadataHandlerTest.java +++ b/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxMetadataHandlerTest.java @@ -44,7 +44,7 @@ import com.amazonaws.athena.connector.lambda.metadata.ListSchemasRequest; import com.amazonaws.athena.connector.lambda.metadata.ListTablesRequest; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import static org.mockito.ArgumentMatchers.nullable; @@ -54,7 +54,7 @@ public class HiveMuxMetadataHandlerTest private HiveMetadataHandler hiveMetadataHandler; private JdbcMetadataHandler jdbcMetadataHandler; private BlockAllocator allocator; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory 
jdbcConnectionFactory; @@ -68,7 +68,7 @@ public void setup() this.allocator = new BlockAllocatorImpl(); this.hiveMetadataHandler = Mockito.mock(HiveMetadataHandler.class); this.metadataHandlerMap = Collections.singletonMap("metaHive", this.hiveMetadataHandler); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxRecordHandlerTest.java b/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxRecordHandlerTest.java index 9d78ee80e1..60b5c89e48 100644 --- a/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxRecordHandlerTest.java +++ b/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxRecordHandlerTest.java @@ -31,13 +31,13 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; import org.mockito.Mockito; import org.testng.Assert; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.SQLException; @@ -51,7 +51,7 @@ public class HiveMuxRecordHandlerTest private HiveRecordHandler hiveRecordHandler; private JdbcRecordHandler jdbcRecordHandler; private AmazonS3 amazonS3; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private QueryStatusChecker queryStatusChecker; 
private JdbcConnectionFactory jdbcConnectionFactory; @@ -65,7 +65,7 @@ public void setup() this.hiveRecordHandler = Mockito.mock(HiveRecordHandler.class); this.recordHandlerMap = Collections.singletonMap("recordHive", this.hiveRecordHandler); this.amazonS3 = Mockito.mock(AmazonS3.class); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveRecordHandlerTest.java b/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveRecordHandlerTest.java index 679bd11228..254c4e14bb 100644 --- a/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveRecordHandlerTest.java +++ b/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveRecordHandlerTest.java @@ -48,11 +48,11 @@ import com.amazonaws.athena.connector.lambda.domain.predicate.ValueSet; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest; -import com.amazonaws.services.secretsmanager.model.GetSecretValueResult; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; import static com.amazonaws.athena.connectors.hortonworks.HiveConstants.HIVE_QUOTE_CHARACTER; import static org.mockito.ArgumentMatchers.any; @@ -65,7 
+65,7 @@ public class HiveRecordHandlerTest private JdbcConnectionFactory jdbcConnectionFactory; private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; private AmazonS3 amazonS3; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; @Before @@ -73,9 +73,9 @@ public void setup() throws Exception { this.amazonS3 = Mockito.mock(AmazonS3.class); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); - Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(new GetSecretValueRequest().withSecretId("testSecret")))).thenReturn(new GetSecretValueResult().withSecretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}")); + Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}").build()); this.connection = Mockito.mock(Connection.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); diff --git a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcMetadataHandler.java b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcMetadataHandler.java index c20fa505da..71d5873cca 100644 --- a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcMetadataHandler.java +++ b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcMetadataHandler.java @@ -40,9 +40,9 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandlerFactory; import 
com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.commons.lang3.Validate; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -64,7 +64,7 @@ public class MultiplexingJdbcMetadataHandler * @param metadataHandlerMap catalog -> JdbcMetadataHandler */ protected MultiplexingJdbcMetadataHandler( - AWSSecretsManager secretsManager, + SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, Map metadataHandlerMap, diff --git a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcRecordHandler.java b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcRecordHandler.java index e458b7cbcd..86c8e4db7e 100644 --- a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcRecordHandler.java +++ b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcRecordHandler.java @@ -32,10 +32,10 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.commons.lang3.Validate; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.PreparedStatement; @@ -62,7 +62,7 @@ public MultiplexingJdbcRecordHandler(JdbcRecordHandlerFactory jdbcRecordHandlerF @VisibleForTesting protected MultiplexingJdbcRecordHandler( AmazonS3 amazonS3, - AWSSecretsManager secretsManager, + SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig 
databaseConnectionConfig, diff --git a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcMetadataHandler.java b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcMetadataHandler.java index bb4154e6dd..e882fac762 100644 --- a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcMetadataHandler.java +++ b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcMetadataHandler.java @@ -46,7 +46,6 @@ import com.amazonaws.athena.connectors.jdbc.splits.Splitter; import com.amazonaws.athena.connectors.jdbc.splits.SplitterFactory; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; @@ -58,6 +57,7 @@ import org.apache.commons.lang3.Validate; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.DatabaseMetaData; @@ -115,7 +115,7 @@ protected JdbcMetadataHandler( @VisibleForTesting protected JdbcMetadataHandler( DatabaseConnectionConfig databaseConnectionConfig, - AWSSecretsManager secretsManager, + SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) diff --git a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcRecordHandler.java b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcRecordHandler.java index 3c431efb5c..974551550b 100644 --- a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcRecordHandler.java +++ b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcRecordHandler.java @@ -56,7 +56,6 @@ import com.amazonaws.athena.connectors.jdbc.qpt.JdbcQueryPassthrough; import 
com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.util.VisibleForTesting; import org.apache.arrow.vector.FieldVector; import org.apache.arrow.vector.holders.NullableBigIntHolder; @@ -76,6 +75,7 @@ import org.apache.commons.lang3.Validate; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Array; import java.sql.Connection; @@ -112,7 +112,7 @@ protected JdbcRecordHandler(String sourceType, java.util.Map con protected JdbcRecordHandler( AmazonS3 amazonS3, - AWSSecretsManager secretsManager, + SecretsManagerClient secretsManager, AmazonAthena athena, DatabaseConnectionConfig databaseConnectionConfig, JdbcConnectionFactory jdbcConnectionFactory, diff --git a/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcMetadataHandlerTest.java b/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcMetadataHandlerTest.java index 1c4c2cea54..1e952eff7b 100644 --- a/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcMetadataHandlerTest.java +++ b/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcMetadataHandlerTest.java @@ -33,10 +33,10 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Collections; import java.util.Map; @@ -49,7 +49,7 @@ public class MultiplexingJdbcMetadataHandlerTest private JdbcMetadataHandler fakeDatabaseHandler; private JdbcMetadataHandler 
jdbcMetadataHandler; private BlockAllocator allocator; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -62,7 +62,7 @@ public void setup() //Mockito.when(this.allocator.createBlock(nullable(Schema.class))).thenReturn(Mockito.mock(Block.class)); this.fakeDatabaseHandler = Mockito.mock(JdbcMetadataHandler.class); this.metadataHandlerMap = Collections.singletonMap("fakedatabase", this.fakeDatabaseHandler); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcRecordHandlerTest.java b/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcRecordHandlerTest.java index 46eac3ba57..2d42e4c01d 100644 --- a/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcRecordHandlerTest.java +++ b/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcRecordHandlerTest.java @@ -30,11 +30,11 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.SQLException; @@ -47,7 +47,7 @@ public class MultiplexingJdbcRecordHandlerTest private JdbcRecordHandler fakeJdbcRecordHandler; private 
JdbcRecordHandler jdbcRecordHandler; private AmazonS3 amazonS3; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -58,7 +58,7 @@ public void setup() this.fakeJdbcRecordHandler = Mockito.mock(JdbcRecordHandler.class); this.recordHandlerMap = Collections.singletonMap("fakedatabase", this.fakeJdbcRecordHandler); this.amazonS3 = Mockito.mock(AmazonS3.class); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcMetadataHandlerTest.java b/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcMetadataHandlerTest.java index 35bb083715..e1d470af21 100644 --- a/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcMetadataHandlerTest.java +++ b/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcMetadataHandlerTest.java @@ -40,14 +40,14 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest; -import com.amazonaws.services.secretsmanager.model.GetSecretValueResult; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import 
software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; import java.sql.Connection; import java.sql.ResultSet; @@ -70,7 +70,7 @@ public class JdbcMetadataHandlerTest private FederatedIdentity federatedIdentity; private Connection connection; private BlockAllocator blockAllocator; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private ResultSet resultSetName; @@ -82,9 +82,9 @@ public void setup() this.connection = Mockito.mock(Connection.class, Mockito.RETURNS_DEEP_STUBS); Mockito.when(connection.getCatalog()).thenReturn("testCatalog"); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); - Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(new GetSecretValueRequest().withSecretId("testSecret")))).thenReturn(new GetSecretValueResult().withSecretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}")); + Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}").build()); DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog", "fakedatabase", "fakedatabase://jdbc:fakedatabase://hostname/${testSecret}", "testSecret"); this.jdbcMetadataHandler = new JdbcMetadataHandler(databaseConnectionConfig, this.secretsManager, this.athena, jdbcConnectionFactory, com.google.common.collect.ImmutableMap.of()) diff --git a/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcRecordHandlerTest.java 
b/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcRecordHandlerTest.java index df6d6f1e42..cb297e192a 100644 --- a/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcRecordHandlerTest.java +++ b/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcRecordHandlerTest.java @@ -43,9 +43,6 @@ import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.model.PutObjectRequest; import com.amazonaws.services.s3.model.PutObjectResult; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest; -import com.amazonaws.services.secretsmanager.model.GetSecretValueResult; import org.apache.arrow.vector.holders.NullableFloat8Holder; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Assert; @@ -53,6 +50,9 @@ import org.junit.Test; import org.mockito.Mockito; import org.mockito.stubbing.Answer; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; import java.io.ByteArrayInputStream; import java.nio.charset.StandardCharsets; @@ -76,7 +76,7 @@ public class JdbcRecordHandlerTest private Connection connection; private JdbcConnectionFactory jdbcConnectionFactory; private AmazonS3 amazonS3; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private QueryStatusChecker queryStatusChecker; private FederatedIdentity federatedIdentity; @@ -90,10 +90,10 @@ public void setup() this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); this.amazonS3 = Mockito.mock(AmazonS3.class); - this.secretsManager = 
Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); - Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(new GetSecretValueRequest().withSecretId("testSecret")))).thenReturn(new GetSecretValueResult().withSecretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}")); + Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}").build()); this.preparedStatement = Mockito.mock(PreparedStatement.class); Mockito.when(this.connection.prepareStatement("someSql")).thenReturn(this.preparedStatement); DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog", "fakedatabase", diff --git a/athena-kafka/src/main/java/com/amazonaws/athena/connectors/kafka/KafkaRecordHandler.java b/athena-kafka/src/main/java/com/amazonaws/athena/connectors/kafka/KafkaRecordHandler.java index 5fd5dd104e..16d45fc6c5 100644 --- a/athena-kafka/src/main/java/com/amazonaws/athena/connectors/kafka/KafkaRecordHandler.java +++ b/athena-kafka/src/main/java/com/amazonaws/athena/connectors/kafka/KafkaRecordHandler.java @@ -31,8 +31,6 @@ import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import com.google.common.annotations.VisibleForTesting; import com.google.protobuf.Descriptors; import com.google.protobuf.DynamicMessage; @@ -45,6 +43,7 @@ import org.apache.kafka.common.TopicPartition; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import 
software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.time.Duration; import java.util.Collection; @@ -63,13 +62,13 @@ public class KafkaRecordHandler { this( AmazonS3ClientBuilder.defaultClient(), - AWSSecretsManagerClientBuilder.defaultClient(), + SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), configOptions); } @VisibleForTesting - public KafkaRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, java.util.Map configOptions) + public KafkaRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, KafkaConstants.KAFKA_SOURCE, configOptions); } diff --git a/athena-kafka/src/main/java/com/amazonaws/athena/connectors/kafka/KafkaUtils.java b/athena-kafka/src/main/java/com/amazonaws/athena/connectors/kafka/KafkaUtils.java index 35e757c4e3..7ff8296da6 100644 --- a/athena-kafka/src/main/java/com/amazonaws/athena/connectors/kafka/KafkaUtils.java +++ b/athena-kafka/src/main/java/com/amazonaws/athena/connectors/kafka/KafkaUtils.java @@ -33,10 +33,6 @@ import com.amazonaws.services.s3.model.ObjectListing; import com.amazonaws.services.s3.model.S3Object; import com.amazonaws.services.s3.model.S3ObjectSummary; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; -import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest; -import com.amazonaws.services.secretsmanager.model.GetSecretValueResult; import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.protobuf.DynamicMessage; @@ -51,6 +47,9 @@ import org.apache.kafka.common.serialization.StringDeserializer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import 
software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; import java.io.BufferedInputStream; import java.io.File; @@ -375,11 +374,12 @@ private static Path getTempDirPath() */ private static Map getCredentialsAsKeyValue(java.util.Map configOptions) throws Exception { - AWSSecretsManager secretsManager = AWSSecretsManagerClientBuilder.defaultClient(); - GetSecretValueRequest getSecretValueRequest = new GetSecretValueRequest(); - getSecretValueRequest.setSecretId(getRequiredConfig(KafkaConstants.SECRET_MANAGER_KAFKA_CREDS_NAME, configOptions)); - GetSecretValueResult response = secretsManager.getSecretValue(getSecretValueRequest); - return objectMapper.readValue(response.getSecretString(), new TypeReference>() + SecretsManagerClient secretsManager = SecretsManagerClient.create(); + GetSecretValueRequest getSecretValueRequest = GetSecretValueRequest.builder() + .secretId(getRequiredConfig(KafkaConstants.SECRET_MANAGER_KAFKA_CREDS_NAME, configOptions)) + .build(); + GetSecretValueResponse response = secretsManager.getSecretValue(getSecretValueRequest); + return objectMapper.readValue(response.secretString(), new TypeReference>() { }); } diff --git a/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaCompositeHandlerTest.java b/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaCompositeHandlerTest.java index 337610b80a..ec01502a4a 100644 --- a/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaCompositeHandlerTest.java +++ b/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaCompositeHandlerTest.java @@ -19,8 +19,6 @@ */ package com.amazonaws.athena.connectors.kafka; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import org.apache.kafka.clients.consumer.KafkaConsumer; import org.junit.After; import 
org.junit.Assert; @@ -29,10 +27,10 @@ import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; - import org.mockito.MockedStatic; import org.mockito.Mockito; import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; @@ -55,14 +53,14 @@ public class KafkaCompositeHandlerTest { @Mock - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private MockedStatic mockedKafkaUtils; - private MockedStatic mockedSecretsManagerClient; + private MockedStatic mockedSecretsManagerClient; @Before public void setUp() throws Exception { - mockedSecretsManagerClient = Mockito.mockStatic(AWSSecretsManagerClientBuilder.class); - mockedSecretsManagerClient.when(()-> AWSSecretsManagerClientBuilder.defaultClient()).thenReturn(secretsManager); + mockedSecretsManagerClient = Mockito.mockStatic(SecretsManagerClient.class); + mockedSecretsManagerClient.when(()-> SecretsManagerClient.create()).thenReturn(secretsManager); mockedKafkaUtils = Mockito.mockStatic(KafkaUtils.class); mockedKafkaUtils.when(() -> KafkaUtils.getKafkaConsumer(configOptions)).thenReturn(kafkaConsumer); } diff --git a/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaRecordHandlerTest.java b/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaRecordHandlerTest.java index 2d46fb26be..5395640caa 100644 --- a/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaRecordHandlerTest.java +++ b/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaRecordHandlerTest.java @@ -38,7 +38,6 @@ import com.amazonaws.services.glue.model.GetSchemaResult; import com.amazonaws.services.glue.model.GetSchemaVersionResult; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.fasterxml.jackson.core.JsonProcessingException; import 
com.fasterxml.jackson.databind.ObjectMapper; import com.google.protobuf.Descriptors; @@ -62,13 +61,12 @@ import org.mockito.junit.MockitoJUnitRunner; import org.mockito.MockedStatic; import org.mockito.Mockito; - +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Collections; import java.util.HashMap; import java.util.UUID; - import static com.amazonaws.athena.connector.lambda.domain.predicate.Constraints.DEFAULT_NO_LIMIT; import static com.amazonaws.athena.connectors.kafka.KafkaConstants.AVRO_DATA_FORMAT; import static com.amazonaws.athena.connectors.kafka.KafkaConstants.PROTOBUF_DATA_FORMAT; @@ -90,7 +88,7 @@ public class KafkaRecordHandlerTest { AmazonS3 amazonS3; @Mock - AWSSecretsManager awsSecretsManager; + SecretsManagerClient awsSecretsManager; @Mock private AmazonAthena athena; diff --git a/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaUtilsTest.java b/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaUtilsTest.java index 7a80682bec..ce8ee50fea 100644 --- a/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaUtilsTest.java +++ b/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaUtilsTest.java @@ -34,10 +34,6 @@ import com.amazonaws.services.s3.model.ObjectListing; import com.amazonaws.services.s3.model.S3Object; import com.amazonaws.services.s3.model.S3ObjectSummary; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; -import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest; -import com.amazonaws.services.secretsmanager.model.GetSecretValueResult; import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.arrow.vector.types.Types; @@ -56,9 +52,11 @@ import org.mockito.junit.MockitoJUnitRunner; import org.mockito.MockedConstruction; import org.mockito.MockedStatic; 
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; import java.io.ByteArrayInputStream; - import java.io.FileWriter; import java.util.*; @@ -77,13 +75,13 @@ public class KafkaUtilsTest { ObjectMapper objectMapper; @Mock - AWSSecretsManager awsSecretsManager; + SecretsManagerClient awsSecretsManager; @Mock GetSecretValueRequest secretValueRequest; @Mock - GetSecretValueResult secretValueResult; + GetSecretValueResponse secretValueResponse; @Mock DefaultAWSCredentialsProviderChain chain; @@ -114,7 +112,7 @@ public class KafkaUtilsTest { private MockedConstruction mockedObjectMapper; private MockedConstruction mockedDefaultCredentials; private MockedStatic mockedS3ClientBuilder; - private MockedStatic mockedSecretsManagerClient; + private MockedStatic mockedSecretsManagerClient; @Before @@ -123,8 +121,8 @@ public void init() throws Exception { System.setProperty("aws.accessKeyId", "xxyyyioyuu"); System.setProperty("aws.secretKey", "vamsajdsjkl"); - mockedSecretsManagerClient = Mockito.mockStatic(AWSSecretsManagerClientBuilder.class); - mockedSecretsManagerClient.when(()-> AWSSecretsManagerClientBuilder.defaultClient()).thenReturn(awsSecretsManager); + mockedSecretsManagerClient = Mockito.mockStatic(SecretsManagerClient.class); + mockedSecretsManagerClient.when(()-> SecretsManagerClient.create()).thenReturn(awsSecretsManager); String creds = "{\"username\":\"admin\",\"password\":\"test\",\"keystore_password\":\"keypass\",\"truststore_password\":\"trustpass\",\"ssl_key_password\":\"sslpass\"}"; @@ -135,8 +133,8 @@ public void init() throws Exception { map.put("truststore_password", "trustpass"); map.put("ssl_key_password", "sslpass"); - Mockito.when(secretValueResult.getSecretString()).thenReturn(creds); - 
Mockito.when(awsSecretsManager.getSecretValue(Mockito.isA(GetSecretValueRequest.class))).thenReturn(secretValueResult); + Mockito.when(secretValueResponse.secretString()).thenReturn(creds); + Mockito.when(awsSecretsManager.getSecretValue(Mockito.isA(GetSecretValueRequest.class))).thenReturn(secretValueResponse); mockedObjectMapper = Mockito.mockConstruction(ObjectMapper.class, (mock, context) -> { diff --git a/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/AmazonMskRecordHandler.java b/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/AmazonMskRecordHandler.java index b01b3403e1..89c4df6e52 100644 --- a/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/AmazonMskRecordHandler.java +++ b/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/AmazonMskRecordHandler.java @@ -31,8 +31,6 @@ import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import com.google.common.annotations.VisibleForTesting; import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.clients.consumer.ConsumerRecord; @@ -41,6 +39,7 @@ import org.apache.kafka.common.TopicPartition; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.time.Duration; import java.util.Collection; @@ -56,13 +55,13 @@ public class AmazonMskRecordHandler { this( AmazonS3ClientBuilder.defaultClient(), - AWSSecretsManagerClientBuilder.defaultClient(), + SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), configOptions); } @VisibleForTesting - public AmazonMskRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, java.util.Map configOptions) + public 
AmazonMskRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, AmazonMskConstants.MSK_SOURCE, configOptions); } diff --git a/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/AmazonMskUtils.java b/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/AmazonMskUtils.java index 2ed041502d..9a12c3bf06 100644 --- a/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/AmazonMskUtils.java +++ b/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/AmazonMskUtils.java @@ -33,10 +33,6 @@ import com.amazonaws.services.s3.model.ObjectListing; import com.amazonaws.services.s3.model.S3Object; import com.amazonaws.services.s3.model.S3ObjectSummary; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; -import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest; -import com.amazonaws.services.secretsmanager.model.GetSecretValueResult; import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.arrow.vector.types.Types; @@ -49,6 +45,9 @@ import org.apache.kafka.common.serialization.StringDeserializer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; import java.io.BufferedInputStream; import java.io.File; @@ -354,11 +353,12 @@ private static Path getTempDirPath() */ private static Map getCredentialsAsKeyValue(java.util.Map configOptions) throws Exception { - AWSSecretsManager secretsManager = AWSSecretsManagerClientBuilder.defaultClient(); - GetSecretValueRequest getSecretValueRequest = new GetSecretValueRequest(); - 
getSecretValueRequest.setSecretId(getRequiredConfig(AmazonMskConstants.SECRET_MANAGER_MSK_CREDS_NAME, configOptions)); - GetSecretValueResult response = secretsManager.getSecretValue(getSecretValueRequest); - return objectMapper.readValue(response.getSecretString(), new TypeReference>() + SecretsManagerClient secretsManager = SecretsManagerClient.create(); + GetSecretValueRequest getSecretValueRequest = GetSecretValueRequest.builder() + .secretId(getRequiredConfig(AmazonMskConstants.SECRET_MANAGER_MSK_CREDS_NAME, configOptions)) + .build(); + GetSecretValueResponse response = secretsManager.getSecretValue(getSecretValueRequest); + return objectMapper.readValue(response.secretString(), new TypeReference>() { }); } diff --git a/athena-msk/src/test/java/com/amazonaws/athena/connectors/msk/AmazonMskCompositeHandlerTest.java b/athena-msk/src/test/java/com/amazonaws/athena/connectors/msk/AmazonMskCompositeHandlerTest.java index 3ae72cecb3..9154368a0b 100644 --- a/athena-msk/src/test/java/com/amazonaws/athena/connectors/msk/AmazonMskCompositeHandlerTest.java +++ b/athena-msk/src/test/java/com/amazonaws/athena/connectors/msk/AmazonMskCompositeHandlerTest.java @@ -19,8 +19,6 @@ */ package com.amazonaws.athena.connectors.msk; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import org.apache.kafka.clients.consumer.KafkaConsumer; import org.junit.After; import org.junit.Assert; @@ -31,6 +29,7 @@ import org.mockito.MockedStatic; import org.mockito.Mockito; import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; @RunWith(MockitoJUnitRunner.class) public class AmazonMskCompositeHandlerTest { @@ -47,16 +46,13 @@ public class AmazonMskCompositeHandlerTest { @Mock KafkaConsumer kafkaConsumer; @Mock - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonMskCompositeHandler 
amazonMskCompositeHandler; private MockedStatic mockedMskUtils; - private MockedStatic mockedSecretsManagerClient; @Before public void setUp() throws Exception { - mockedSecretsManagerClient = Mockito.mockStatic(AWSSecretsManagerClientBuilder.class); - mockedSecretsManagerClient.when(()-> AWSSecretsManagerClientBuilder.defaultClient()).thenReturn(secretsManager); mockedMskUtils = Mockito.mockStatic(AmazonMskUtils.class); mockedMskUtils.when(() -> AmazonMskUtils.getKafkaConsumer(configOptions)).thenReturn(kafkaConsumer); } @@ -64,7 +60,6 @@ public void setUp() throws Exception { @After public void close() { mockedMskUtils.close(); - mockedSecretsManagerClient.close(); } @Test diff --git a/athena-msk/src/test/java/com/amazonaws/athena/connectors/msk/AmazonMskRecordHandlerTest.java b/athena-msk/src/test/java/com/amazonaws/athena/connectors/msk/AmazonMskRecordHandlerTest.java index 78d5f2da24..742af33983 100644 --- a/athena-msk/src/test/java/com/amazonaws/athena/connectors/msk/AmazonMskRecordHandlerTest.java +++ b/athena-msk/src/test/java/com/amazonaws/athena/connectors/msk/AmazonMskRecordHandlerTest.java @@ -34,7 +34,6 @@ import com.amazonaws.athena.connectors.msk.dto.*; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.arrow.vector.types.pojo.Field; @@ -53,6 +52,7 @@ import org.mockito.Mockito; import org.mockito.MockitoAnnotations; import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Collections; import java.util.HashMap; @@ -71,7 +71,7 @@ public class AmazonMskRecordHandlerTest { AmazonS3 amazonS3; @Mock - AWSSecretsManager awsSecretsManager; + SecretsManagerClient awsSecretsManager; @Mock private AmazonAthena athena; diff --git 
a/athena-msk/src/test/java/com/amazonaws/athena/connectors/msk/AmazonMskUtilsTest.java b/athena-msk/src/test/java/com/amazonaws/athena/connectors/msk/AmazonMskUtilsTest.java index f3975783a0..1888bddbe1 100644 --- a/athena-msk/src/test/java/com/amazonaws/athena/connectors/msk/AmazonMskUtilsTest.java +++ b/athena-msk/src/test/java/com/amazonaws/athena/connectors/msk/AmazonMskUtilsTest.java @@ -29,10 +29,6 @@ import com.amazonaws.services.s3.model.ObjectListing; import com.amazonaws.services.s3.model.S3Object; import com.amazonaws.services.s3.model.S3ObjectSummary; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; -import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest; -import com.amazonaws.services.secretsmanager.model.GetSecretValueResult; import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.arrow.vector.types.Types; @@ -50,6 +46,9 @@ import org.mockito.MockedStatic; import org.mockito.Mockito; import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; import java.io.ByteArrayInputStream; import java.io.FileWriter; @@ -70,13 +69,13 @@ public class AmazonMskUtilsTest { ObjectMapper objectMapper; @Mock - AWSSecretsManager awsSecretsManager; + SecretsManagerClient awsSecretsManager; @Mock GetSecretValueRequest secretValueRequest; @Mock - GetSecretValueResult secretValueResult; + GetSecretValueResponse secretValueResponse; @Mock DefaultAWSCredentialsProviderChain chain; @@ -105,15 +104,15 @@ public class AmazonMskUtilsTest { private MockedConstruction mockedObjectMapper; private MockedConstruction mockedDefaultCredentials; private MockedStatic 
mockedS3ClientBuilder; - private MockedStatic mockedSecretsManagerClient; + private MockedStatic mockedSecretsManagerClient; @Before public void init() throws Exception { System.setProperty("aws.region", "us-west-2"); System.setProperty("aws.accessKeyId", "xxyyyioyuu"); System.setProperty("aws.secretKey", "vamsajdsjkl"); - mockedSecretsManagerClient = Mockito.mockStatic(AWSSecretsManagerClientBuilder.class); - mockedSecretsManagerClient.when(()-> AWSSecretsManagerClientBuilder.defaultClient()).thenReturn(awsSecretsManager); + mockedSecretsManagerClient = Mockito.mockStatic(SecretsManagerClient.class); + mockedSecretsManagerClient.when(()-> SecretsManagerClient.create()).thenReturn(awsSecretsManager); String creds = "{\"username\":\"admin\",\"password\":\"test\",\"keystore_password\":\"keypass\",\"truststore_password\":\"trustpass\",\"ssl_key_password\":\"sslpass\"}"; @@ -125,8 +124,8 @@ public void init() throws Exception { map.put("truststore_password", "trustpass"); map.put("ssl_key_password", "sslpass"); - Mockito.when(secretValueResult.getSecretString()).thenReturn(creds); - Mockito.when(awsSecretsManager.getSecretValue(Mockito.isA(GetSecretValueRequest.class))).thenReturn(secretValueResult); + Mockito.when(secretValueResponse.secretString()).thenReturn(creds); + Mockito.when(awsSecretsManager.getSecretValue(Mockito.isA(GetSecretValueRequest.class))).thenReturn(secretValueResponse); mockedObjectMapper = Mockito.mockConstruction(ObjectMapper.class, (mock, context) -> { Mockito.doReturn(map).when(mock).readValue(Mockito.eq(creds), nullable(TypeReference.class)); diff --git a/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlMetadataHandler.java b/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlMetadataHandler.java index d569f8dfe6..c2e668ef53 100644 --- a/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlMetadataHandler.java +++ 
b/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlMetadataHandler.java @@ -47,7 +47,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.PreparedStatementBuilder; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.complex.reader.FieldReader; @@ -55,6 +54,7 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.PreparedStatement; @@ -114,7 +114,7 @@ public MySqlMetadataHandler(DatabaseConnectionConfig databaseConnectionConfig, J @VisibleForTesting protected MySqlMetadataHandler( DatabaseConnectionConfig databaseConnectionConfig, - AWSSecretsManager secretsManager, + SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) diff --git a/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlMuxMetadataHandler.java b/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlMuxMetadataHandler.java index cbeaf1d92e..0747a1390c 100644 --- a/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlMuxMetadataHandler.java +++ b/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlMuxMetadataHandler.java @@ -26,8 +26,8 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandlerFactory; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.util.VisibleForTesting; +import 
software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -58,7 +58,7 @@ public MySqlMuxMetadataHandler(java.util.Map configOptions) } @VisibleForTesting - protected MySqlMuxMetadataHandler(AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + protected MySqlMuxMetadataHandler(SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, Map metadataHandlerMap, DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { super(secretsManager, athena, jdbcConnectionFactory, metadataHandlerMap, databaseConnectionConfig, configOptions); diff --git a/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlMuxRecordHandler.java b/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlMuxRecordHandler.java index f3bf4ab940..5382fbba2b 100644 --- a/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlMuxRecordHandler.java +++ b/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlMuxRecordHandler.java @@ -27,8 +27,8 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.annotations.VisibleForTesting; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -57,7 +57,7 @@ public MySqlMuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - MySqlMuxRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + MySqlMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map 
configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlRecordHandler.java b/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlRecordHandler.java index 57a60f24e8..1eb7b38d1b 100644 --- a/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlRecordHandler.java +++ b/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlRecordHandler.java @@ -33,13 +33,12 @@ import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.commons.lang3.Validate; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.PreparedStatement; @@ -79,12 +78,12 @@ public MySqlRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, jav public MySqlRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { - this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), AWSSecretsManagerClientBuilder.defaultClient(), AmazonAthenaClientBuilder.defaultClient(), + this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), jdbcConnectionFactory, new MySqlQueryStringBuilder(MYSQL_QUOTE_CHARACTER, new MySqlFederationExpressionParser(MYSQL_QUOTE_CHARACTER)), configOptions); } @VisibleForTesting - 
MySqlRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, final AmazonS3 amazonS3, final AWSSecretsManager secretsManager, + MySqlRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, final AmazonS3 amazonS3, final SecretsManagerClient secretsManager, final AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); diff --git a/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlMetadataHandlerTest.java b/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlMetadataHandlerTest.java index 1f44feac4f..bd59bf4c18 100644 --- a/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlMetadataHandlerTest.java +++ b/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlMetadataHandlerTest.java @@ -41,15 +41,15 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest; -import com.amazonaws.services.secretsmanager.model.GetSecretValueResult; import org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; import java.sql.Connection; import java.sql.PreparedStatement; @@ -78,7 +78,7 @@ public class MySqlMetadataHandlerTest private 
JdbcConnectionFactory jdbcConnectionFactory; private Connection connection; private FederatedIdentity federatedIdentity; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private BlockAllocator blockAllocator; @@ -89,9 +89,9 @@ public void setup() this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); this.connection = Mockito.mock(Connection.class, Mockito.RETURNS_DEEP_STUBS); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); - Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(new GetSecretValueRequest().withSecretId("testSecret")))).thenReturn(new GetSecretValueResult().withSecretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}")); + Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}").build()); this.mySqlMetadataHandler = new MySqlMetadataHandler(databaseConnectionConfig, this.secretsManager, this.athena, this.jdbcConnectionFactory, com.google.common.collect.ImmutableMap.of()); this.federatedIdentity = Mockito.mock(FederatedIdentity.class); this.blockAllocator = Mockito.mock(BlockAllocator.class); diff --git a/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlMuxJdbcMetadataHandlerTest.java b/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlMuxJdbcMetadataHandlerTest.java index e09a78f2f0..4afa68c8c1 100644 --- a/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlMuxJdbcMetadataHandlerTest.java +++ 
b/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlMuxJdbcMetadataHandlerTest.java @@ -33,10 +33,10 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Collections; import java.util.Map; @@ -49,7 +49,7 @@ public class MySqlMuxJdbcMetadataHandlerTest private MySqlMetadataHandler mySqlMetadataHandler; private JdbcMetadataHandler jdbcMetadataHandler; private BlockAllocator allocator; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -62,7 +62,7 @@ public void setup() //Mockito.when(this.allocator.createBlock(nullable(Schema.class))).thenReturn(Mockito.mock(Block.class)); this.mySqlMetadataHandler = Mockito.mock(MySqlMetadataHandler.class); this.metadataHandlerMap = Collections.singletonMap("fakedatabase", this.mySqlMetadataHandler); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlMuxJdbcRecordHandlerTest.java b/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlMuxJdbcRecordHandlerTest.java index 1af05bbddb..c1352221ad 100644 --- a/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlMuxJdbcRecordHandlerTest.java +++ 
b/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlMuxJdbcRecordHandlerTest.java @@ -30,11 +30,11 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.SQLException; @@ -47,7 +47,7 @@ public class MySqlMuxJdbcRecordHandlerTest private MySqlRecordHandler mySqlRecordHandler; private JdbcRecordHandler jdbcRecordHandler; private AmazonS3 amazonS3; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -58,7 +58,7 @@ public void setup() this.mySqlRecordHandler = Mockito.mock(MySqlRecordHandler.class); this.recordHandlerMap = Collections.singletonMap("mysql", this.mySqlRecordHandler); this.amazonS3 = Mockito.mock(AmazonS3.class); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlRecordHandlerTest.java b/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlRecordHandlerTest.java index 06248de46b..53e6bfb9b6 100644 --- a/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlRecordHandlerTest.java +++ b/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlRecordHandlerTest.java @@ 
-38,7 +38,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.types.Types; @@ -48,6 +47,7 @@ import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.PreparedStatement; @@ -67,7 +67,7 @@ public class MySqlRecordHandlerTest private JdbcConnectionFactory jdbcConnectionFactory; private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; private AmazonS3 amazonS3; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; @Before @@ -75,7 +75,7 @@ public void setup() throws Exception { this.amazonS3 = Mockito.mock(AmazonS3.class); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.connection = Mockito.mock(Connection.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/NeptuneMetadataHandler.java b/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/NeptuneMetadataHandler.java index ddb3759696..3f26ef07a8 100644 --- a/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/NeptuneMetadataHandler.java +++ b/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/NeptuneMetadataHandler.java @@ -49,7 +49,6 @@ import com.amazonaws.services.glue.model.GetTablesRequest; import com.amazonaws.services.glue.model.GetTablesResult; import com.amazonaws.services.glue.model.Table; -import 
com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.collect.ImmutableMap; import org.apache.arrow.util.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Field; @@ -59,6 +58,7 @@ import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.ArrayList; import java.util.HashSet; @@ -112,7 +112,7 @@ protected NeptuneMetadataHandler( AWSGlue glue, NeptuneConnection neptuneConnection, EncryptionKeyFactory keyFactory, - AWSSecretsManager awsSecretsManager, + SecretsManagerClient awsSecretsManager, AmazonAthena athena, String spillBucket, String spillPrefix, diff --git a/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/NeptuneRecordHandler.java b/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/NeptuneRecordHandler.java index e6de3b070d..f347449eb1 100644 --- a/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/NeptuneRecordHandler.java +++ b/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/NeptuneRecordHandler.java @@ -30,11 +30,10 @@ import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import org.apache.arrow.util.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; /** * This class is part of an tutorial that will walk you through how to build a @@ -66,7 +65,7 @@ public NeptuneRecordHandler(java.util.Map configOptions) { this( AmazonS3ClientBuilder.defaultClient(), - AWSSecretsManagerClientBuilder.defaultClient(), + 
SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), NeptuneConnection.createConnection(configOptions), configOptions); @@ -75,7 +74,7 @@ public NeptuneRecordHandler(java.util.Map configOptions) @VisibleForTesting protected NeptuneRecordHandler( AmazonS3 amazonS3, - AWSSecretsManager secretsManager, + SecretsManagerClient secretsManager, AmazonAthena amazonAthena, NeptuneConnection neptuneConnection, java.util.Map configOptions) diff --git a/athena-neptune/src/test/java/com/amazonaws/athena/connectors/neptune/NeptuneMetadataHandlerTest.java b/athena-neptune/src/test/java/com/amazonaws/athena/connectors/neptune/NeptuneMetadataHandlerTest.java index a6c31c88ae..07457b89b9 100644 --- a/athena-neptune/src/test/java/com/amazonaws/athena/connectors/neptune/NeptuneMetadataHandlerTest.java +++ b/athena-neptune/src/test/java/com/amazonaws/athena/connectors/neptune/NeptuneMetadataHandlerTest.java @@ -36,7 +36,6 @@ import com.amazonaws.services.glue.model.GetTablesResult; import com.amazonaws.services.glue.model.StorageDescriptor; import com.amazonaws.services.glue.model.Table; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.junit.After; import org.junit.Before; @@ -46,6 +45,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; + import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -86,7 +87,7 @@ public void setUp() throws Exception { logger.info("setUpBefore - enter"); allocator = new BlockAllocatorImpl(); handler = new NeptuneMetadataHandler(glue,neptuneConnection, - new LocalKeyFactory(), mock(AWSSecretsManager.class), mock(AmazonAthena.class), "spill-bucket", + new LocalKeyFactory(), mock(SecretsManagerClient.class), mock(AmazonAthena.class), "spill-bucket", "spill-prefix", com.google.common.collect.ImmutableMap.of()); logger.info("setUpBefore - exit"); } diff --git 
a/athena-neptune/src/test/java/com/amazonaws/athena/connectors/neptune/NeptuneRecordHandlerTest.java b/athena-neptune/src/test/java/com/amazonaws/athena/connectors/neptune/NeptuneRecordHandlerTest.java index 5f4ed5ea24..eacce75fbb 100644 --- a/athena-neptune/src/test/java/com/amazonaws/athena/connectors/neptune/NeptuneRecordHandlerTest.java +++ b/athena-neptune/src/test/java/com/amazonaws/athena/connectors/neptune/NeptuneRecordHandlerTest.java @@ -52,7 +52,6 @@ import com.amazonaws.services.s3.model.PutObjectResult; import com.amazonaws.services.s3.model.S3Object; import com.amazonaws.services.s3.model.S3ObjectInputStream; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.io.ByteStreams; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Schema; @@ -77,6 +76,7 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.ByteArrayInputStream; import java.io.InputStream; @@ -98,7 +98,7 @@ public class NeptuneRecordHandlerTest extends TestBase { private Schema schemaPGEdgeForRead; private Schema schemaPGQueryForRead; private AmazonS3 amazonS3; - private AWSSecretsManager awsSecretsManager; + private SecretsManagerClient awsSecretsManager; private AmazonAthena athena; private S3BlockSpillReader spillReader; private EncryptionKeyFactory keyFactory = new LocalKeyFactory(); @@ -165,7 +165,7 @@ public void setUp() { allocator = new BlockAllocatorImpl(); amazonS3 = mock(AmazonS3.class); - awsSecretsManager = mock(AWSSecretsManager.class); + awsSecretsManager = mock(SecretsManagerClient.class); athena = mock(AmazonAthena.class); when(amazonS3.putObject(any())) diff --git a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandler.java b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandler.java index 
8570bc1df3..e3d96f2af0 100644 --- a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandler.java +++ b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandler.java @@ -53,7 +53,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.PreparedStatementBuilder; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; @@ -65,6 +64,7 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.DatabaseMetaData; @@ -125,7 +125,7 @@ public OracleMetadataHandler(DatabaseConnectionConfig databaseConnectionConfig, @VisibleForTesting protected OracleMetadataHandler( DatabaseConnectionConfig databaseConnectionConfig, - AWSSecretsManager secretsManager, + SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) diff --git a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMuxMetadataHandler.java b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMuxMetadataHandler.java index 6bf4fac5b0..df3399281b 100644 --- a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMuxMetadataHandler.java +++ b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMuxMetadataHandler.java @@ -26,8 +26,8 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandlerFactory; import com.amazonaws.services.athena.AmazonAthena; -import 
com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.util.VisibleForTesting; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -56,7 +56,7 @@ public OracleMuxMetadataHandler(java.util.Map configOptions) } @VisibleForTesting - protected OracleMuxMetadataHandler(AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + protected OracleMuxMetadataHandler(SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, Map metadataHandlerMap, DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { super(secretsManager, athena, jdbcConnectionFactory, metadataHandlerMap, databaseConnectionConfig, configOptions); diff --git a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMuxRecordHandler.java b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMuxRecordHandler.java index 02b873ec7c..820fe3162c 100644 --- a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMuxRecordHandler.java +++ b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMuxRecordHandler.java @@ -27,8 +27,8 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.annotations.VisibleForTesting; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -57,7 +57,7 @@ public OracleMuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - OracleMuxRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + OracleMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, 
JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleRecordHandler.java b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleRecordHandler.java index 83c2d66654..d2efc886a5 100644 --- a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleRecordHandler.java +++ b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleRecordHandler.java @@ -32,13 +32,12 @@ import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.commons.lang3.Validate; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.PreparedStatement; @@ -77,12 +76,12 @@ public OracleRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, ja public OracleRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { - this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), AWSSecretsManagerClientBuilder.defaultClient(), AmazonAthenaClientBuilder.defaultClient(), + this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), jdbcConnectionFactory, new 
OracleQueryStringBuilder(ORACLE_QUOTE_CHARACTER, new OracleFederationExpressionParser(ORACLE_QUOTE_CHARACTER)), configOptions); } @VisibleForTesting - OracleRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, final AmazonS3 amazonS3, final AWSSecretsManager secretsManager, + OracleRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, final AmazonS3 amazonS3, final SecretsManagerClient secretsManager, final AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); diff --git a/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandlerTest.java b/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandlerTest.java index 63a4e6b7bd..f3f8d58484 100644 --- a/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandlerTest.java +++ b/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandlerTest.java @@ -34,15 +34,15 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest; -import com.amazonaws.services.secretsmanager.model.GetSecretValueResult; import org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; +import 
software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; import java.sql.Connection; import java.sql.PreparedStatement; @@ -72,7 +72,7 @@ public class OracleMetadataHandlerTest private JdbcConnectionFactory jdbcConnectionFactory; private Connection connection; private FederatedIdentity federatedIdentity; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; @Before @@ -82,9 +82,9 @@ public void setup() this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class, Mockito.RETURNS_DEEP_STUBS); this.connection = Mockito.mock(Connection.class, Mockito.RETURNS_DEEP_STUBS); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); - Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(new GetSecretValueRequest().withSecretId("testSecret")))).thenReturn(new GetSecretValueResult().withSecretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}")); + Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}").build()); this.oracleMetadataHandler = new OracleMetadataHandler(databaseConnectionConfig, this.secretsManager, this.athena, this.jdbcConnectionFactory, com.google.common.collect.ImmutableMap.of()); this.federatedIdentity = Mockito.mock(FederatedIdentity.class); } diff --git a/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMuxJdbcMetadataHandlerTest.java b/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMuxJdbcMetadataHandlerTest.java index 3520219b56..f2a80bd428 100644 --- 
a/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMuxJdbcMetadataHandlerTest.java +++ b/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMuxJdbcMetadataHandlerTest.java @@ -35,10 +35,10 @@ import com.amazonaws.athena.connectors.oracle.OracleMetadataHandler; import com.amazonaws.athena.connectors.oracle.OracleMuxMetadataHandler; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Collections; import java.util.Map; @@ -51,7 +51,7 @@ public class OracleMuxJdbcMetadataHandlerTest private OracleMetadataHandler oracleMetadataHandler; private JdbcMetadataHandler jdbcMetadataHandler; private BlockAllocator allocator; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -64,7 +64,7 @@ public void setup() //Mockito.when(this.allocator.createBlock(nullable(Schema.class))).thenReturn(Mockito.mock(Block.class)); this.oracleMetadataHandler = Mockito.mock(OracleMetadataHandler.class); this.metadataHandlerMap = Collections.singletonMap("fakedatabase", this.oracleMetadataHandler); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMuxJdbcRecordHandlerTest.java b/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMuxJdbcRecordHandlerTest.java index 84c01f1eaf..4284c3559b 
100644 --- a/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMuxJdbcRecordHandlerTest.java +++ b/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMuxJdbcRecordHandlerTest.java @@ -32,11 +32,11 @@ import com.amazonaws.athena.connectors.oracle.OracleRecordHandler; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.SQLException; @@ -49,7 +49,7 @@ public class OracleMuxJdbcRecordHandlerTest private OracleRecordHandler oracleRecordHandler; private JdbcRecordHandler jdbcRecordHandler; private AmazonS3 amazonS3; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -60,7 +60,7 @@ public void setup() this.oracleRecordHandler = Mockito.mock(OracleRecordHandler.class); this.recordHandlerMap = Collections.singletonMap("oracle", this.oracleRecordHandler); this.amazonS3 = Mockito.mock(AmazonS3.class); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleRecordHandlerTest.java b/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleRecordHandlerTest.java index d0b3ade385..783c4df86b 100644 --- 
a/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleRecordHandlerTest.java +++ b/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleRecordHandlerTest.java @@ -34,7 +34,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.types.Types; @@ -43,6 +42,7 @@ import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.PreparedStatement; @@ -59,7 +59,7 @@ public class OracleRecordHandlerTest private JdbcConnectionFactory jdbcConnectionFactory; private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; private AmazonS3 amazonS3; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private static final String ORACLE_QUOTE_CHARACTER = "\""; @@ -70,7 +70,7 @@ public void setup() throws Exception { this.amazonS3 = Mockito.mock(AmazonS3.class); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.connection = Mockito.mock(Connection.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMetadataHandler.java b/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMetadataHandler.java index e7faa77494..2aae525373 100644 --- a/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMetadataHandler.java +++ 
b/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMetadataHandler.java @@ -47,7 +47,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.PreparedStatementBuilder; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; @@ -57,6 +56,7 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.PreparedStatement; @@ -114,7 +114,7 @@ public PostGreSqlMetadataHandler(DatabaseConnectionConfig databaseConnectionConf @VisibleForTesting protected PostGreSqlMetadataHandler( DatabaseConnectionConfig databaseConnectionConfig, - AWSSecretsManager secretsManager, + SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) diff --git a/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxMetadataHandler.java b/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxMetadataHandler.java index 3f6ae65f6e..c1a6ef2795 100644 --- a/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxMetadataHandler.java +++ b/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxMetadataHandler.java @@ -26,8 +26,8 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandlerFactory; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import 
org.apache.arrow.util.VisibleForTesting; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -58,7 +58,7 @@ public PostGreSqlMuxMetadataHandler(java.util.Map configOptions) } @VisibleForTesting - protected PostGreSqlMuxMetadataHandler(AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + protected PostGreSqlMuxMetadataHandler(SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, Map metadataHandlerMap, DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { super(secretsManager, athena, jdbcConnectionFactory, metadataHandlerMap, databaseConnectionConfig, configOptions); diff --git a/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxRecordHandler.java b/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxRecordHandler.java index 61198d3fec..c82d597a4f 100644 --- a/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxRecordHandler.java +++ b/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxRecordHandler.java @@ -27,8 +27,8 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.annotations.VisibleForTesting; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -57,7 +57,7 @@ public PostGreSqlMuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - PostGreSqlMuxRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + PostGreSqlMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena 
athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlRecordHandler.java b/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlRecordHandler.java index 877c450a05..a32339a739 100644 --- a/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlRecordHandler.java +++ b/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlRecordHandler.java @@ -33,13 +33,12 @@ import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.commons.lang3.Validate; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.PreparedStatement; @@ -71,12 +70,12 @@ public PostGreSqlRecordHandler(java.util.Map configOptions) public PostGreSqlRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { - this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), AWSSecretsManagerClientBuilder.defaultClient(), AmazonAthenaClientBuilder.defaultClient(), + this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), new GenericJdbcConnectionFactory(databaseConnectionConfig, 
PostGreSqlMetadataHandler.JDBC_PROPERTIES, new DatabaseConnectionInfo(POSTGRESQL_DRIVER_CLASS, POSTGRESQL_DEFAULT_PORT)), new PostGreSqlQueryStringBuilder(POSTGRES_QUOTE_CHARACTER, new PostgreSqlFederationExpressionParser(POSTGRES_QUOTE_CHARACTER)), configOptions); } @VisibleForTesting - protected PostGreSqlRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, AWSSecretsManager secretsManager, + protected PostGreSqlRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); diff --git a/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMetadataHandlerTest.java b/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMetadataHandlerTest.java index 557619ad7f..60a51f3819 100644 --- a/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMetadataHandlerTest.java +++ b/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMetadataHandlerTest.java @@ -41,9 +41,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest; -import com.amazonaws.services.secretsmanager.model.GetSecretValueResult; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.types.DateUnit; import org.apache.arrow.vector.types.FloatingPointPrecision; @@ -56,6 +53,9 @@ import org.mockito.Mockito; import org.slf4j.Logger; import 
org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; import java.sql.Connection; import java.sql.PreparedStatement; @@ -86,7 +86,7 @@ public class PostGreSqlMetadataHandlerTest private JdbcConnectionFactory jdbcConnectionFactory; private Connection connection; private FederatedIdentity federatedIdentity; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; @Before @@ -96,8 +96,8 @@ public void setup() this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); this.connection = Mockito.mock(Connection.class, Mockito.RETURNS_DEEP_STUBS); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); - Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(new GetSecretValueRequest().withSecretId("testSecret")))).thenReturn(new GetSecretValueResult().withSecretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}")); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); + Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}").build()); this.postGreSqlMetadataHandler = new PostGreSqlMetadataHandler(databaseConnectionConfig, this.secretsManager, this.athena, this.jdbcConnectionFactory, com.google.common.collect.ImmutableMap.of()); this.federatedIdentity = Mockito.mock(FederatedIdentity.class); } diff --git a/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxJdbcMetadataHandlerTest.java 
b/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxJdbcMetadataHandlerTest.java index f21d694c19..5059e131fc 100644 --- a/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxJdbcMetadataHandlerTest.java +++ b/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxJdbcMetadataHandlerTest.java @@ -33,10 +33,10 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Collections; import java.util.Map; @@ -49,7 +49,7 @@ public class PostGreSqlMuxJdbcMetadataHandlerTest private PostGreSqlMetadataHandler postGreSqlMetadataHandler; private JdbcMetadataHandler jdbcMetadataHandler; private BlockAllocator allocator; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -62,7 +62,7 @@ public void setup() //Mockito.when(this.allocator.createBlock(nullable(Schema.class))).thenReturn(Mockito.mock(Block.class)); this.postGreSqlMetadataHandler = Mockito.mock(PostGreSqlMetadataHandler.class); this.metadataHandlerMap = Collections.singletonMap("postgres", this.postGreSqlMetadataHandler); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git 
a/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxJdbcRecordHandlerTest.java b/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxJdbcRecordHandlerTest.java index ba498d8f97..b433e7a27b 100644 --- a/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxJdbcRecordHandlerTest.java +++ b/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxJdbcRecordHandlerTest.java @@ -30,11 +30,11 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.SQLException; @@ -47,7 +47,7 @@ public class PostGreSqlMuxJdbcRecordHandlerTest private PostGreSqlRecordHandler postGreSqlRecordHandler; private JdbcRecordHandler jdbcRecordHandler; private AmazonS3 amazonS3; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -58,7 +58,7 @@ public void setup() this.postGreSqlRecordHandler = Mockito.mock(PostGreSqlRecordHandler.class); this.recordHandlerMap = Collections.singletonMap("postgres", this.postGreSqlRecordHandler); this.amazonS3 = Mockito.mock(AmazonS3.class); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = 
Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlRecordHandlerTest.java b/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlRecordHandlerTest.java index 17d682b8b0..cd2d988af7 100644 --- a/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlRecordHandlerTest.java +++ b/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlRecordHandlerTest.java @@ -35,7 +35,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.types.Types; @@ -47,6 +46,7 @@ import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.math.BigDecimal; import java.sql.Connection; @@ -69,7 +69,7 @@ public class PostGreSqlRecordHandlerTest extends TestBase private JdbcConnectionFactory jdbcConnectionFactory; private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; private AmazonS3 amazonS3; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; @Before @@ -77,7 +77,7 @@ public void setup() throws Exception { this.amazonS3 = Mockito.mock(AmazonS3.class); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.connection = Mockito.mock(Connection.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git 
a/athena-redis/src/main/java/com/amazonaws/athena/connectors/redis/RedisMetadataHandler.java b/athena-redis/src/main/java/com/amazonaws/athena/connectors/redis/RedisMetadataHandler.java index b0d6be7d06..3204ec501e 100644 --- a/athena-redis/src/main/java/com/amazonaws/athena/connectors/redis/RedisMetadataHandler.java +++ b/athena-redis/src/main/java/com/amazonaws/athena/connectors/redis/RedisMetadataHandler.java @@ -51,7 +51,6 @@ import com.amazonaws.services.glue.AWSGlue; import com.amazonaws.services.glue.model.Database; import com.amazonaws.services.glue.model.Table; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.collect.ImmutableMap; import io.lettuce.core.KeyScanCursor; import io.lettuce.core.Range; @@ -64,6 +63,7 @@ import org.apache.arrow.vector.util.Text; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Arrays; import java.util.HashSet; @@ -153,7 +153,7 @@ public RedisMetadataHandler(java.util.Map configOptions) protected RedisMetadataHandler( AWSGlue awsGlue, EncryptionKeyFactory keyFactory, - AWSSecretsManager secretsManager, + SecretsManagerClient secretsManager, AmazonAthena athena, RedisConnectionFactory redisConnectionFactory, String spillBucket, diff --git a/athena-redis/src/main/java/com/amazonaws/athena/connectors/redis/RedisRecordHandler.java b/athena-redis/src/main/java/com/amazonaws/athena/connectors/redis/RedisRecordHandler.java index dfb490a3a6..86fd3ea55e 100644 --- a/athena-redis/src/main/java/com/amazonaws/athena/connectors/redis/RedisRecordHandler.java +++ b/athena-redis/src/main/java/com/amazonaws/athena/connectors/redis/RedisRecordHandler.java @@ -33,8 +33,6 @@ import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import 
com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import io.lettuce.core.KeyScanCursor; import io.lettuce.core.ScanArgs; import io.lettuce.core.ScanCursor; @@ -45,6 +43,7 @@ import org.apache.arrow.vector.types.pojo.Field; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.HashMap; import java.util.HashSet; @@ -96,7 +95,7 @@ public RedisRecordHandler(java.util.Map configOptions) { this( AmazonS3ClientBuilder.standard().build(), - AWSSecretsManagerClientBuilder.defaultClient(), + SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), new RedisConnectionFactory(), configOptions); @@ -104,7 +103,7 @@ public RedisRecordHandler(java.util.Map configOptions) @VisibleForTesting protected RedisRecordHandler(AmazonS3 amazonS3, - AWSSecretsManager secretsManager, + SecretsManagerClient secretsManager, AmazonAthena athena, RedisConnectionFactory redisConnectionFactory, java.util.Map configOptions) diff --git a/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/RedisMetadataHandlerTest.java b/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/RedisMetadataHandlerTest.java index 2767e82eb1..f7fca053a2 100644 --- a/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/RedisMetadataHandlerTest.java +++ b/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/RedisMetadataHandlerTest.java @@ -38,9 +38,6 @@ import com.amazonaws.athena.connectors.redis.util.MockKeyScanCursor; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.glue.AWSGlue; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest; -import com.amazonaws.services.secretsmanager.model.GetSecretValueResult; import io.lettuce.core.Range; import io.lettuce.core.ScanArgs; import io.lettuce.core.ScanCursor; @@ -57,6 +54,9 @@ import 
org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; import java.util.ArrayList; import java.util.Collections; @@ -106,7 +106,7 @@ public class RedisMetadataHandlerTest private AWSGlue mockGlue; @Mock - private AWSSecretsManager mockSecretsManager; + private SecretsManagerClient mockSecretsManager; @Mock private AmazonAthena mockAthena; @@ -129,10 +129,10 @@ public void setUp() when(mockSecretsManager.getSecretValue(nullable(GetSecretValueRequest.class))) .thenAnswer((InvocationOnMock invocation) -> { GetSecretValueRequest request = invocation.getArgument(0, GetSecretValueRequest.class); - if ("endpoint".equalsIgnoreCase(request.getSecretId())) { - return new GetSecretValueResult().withSecretString(decodedEndpoint); + if ("endpoint".equalsIgnoreCase(request.secretId())) { + return GetSecretValueResponse.builder().secretString(decodedEndpoint).build(); } - throw new RuntimeException("Unknown secret " + request.getSecretId()); + throw new RuntimeException("Unknown secret " + request.secretId()); }); } diff --git a/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/RedisRecordHandlerTest.java b/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/RedisRecordHandlerTest.java index e846d40ff2..1ed0f51301 100644 --- a/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/RedisRecordHandlerTest.java +++ b/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/RedisRecordHandlerTest.java @@ -46,9 +46,6 @@ import com.amazonaws.services.s3.model.PutObjectResult; import com.amazonaws.services.s3.model.S3Object; import com.amazonaws.services.s3.model.S3ObjectInputStream; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import 
com.amazonaws.services.secretsmanager.model.GetSecretValueRequest; -import com.amazonaws.services.secretsmanager.model.GetSecretValueResult; import com.google.common.collect.ImmutableList; import com.google.common.io.ByteStreams; import io.lettuce.core.ScanArgs; @@ -69,6 +66,9 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; import java.io.ByteArrayInputStream; import java.io.InputStream; @@ -119,7 +119,7 @@ public class RedisRecordHandlerTest private RedisCommandsWrapper mockSyncCommands; @Mock - private AWSSecretsManager mockSecretsManager; + private SecretsManagerClient mockSecretsManager; @Mock private RedisConnectionFactory mockFactory; @@ -169,10 +169,10 @@ public void setUp() when(mockSecretsManager.getSecretValue(nullable(GetSecretValueRequest.class))) .thenAnswer((InvocationOnMock invocation) -> { GetSecretValueRequest request = invocation.getArgument(0, GetSecretValueRequest.class); - if ("endpoint".equalsIgnoreCase(request.getSecretId())) { - return new GetSecretValueResult().withSecretString(decodedEndpoint); + if ("endpoint".equalsIgnoreCase(request.secretId())) { + return GetSecretValueResponse.builder().secretString(decodedEndpoint).build(); } - throw new RuntimeException("Unknown secret " + request.getSecretId()); + throw new RuntimeException("Unknown secret " + request.secretId()); }); handler = new RedisRecordHandler(amazonS3, mockSecretsManager, mockAthena, mockFactory, com.google.common.collect.ImmutableMap.of()); diff --git a/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftMetadataHandler.java b/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftMetadataHandler.java index 
ab23c7b7b4..fcc5064d00 100644 --- a/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftMetadataHandler.java +++ b/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftMetadataHandler.java @@ -37,11 +37,11 @@ import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil; import com.amazonaws.athena.connectors.postgresql.PostGreSqlMetadataHandler; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.collect.ImmutableMap; import org.apache.arrow.util.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.PreparedStatement; @@ -80,7 +80,7 @@ public RedshiftMetadataHandler(DatabaseConnectionConfig databaseConnectionConfig } @VisibleForTesting - RedshiftMetadataHandler(DatabaseConnectionConfig databaseConnectionConfig, AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) + RedshiftMetadataHandler(DatabaseConnectionConfig databaseConnectionConfig, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { super(databaseConnectionConfig, secretsManager, athena, jdbcConnectionFactory, configOptions); } diff --git a/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxMetadataHandler.java b/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxMetadataHandler.java index e0ff392750..ac027906c7 100644 --- a/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxMetadataHandler.java +++ b/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxMetadataHandler.java @@ -26,8 +26,8 @@ import 
com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandlerFactory; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.util.VisibleForTesting; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -58,7 +58,7 @@ public RedshiftMuxMetadataHandler(java.util.Map configOptions) } @VisibleForTesting - protected RedshiftMuxMetadataHandler(AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + protected RedshiftMuxMetadataHandler(SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, Map metadataHandlerMap, DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { super(secretsManager, athena, jdbcConnectionFactory, metadataHandlerMap, databaseConnectionConfig, configOptions); diff --git a/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxRecordHandler.java b/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxRecordHandler.java index 72f1ea1381..0be638dcdb 100644 --- a/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxRecordHandler.java +++ b/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxRecordHandler.java @@ -27,8 +27,8 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.annotations.VisibleForTesting; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -58,7 +58,7 @@ public RedshiftMuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - 
RedshiftMuxRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + RedshiftMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftRecordHandler.java b/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftRecordHandler.java index 1546ea391b..2bb85fef36 100644 --- a/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftRecordHandler.java +++ b/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftRecordHandler.java @@ -34,11 +34,10 @@ import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import org.apache.arrow.util.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import static com.amazonaws.athena.connectors.postgresql.PostGreSqlConstants.POSTGRES_QUOTE_CHARACTER; import static com.amazonaws.athena.connectors.redshift.RedshiftConstants.REDSHIFT_DEFAULT_PORT; @@ -62,12 +61,12 @@ public RedshiftRecordHandler(java.util.Map configOptions) public RedshiftRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { - super(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), AWSSecretsManagerClientBuilder.defaultClient(), 
AmazonAthenaClientBuilder.defaultClient(), + super(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), new GenericJdbcConnectionFactory(databaseConnectionConfig, PostGreSqlMetadataHandler.JDBC_PROPERTIES, new DatabaseConnectionInfo(REDSHIFT_DRIVER_CLASS, REDSHIFT_DEFAULT_PORT)), new PostGreSqlQueryStringBuilder(POSTGRES_QUOTE_CHARACTER, new PostgreSqlFederationExpressionParser(POSTGRES_QUOTE_CHARACTER)), configOptions); } @VisibleForTesting - RedshiftRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) + RedshiftRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(databaseConnectionConfig, amazonS3, secretsManager, athena, jdbcConnectionFactory, jdbcSplitQueryBuilder, configOptions); } diff --git a/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftMetadataHandlerTest.java b/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftMetadataHandlerTest.java index 027c716876..280102ca5b 100644 --- a/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftMetadataHandlerTest.java +++ b/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftMetadataHandlerTest.java @@ -42,9 +42,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.athena.connectors.postgresql.PostGreSqlMetadataHandler; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import 
com.amazonaws.services.secretsmanager.model.GetSecretValueRequest; -import com.amazonaws.services.secretsmanager.model.GetSecretValueResult; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.types.DateUnit; import org.apache.arrow.vector.types.FloatingPointPrecision; @@ -57,6 +54,9 @@ import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; import java.sql.Connection; import java.sql.PreparedStatement; @@ -88,7 +88,7 @@ public class RedshiftMetadataHandlerTest private JdbcConnectionFactory jdbcConnectionFactory; private Connection connection; private FederatedIdentity federatedIdentity; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; @Before @@ -98,8 +98,8 @@ public void setup() this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); this.connection = Mockito.mock(Connection.class, Mockito.RETURNS_DEEP_STUBS); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); - Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(new GetSecretValueRequest().withSecretId("testSecret")))).thenReturn(new GetSecretValueResult().withSecretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}")); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); + Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}").build()); this.redshiftMetadataHandler = new 
RedshiftMetadataHandler(databaseConnectionConfig, this.secretsManager, this.athena, this.jdbcConnectionFactory, com.google.common.collect.ImmutableMap.of()); this.federatedIdentity = Mockito.mock(FederatedIdentity.class); } diff --git a/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxJdbcMetadataHandlerTest.java b/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxJdbcMetadataHandlerTest.java index b0c5a7d86d..66ddda2d43 100644 --- a/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxJdbcMetadataHandlerTest.java +++ b/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxJdbcMetadataHandlerTest.java @@ -33,10 +33,10 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Collections; import java.util.Map; @@ -49,7 +49,7 @@ public class RedshiftMuxJdbcMetadataHandlerTest private RedshiftMetadataHandler redshiftMetadataHandler; private JdbcMetadataHandler jdbcMetadataHandler; private BlockAllocator allocator; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -62,7 +62,7 @@ public void setup() //Mockito.when(this.allocator.createBlock(nullable(Schema.class))).thenReturn(Mockito.mock(Block.class)); this.redshiftMetadataHandler = Mockito.mock(RedshiftMetadataHandler.class); this.metadataHandlerMap = Collections.singletonMap("redshift", this.redshiftMetadataHandler); - this.secretsManager = 
Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxJdbcRecordHandlerTest.java b/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxJdbcRecordHandlerTest.java index 686e322ccc..e7172492dc 100644 --- a/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxJdbcRecordHandlerTest.java +++ b/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxJdbcRecordHandlerTest.java @@ -30,11 +30,11 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.SQLException; @@ -47,7 +47,7 @@ public class RedshiftMuxJdbcRecordHandlerTest private RedshiftRecordHandler redshiftRecordHandler; private JdbcRecordHandler jdbcRecordHandler; private AmazonS3 amazonS3; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -58,7 +58,7 @@ public void setup() this.redshiftRecordHandler = Mockito.mock(RedshiftRecordHandler.class); this.recordHandlerMap = Collections.singletonMap("redshift", this.redshiftRecordHandler); this.amazonS3 = Mockito.mock(AmazonS3.class); - this.secretsManager = 
Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftRecordHandlerTest.java b/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftRecordHandlerTest.java index b5624a1ab2..024c076b43 100644 --- a/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftRecordHandlerTest.java +++ b/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftRecordHandlerTest.java @@ -38,7 +38,6 @@ import com.amazonaws.athena.connectors.postgresql.PostgreSqlFederationExpressionParser; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.types.Types; @@ -50,6 +49,7 @@ import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.math.BigDecimal; import java.sql.Connection; @@ -72,7 +72,7 @@ public class RedshiftRecordHandlerTest private JdbcConnectionFactory jdbcConnectionFactory; private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; private AmazonS3 amazonS3; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; @Before @@ -80,7 +80,7 @@ public void setup() throws Exception { this.amazonS3 = Mockito.mock(AmazonS3.class); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = 
Mockito.mock(AmazonAthena.class); this.connection = Mockito.mock(Connection.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaMetadataHandler.java b/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaMetadataHandler.java index b0c9aca0a3..8f3e462212 100644 --- a/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaMetadataHandler.java +++ b/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaMetadataHandler.java @@ -53,7 +53,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.PreparedStatementBuilder; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.complex.reader.FieldReader; @@ -64,6 +63,7 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.DatabaseMetaData; @@ -102,7 +102,7 @@ SaphanaConstants.JDBC_PROPERTIES, new DatabaseConnectionInfo(SaphanaConstants.SA @VisibleForTesting protected SaphanaMetadataHandler( DatabaseConnectionConfig databaseConnectionConfig, - AWSSecretsManager secretsManager, + SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) diff --git a/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxMetadataHandler.java b/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxMetadataHandler.java index 0a8d019de0..c9d08e18ea 100644 --- 
a/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxMetadataHandler.java +++ b/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxMetadataHandler.java @@ -26,8 +26,8 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandlerFactory; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.util.VisibleForTesting; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -56,7 +56,7 @@ public SaphanaMuxMetadataHandler(java.util.Map configOptions) } @VisibleForTesting - protected SaphanaMuxMetadataHandler(AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + protected SaphanaMuxMetadataHandler(SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, Map metadataHandlerMap, DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { super(secretsManager, athena, jdbcConnectionFactory, metadataHandlerMap, databaseConnectionConfig, configOptions); diff --git a/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxRecordHandler.java b/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxRecordHandler.java index 050b8ba5d2..2063d2fa0a 100644 --- a/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxRecordHandler.java +++ b/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxRecordHandler.java @@ -27,8 +27,8 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import 
com.google.common.annotations.VisibleForTesting; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -55,7 +55,7 @@ public SaphanaMuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - SaphanaMuxRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + SaphanaMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaRecordHandler.java b/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaRecordHandler.java index c65656c45f..27b0aa6446 100644 --- a/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaRecordHandler.java +++ b/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaRecordHandler.java @@ -35,14 +35,13 @@ import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.commons.lang3.Validate; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.PreparedStatement; @@ -68,7 +67,7 @@ public 
SaphanaRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, j SaphanaConstants.SAPHANA_DEFAULT_PORT)), configOptions); } @VisibleForTesting - SaphanaRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) + SaphanaRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); this.jdbcSplitQueryBuilder = Validate.notNull(jdbcSplitQueryBuilder, "query builder must not be null"); @@ -76,7 +75,7 @@ public SaphanaRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, j public SaphanaRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, GenericJdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { - this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), AWSSecretsManagerClientBuilder.defaultClient(), + this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), jdbcConnectionFactory, new SaphanaQueryStringBuilder(SAPHANA_QUOTE_CHARACTER, new SaphanaFederationExpressionParser(SAPHANA_QUOTE_CHARACTER)), configOptions); } diff --git a/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaMetadataHandlerTest.java b/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaMetadataHandlerTest.java index bf8b86c007..ecf49611d1 100644 --- a/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaMetadataHandlerTest.java +++ 
b/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaMetadataHandlerTest.java @@ -49,9 +49,9 @@ import com.amazonaws.athena.connector.lambda.domain.predicate.Constraints; import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest; -import com.amazonaws.services.secretsmanager.model.GetSecretValueResult; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.nullable; @@ -65,7 +65,7 @@ public class SaphanaMetadataHandlerTest private JdbcConnectionFactory jdbcConnectionFactory; private Connection connection; private FederatedIdentity federatedIdentity; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private BlockAllocator blockAllocator; private static final Schema PARTITION_SCHEMA = SchemaBuilder.newBuilder().addField("PART_ID", org.apache.arrow.vector.types.Types.MinorType.VARCHAR.getType()).build(); @@ -78,9 +78,9 @@ public void setup() this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class, Mockito.RETURNS_DEEP_STUBS); this.connection = Mockito.mock(Connection.class, Mockito.RETURNS_DEEP_STUBS); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); - Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(new 
GetSecretValueRequest().withSecretId("testSecret")))).thenReturn(new GetSecretValueResult().withSecretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}")); + Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}").build()); this.saphanaMetadataHandler = new SaphanaMetadataHandler(databaseConnectionConfig, this.secretsManager, this.athena, this.jdbcConnectionFactory, com.google.common.collect.ImmutableMap.of()); this.federatedIdentity = Mockito.mock(FederatedIdentity.class); this.blockAllocator = Mockito.mock(BlockAllocator.class); diff --git a/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxJdbcMetadataHandlerTest.java b/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxJdbcMetadataHandlerTest.java index 1e4dc8fffd..9a26fc14d9 100644 --- a/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxJdbcMetadataHandlerTest.java +++ b/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxJdbcMetadataHandlerTest.java @@ -29,10 +29,10 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Collections; import java.util.Map; @@ -45,7 +45,7 @@ public class SaphanaMuxJdbcMetadataHandlerTest { private SaphanaMetadataHandler saphanaMetadataHandler; private JdbcMetadataHandler jdbcMetadataHandler; private BlockAllocator allocator; - private AWSSecretsManager secretsManager; + private 
SecretsManagerClient secretsManager; private AmazonAthena athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -58,7 +58,7 @@ public void setup() //Mockito.when(this.allocator.createBlock(nullable(Schema.class))).thenReturn(Mockito.mock(Block.class)); this.saphanaMetadataHandler = Mockito.mock(SaphanaMetadataHandler.class); this.metadataHandlerMap = Collections.singletonMap("fakedatabase", this.saphanaMetadataHandler); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxJdbcRecordHandlerTest.java b/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxJdbcRecordHandlerTest.java index 1336af4d7c..ca00262b23 100644 --- a/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxJdbcRecordHandlerTest.java +++ b/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxJdbcRecordHandlerTest.java @@ -30,11 +30,11 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.SQLException; @@ -47,7 +47,7 @@ public class SaphanaMuxJdbcRecordHandlerTest private SaphanaRecordHandler saphanaRecordHandler; private JdbcRecordHandler jdbcRecordHandler; private AmazonS3 amazonS3; - private 
AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -58,7 +58,7 @@ public void setup() this.saphanaRecordHandler = Mockito.mock(SaphanaRecordHandler.class); this.recordHandlerMap = Collections.singletonMap("saphana", this.saphanaRecordHandler); this.amazonS3 = Mockito.mock(AmazonS3.class); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaRecordHandlerTest.java b/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaRecordHandlerTest.java index 833dd0a0d3..0b8b58fd9f 100644 --- a/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaRecordHandlerTest.java +++ b/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaRecordHandlerTest.java @@ -34,7 +34,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.types.Types; @@ -43,6 +42,7 @@ import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.PreparedStatement; @@ -59,7 +59,7 @@ public class SaphanaRecordHandlerTest private JdbcConnectionFactory jdbcConnectionFactory; private JdbcSplitQueryBuilder 
jdbcSplitQueryBuilder; private AmazonS3 amazonS3; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; @Before @@ -67,7 +67,7 @@ public void setup() throws Exception { this.amazonS3 = Mockito.mock(AmazonS3.class); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.connection = Mockito.mock(Connection.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandler.java b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandler.java index 234718760a..751bd58f21 100644 --- a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandler.java +++ b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandler.java @@ -56,7 +56,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.PreparedStatementBuilder; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Strings; import com.google.common.collect.ImmutableMap; @@ -68,6 +67,7 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.DatabaseMetaData; @@ -139,7 +139,7 @@ JDBC_PROPERTIES, new DatabaseConnectionInfo(SnowflakeConstants.SNOWFLAKE_DRIVER_ @VisibleForTesting protected SnowflakeMetadataHandler( DatabaseConnectionConfig databaseConnectionConfig, - AWSSecretsManager secretsManager, + 
SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) diff --git a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxMetadataHandler.java b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxMetadataHandler.java index 5754598bd8..27f9135f49 100644 --- a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxMetadataHandler.java +++ b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxMetadataHandler.java @@ -26,8 +26,8 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandlerFactory; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.util.VisibleForTesting; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -56,7 +56,7 @@ public SnowflakeMuxMetadataHandler(java.util.Map configOptions) } @VisibleForTesting - protected SnowflakeMuxMetadataHandler(AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + protected SnowflakeMuxMetadataHandler(SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, Map metadataHandlerMap, DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { super(secretsManager, athena, jdbcConnectionFactory, metadataHandlerMap, databaseConnectionConfig, configOptions); diff --git a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxRecordHandler.java b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxRecordHandler.java index 07694a20a6..98b1fb6c10 100644 --- 
a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxRecordHandler.java +++ b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxRecordHandler.java @@ -27,8 +27,8 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.annotations.VisibleForTesting; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -55,7 +55,7 @@ public SnowflakeMuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - SnowflakeMuxRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + SnowflakeMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeRecordHandler.java b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeRecordHandler.java index 1f4b8ad1b2..649a722862 100644 --- a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeRecordHandler.java +++ b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeRecordHandler.java @@ -34,11 +34,10 @@ import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import 
com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.commons.lang3.Validate; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.PreparedStatement; @@ -68,11 +67,11 @@ public SnowflakeRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, } public SnowflakeRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, GenericJdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { - this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), AWSSecretsManagerClientBuilder.defaultClient(), AmazonAthenaClientBuilder.defaultClient(), + this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), jdbcConnectionFactory, new SnowflakeQueryStringBuilder(SNOWFLAKE_QUOTE_CHARACTER, new SnowflakeFederationExpressionParser(SNOWFLAKE_QUOTE_CHARACTER)), configOptions); } @VisibleForTesting - SnowflakeRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, final AmazonS3 amazonS3, final AWSSecretsManager secretsManager, + SnowflakeRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, final AmazonS3 amazonS3, final SecretsManagerClient secretsManager, final AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); diff --git a/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandlerTest.java b/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandlerTest.java index b18fde1cda..5fe668f5a3 100644 --- 
a/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandlerTest.java +++ b/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandlerTest.java @@ -29,10 +29,10 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest; -import com.amazonaws.services.secretsmanager.model.GetSecretValueResult; import org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Assert; @@ -58,7 +58,7 @@ public class SnowflakeMetadataHandlerTest private JdbcConnectionFactory jdbcConnectionFactory; private Connection connection; private FederatedIdentity federatedIdentity; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private BlockAllocator blockAllocator; private static final Schema PARTITION_SCHEMA = SchemaBuilder.newBuilder().addField("partition", org.apache.arrow.vector.types.Types.MinorType.VARCHAR.getType()).build(); @@ -71,9 +71,9 @@ public void setup() this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class , Mockito.RETURNS_DEEP_STUBS); this.connection = Mockito.mock(Connection.class, Mockito.RETURNS_DEEP_STUBS); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); - this.secretsManager = 
Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); - Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(new GetSecretValueRequest().withSecretId("testSecret")))).thenReturn(new GetSecretValueResult().withSecretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}")); + Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}").build()); this.snowflakeMetadataHandler = new SnowflakeMetadataHandler(databaseConnectionConfig, this.secretsManager, this.athena, this.jdbcConnectionFactory, com.google.common.collect.ImmutableMap.of()); this.federatedIdentity = Mockito.mock(FederatedIdentity.class); this.blockAllocator = Mockito.mock(BlockAllocator.class); diff --git a/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxJdbcMetadataHandlerTest.java b/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxJdbcMetadataHandlerTest.java index c566df626d..ac62d22e7f 100644 --- a/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxJdbcMetadataHandlerTest.java +++ b/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxJdbcMetadataHandlerTest.java @@ -29,10 +29,10 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Collections; import java.util.Map; @@ -45,7 +45,7 @@ 
public class SnowflakeMuxJdbcMetadataHandlerTest private SnowflakeMetadataHandler snowflakeMetadataHandler; private JdbcMetadataHandler jdbcMetadataHandler; private BlockAllocator allocator; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -56,7 +56,7 @@ public void setup() this.allocator = new BlockAllocatorImpl(); this.snowflakeMetadataHandler = Mockito.mock(SnowflakeMetadataHandler.class); this.metadataHandlerMap = Collections.singletonMap("fakedatabase", this.snowflakeMetadataHandler); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxJdbcRecordHandlerTest.java b/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxJdbcRecordHandlerTest.java index 5dee99bfb7..5582a34454 100644 --- a/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxJdbcRecordHandlerTest.java +++ b/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxJdbcRecordHandlerTest.java @@ -32,11 +32,11 @@ import com.amazonaws.athena.connectors.snowflake.SnowflakeRecordHandler; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import 
java.sql.SQLException; @@ -51,7 +51,7 @@ public class SnowflakeMuxJdbcRecordHandlerTest private SnowflakeRecordHandler snowflakeRecordHandler; private JdbcRecordHandler jdbcRecordHandler; private AmazonS3 amazonS3; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -62,7 +62,7 @@ public void setup() this.snowflakeRecordHandler = Mockito.mock(SnowflakeRecordHandler.class); this.recordHandlerMap = Collections.singletonMap("snowflake", this.snowflakeRecordHandler); this.amazonS3 = Mockito.mock(AmazonS3.class); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeRecordHandlerTest.java b/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeRecordHandlerTest.java index 006cbd95e5..bd0cbed6c9 100644 --- a/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeRecordHandlerTest.java +++ b/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeRecordHandlerTest.java @@ -35,7 +35,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.types.Types; @@ -44,6 +43,7 @@ import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import 
software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.PreparedStatement; @@ -60,7 +60,7 @@ public class SnowflakeRecordHandlerTest private JdbcConnectionFactory jdbcConnectionFactory; private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; private AmazonS3 amazonS3; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; @Before @@ -68,7 +68,7 @@ public void setup() throws Exception { this.amazonS3 = Mockito.mock(AmazonS3.class); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.connection = Mockito.mock(Connection.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMetadataHandler.java b/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMetadataHandler.java index 9ffd2dd16e..099cf0171f 100644 --- a/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMetadataHandler.java +++ b/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMetadataHandler.java @@ -55,7 +55,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.PreparedStatementBuilder; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; @@ -66,6 +65,7 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import 
java.sql.DatabaseMetaData; @@ -149,7 +149,7 @@ public SqlServerMetadataHandler(DatabaseConnectionConfig databaseConnectionConfi @VisibleForTesting protected SqlServerMetadataHandler( DatabaseConnectionConfig databaseConnectionConfig, - AWSSecretsManager secretsManager, + SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) diff --git a/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxMetadataHandler.java b/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxMetadataHandler.java index 4320ac8afe..bd577beb8b 100644 --- a/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxMetadataHandler.java +++ b/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxMetadataHandler.java @@ -26,8 +26,8 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandlerFactory; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.util.VisibleForTesting; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -56,7 +56,7 @@ public SqlServerMuxMetadataHandler(java.util.Map configOptions) } @VisibleForTesting - protected SqlServerMuxMetadataHandler(AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + protected SqlServerMuxMetadataHandler(SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, Map metadataHandlerMap, DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { super(secretsManager, athena, jdbcConnectionFactory, metadataHandlerMap, databaseConnectionConfig, configOptions); diff --git 
a/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxRecordHandler.java b/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxRecordHandler.java index 5a195fc0f3..5872227624 100644 --- a/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxRecordHandler.java +++ b/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxRecordHandler.java @@ -27,8 +27,8 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.annotations.VisibleForTesting; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -55,7 +55,7 @@ public SqlServerMuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - SqlServerMuxRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + SqlServerMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerRecordHandler.java b/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerRecordHandler.java index e1b64e79f5..814131685c 100644 --- a/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerRecordHandler.java +++ b/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerRecordHandler.java @@ -33,11 +33,10 @@ import 
com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.commons.lang3.Validate; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.PreparedStatement; @@ -63,12 +62,12 @@ public SqlServerRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, public SqlServerRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { - this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), AWSSecretsManagerClientBuilder.defaultClient(), + this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), jdbcConnectionFactory, new SqlServerQueryStringBuilder(SQLSERVER_QUOTE_CHARACTER, new SqlServerFederationExpressionParser(SQLSERVER_QUOTE_CHARACTER)), configOptions); } @VisibleForTesting - SqlServerRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, final AmazonS3 amazonS3, final AWSSecretsManager secretsManager, + SqlServerRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, final AmazonS3 amazonS3, final SecretsManagerClient secretsManager, final AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); diff --git a/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMetadataHandlerTest.java 
b/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMetadataHandlerTest.java index 27704cfdb1..adea9f5dc9 100644 --- a/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMetadataHandlerTest.java +++ b/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMetadataHandlerTest.java @@ -43,9 +43,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest; -import com.amazonaws.services.secretsmanager.model.GetSecretValueResult; import org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Assert; @@ -54,6 +51,9 @@ import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; import java.sql.Connection; import java.sql.PreparedStatement; @@ -84,7 +84,7 @@ public class SqlServerMetadataHandlerTest private JdbcConnectionFactory jdbcConnectionFactory; private Connection connection; private FederatedIdentity federatedIdentity; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private BlockAllocator allocator; @@ -97,9 +97,9 @@ public void setup() this.connection = Mockito.mock(Connection.class, Mockito.RETURNS_DEEP_STUBS); logger.info(" this.connection.."+ this.connection); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); - 
this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); - Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(new GetSecretValueRequest().withSecretId("testSecret")))).thenReturn(new GetSecretValueResult().withSecretString("{\"user\": \"testUser\", \"password\": \"testPassword\"}")); + Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"user\": \"testUser\", \"password\": \"testPassword\"}").build()); this.sqlServerMetadataHandler = new SqlServerMetadataHandler(databaseConnectionConfig, this.secretsManager, this.athena, this.jdbcConnectionFactory, com.google.common.collect.ImmutableMap.of()); this.federatedIdentity = Mockito.mock(FederatedIdentity.class); this.allocator = new BlockAllocatorImpl(); diff --git a/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxMetadataHandlerTest.java b/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxMetadataHandlerTest.java index 96f8b3e5e2..bd2827b6e4 100644 --- a/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxMetadataHandlerTest.java +++ b/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxMetadataHandlerTest.java @@ -33,10 +33,10 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Collections; import java.util.Map; @@ -49,7 +49,7 @@ public class 
SqlServerMuxMetadataHandlerTest private SqlServerMetadataHandler sqlServerMetadataHandler; private JdbcMetadataHandler jdbcMetadataHandler; private BlockAllocator allocator; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -60,7 +60,7 @@ public void setup() this.allocator = new BlockAllocatorImpl(); this.sqlServerMetadataHandler = Mockito.mock(SqlServerMetadataHandler.class); this.metadataHandlerMap = Collections.singletonMap("fakedatabase", this.sqlServerMetadataHandler); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxRecordHandlerTest.java b/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxRecordHandlerTest.java index 7b337185fd..2c6de5c5f9 100644 --- a/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxRecordHandlerTest.java +++ b/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxRecordHandlerTest.java @@ -30,11 +30,11 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.SQLException; @@ -47,7 +47,7 @@ public 
class SqlServerMuxRecordHandlerTest private SqlServerRecordHandler sqlServerRecordHandler; private JdbcRecordHandler jdbcRecordHandler; private AmazonS3 amazonS3; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -58,7 +58,7 @@ public void setup() this.sqlServerRecordHandler = Mockito.mock(SqlServerRecordHandler.class); this.recordHandlerMap = Collections.singletonMap(SqlServerConstants.NAME, this.sqlServerRecordHandler); this.amazonS3 = Mockito.mock(AmazonS3.class); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerRecordHandlerTest.java b/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerRecordHandlerTest.java index 8ca6ebf791..7b1651bf89 100644 --- a/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerRecordHandlerTest.java +++ b/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerRecordHandlerTest.java @@ -34,7 +34,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Schema; @@ -42,6 +41,7 @@ import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import 
software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.PreparedStatement; @@ -58,7 +58,7 @@ public class SqlServerRecordHandlerTest private JdbcConnectionFactory jdbcConnectionFactory; private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; private AmazonS3 amazonS3; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; @Before @@ -67,7 +67,7 @@ public void setup() { System.setProperty("aws.region", "us-east-1"); this.amazonS3 = Mockito.mock(AmazonS3.class); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.connection = Mockito.mock(Connection.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseMetadataHandler.java b/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseMetadataHandler.java index 1ad714e9c5..504321b329 100644 --- a/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseMetadataHandler.java +++ b/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseMetadataHandler.java @@ -49,7 +49,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcArrowTypeConverter; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; @@ -63,6 +62,7 @@ import org.stringtemplate.v4.ST; import org.stringtemplate.v4.STGroup; import org.stringtemplate.v4.STGroupDir; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import 
java.sql.Connection; import java.sql.DatabaseMetaData; @@ -111,7 +111,7 @@ public SynapseMetadataHandler(DatabaseConnectionConfig databaseConnectionConfig, @VisibleForTesting protected SynapseMetadataHandler( DatabaseConnectionConfig databaseConnectionConfig, - AWSSecretsManager secretsManager, + SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) diff --git a/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseMuxMetadataHandler.java b/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseMuxMetadataHandler.java index daa1b25609..6b88c83d50 100644 --- a/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseMuxMetadataHandler.java +++ b/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseMuxMetadataHandler.java @@ -25,8 +25,8 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandlerFactory; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.util.VisibleForTesting; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -55,7 +55,7 @@ public SynapseMuxMetadataHandler(java.util.Map configOptions) } @VisibleForTesting - protected SynapseMuxMetadataHandler(AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + protected SynapseMuxMetadataHandler(SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, Map metadataHandlerMap, DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { super(secretsManager, athena, jdbcConnectionFactory, metadataHandlerMap, databaseConnectionConfig, configOptions); diff --git 
a/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseMuxRecordHandler.java b/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseMuxRecordHandler.java index 62b0f315c3..70502037b0 100644 --- a/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseMuxRecordHandler.java +++ b/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseMuxRecordHandler.java @@ -26,8 +26,8 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.annotations.VisibleForTesting; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -54,7 +54,7 @@ public SynapseMuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - SynapseMuxRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + SynapseMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseRecordHandler.java b/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseRecordHandler.java index f7a6fa70cd..6198e00dd7 100644 --- a/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseRecordHandler.java +++ b/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseRecordHandler.java @@ -37,8 +37,6 @@ import com.amazonaws.services.athena.AmazonAthenaClientBuilder; 
import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.ArrowType; import org.apache.arrow.vector.types.pojo.Field; @@ -46,6 +44,7 @@ import org.apache.commons.lang3.Validate; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.PreparedStatement; @@ -66,14 +65,14 @@ public SynapseRecordHandler(java.util.Map configOptions) } public SynapseRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { - this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), AWSSecretsManagerClientBuilder.defaultClient(), + this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), new SynapseJdbcConnectionFactory(databaseConnectionConfig, SynapseMetadataHandler.JDBC_PROPERTIES, new DatabaseConnectionInfo(SynapseConstants.DRIVER_CLASS, SynapseConstants.DEFAULT_PORT)), new SynapseQueryStringBuilder(QUOTE_CHARACTER, new SynapseFederationExpressionParser(QUOTE_CHARACTER)), configOptions); } @VisibleForTesting - SynapseRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, final AmazonS3 amazonS3, final AWSSecretsManager secretsManager, + SynapseRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, final AmazonS3 amazonS3, final SecretsManagerClient secretsManager, final AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); diff --git 
a/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseMetadataHandlerTest.java b/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseMetadataHandlerTest.java index 037b0540e5..19ccc69b2c 100644 --- a/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseMetadataHandlerTest.java +++ b/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseMetadataHandlerTest.java @@ -39,9 +39,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest; -import com.amazonaws.services.secretsmanager.model.GetSecretValueResult; import org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Assert; @@ -50,6 +47,9 @@ import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; import java.sql.Connection; import java.sql.PreparedStatement; @@ -80,7 +80,7 @@ public class SynapseMetadataHandlerTest private JdbcConnectionFactory jdbcConnectionFactory; private Connection connection; private FederatedIdentity federatedIdentity; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; @Before @@ -92,9 +92,9 @@ public void setup() this.connection = Mockito.mock(Connection.class, Mockito.RETURNS_DEEP_STUBS); logger.info(" this.connection.."+ this.connection); 
Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); - Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(new GetSecretValueRequest().withSecretId("testSecret")))).thenReturn(new GetSecretValueResult().withSecretString("{\"user\": \"testUser\", \"password\": \"testPassword\"}")); + Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"user\": \"testUser\", \"password\": \"testPassword\"}").build()); this.synapseMetadataHandler = new SynapseMetadataHandler(databaseConnectionConfig, this.secretsManager, this.athena, this.jdbcConnectionFactory, com.google.common.collect.ImmutableMap.of()); this.federatedIdentity = Mockito.mock(FederatedIdentity.class); } diff --git a/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseMuxMetadataHandlerTest.java b/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseMuxMetadataHandlerTest.java index 9b233d4201..bed0b399eb 100644 --- a/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseMuxMetadataHandlerTest.java +++ b/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseMuxMetadataHandlerTest.java @@ -33,10 +33,10 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Collections; 
import java.util.Map; @@ -49,7 +49,7 @@ public class SynapseMuxMetadataHandlerTest private SynapseMetadataHandler synapseMetadataHandler; private JdbcMetadataHandler jdbcMetadataHandler; private BlockAllocator allocator; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -60,7 +60,7 @@ public void setup() this.allocator = new BlockAllocatorImpl(); this.synapseMetadataHandler = Mockito.mock(SynapseMetadataHandler.class); this.metadataHandlerMap = Collections.singletonMap("fakedatabase", this.synapseMetadataHandler); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseMuxRecordHandlerTest.java b/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseMuxRecordHandlerTest.java index 0b3daed367..4fcf8bb4a9 100644 --- a/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseMuxRecordHandlerTest.java +++ b/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseMuxRecordHandlerTest.java @@ -30,11 +30,11 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.SQLException; @@ -47,7 
+47,7 @@ public class SynapseMuxRecordHandlerTest private SynapseRecordHandler synapseRecordHandler; private JdbcRecordHandler jdbcRecordHandler; private AmazonS3 amazonS3; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -58,7 +58,7 @@ public void setup() this.synapseRecordHandler = Mockito.mock(SynapseRecordHandler.class); this.recordHandlerMap = Collections.singletonMap(SynapseConstants.NAME, this.synapseRecordHandler); this.amazonS3 = Mockito.mock(AmazonS3.class); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseRecordHandlerTest.java b/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseRecordHandlerTest.java index c8f27f7887..f035f90688 100644 --- a/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseRecordHandlerTest.java +++ b/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseRecordHandlerTest.java @@ -33,7 +33,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Schema; @@ -41,6 +40,7 @@ import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import 
java.sql.Connection; import java.sql.PreparedStatement; @@ -59,7 +59,7 @@ public class SynapseRecordHandlerTest private JdbcConnectionFactory jdbcConnectionFactory; private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; private AmazonS3 amazonS3; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; @Before @@ -67,7 +67,7 @@ public void setup() throws Exception { this.amazonS3 = Mockito.mock(AmazonS3.class); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.connection = Mockito.mock(Connection.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataMetadataHandler.java b/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataMetadataHandler.java index b7d7ffe201..8a456a9fbd 100644 --- a/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataMetadataHandler.java +++ b/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataMetadataHandler.java @@ -51,7 +51,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.PreparedStatementBuilder; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; @@ -65,6 +64,7 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.DatabaseMetaData; @@ -129,7 +129,7 @@ public TeradataMetadataHandler( 
@VisibleForTesting protected TeradataMetadataHandler( DatabaseConnectionConfig databaseConnectionConfig, - AWSSecretsManager secretsManager, + SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) diff --git a/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataMuxMetadataHandler.java b/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataMuxMetadataHandler.java index c043e4c9e4..9799c1f7b0 100644 --- a/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataMuxMetadataHandler.java +++ b/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataMuxMetadataHandler.java @@ -25,8 +25,8 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandlerFactory; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.util.VisibleForTesting; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -55,7 +55,7 @@ public TeradataMuxMetadataHandler(java.util.Map configOptions) } @VisibleForTesting - protected TeradataMuxMetadataHandler(AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + protected TeradataMuxMetadataHandler(SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, Map metadataHandlerMap, DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { super(secretsManager, athena, jdbcConnectionFactory, metadataHandlerMap, databaseConnectionConfig, configOptions); diff --git a/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataMuxRecordHandler.java 
b/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataMuxRecordHandler.java index 8ec63445a3..99a616a895 100644 --- a/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataMuxRecordHandler.java +++ b/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataMuxRecordHandler.java @@ -27,8 +27,8 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.annotations.VisibleForTesting; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -55,7 +55,7 @@ public TeradataMuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - TeradataMuxRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + TeradataMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataRecordHandler.java b/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataRecordHandler.java index 4a9f820581..99a74d6747 100644 --- a/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataRecordHandler.java +++ b/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataRecordHandler.java @@ -33,11 +33,10 @@ import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import 
com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.commons.lang3.Validate; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.PreparedStatement; @@ -60,12 +59,12 @@ public TeradataRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, public TeradataRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { - this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), AWSSecretsManagerClientBuilder.defaultClient(), AmazonAthenaClientBuilder.defaultClient(), + this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), jdbcConnectionFactory, new TeradataQueryStringBuilder(TERADATA_QUOTE_CHARACTER, new TeradataFederationExpressionParser(TERADATA_QUOTE_CHARACTER)), configOptions); } @VisibleForTesting - TeradataRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, final AmazonS3 amazonS3, final AWSSecretsManager secretsManager, + TeradataRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, final AmazonS3 amazonS3, final SecretsManagerClient secretsManager, final AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); diff --git a/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataMetadataHandlerTest.java b/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataMetadataHandlerTest.java index 
d7ad931f00..991304b50c 100644 --- a/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataMetadataHandlerTest.java +++ b/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataMetadataHandlerTest.java @@ -30,15 +30,15 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest; -import com.amazonaws.services.secretsmanager.model.GetSecretValueResult; import org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; import java.sql.*; import java.util.*; @@ -58,7 +58,7 @@ public class TeradataMetadataHandlerTest private JdbcConnectionFactory jdbcConnectionFactory; private Connection connection; private FederatedIdentity federatedIdentity; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private BlockAllocator blockAllocator; @@ -67,9 +67,9 @@ public void setup() throws Exception { this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class, Mockito.RETURNS_DEEP_STUBS); this.connection = Mockito.mock(Connection.class, Mockito.RETURNS_DEEP_STUBS); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = 
Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); - Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(new GetSecretValueRequest().withSecretId("testSecret")))).thenReturn(new GetSecretValueResult().withSecretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}")); + Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}").build()); this.teradataMetadataHandler = new TeradataMetadataHandler(databaseConnectionConfig, this.secretsManager, this.athena, this.jdbcConnectionFactory, com.google.common.collect.ImmutableMap.of("partitioncount", "1000")); this.federatedIdentity = Mockito.mock(FederatedIdentity.class); this.blockAllocator = Mockito.mock(BlockAllocator.class); diff --git a/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataMuxJdbcMetadataHandlerTest.java b/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataMuxJdbcMetadataHandlerTest.java index a0373109bc..2e9ed78cca 100644 --- a/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataMuxJdbcMetadataHandlerTest.java +++ b/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataMuxJdbcMetadataHandlerTest.java @@ -29,10 +29,10 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Collections; import java.util.Map; @@ -45,7 +45,7 @@ public class TeradataMuxJdbcMetadataHandlerTest { 
private TeradataMetadataHandler teradataMetadataHandler; private JdbcMetadataHandler jdbcMetadataHandler; private BlockAllocator allocator; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -56,7 +56,7 @@ public void setup() this.allocator = new BlockAllocatorImpl(); this.teradataMetadataHandler = Mockito.mock(TeradataMetadataHandler.class); this.metadataHandlerMap = Collections.singletonMap("fakedatabase", this.teradataMetadataHandler); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataMuxJdbcRecordHandlerTest.java b/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataMuxJdbcRecordHandlerTest.java index 600b450c3e..c26849aae7 100644 --- a/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataMuxJdbcRecordHandlerTest.java +++ b/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataMuxJdbcRecordHandlerTest.java @@ -30,11 +30,11 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; import java.sql.SQLException; @@ -47,7 +47,7 @@ public class 
TeradataMuxJdbcRecordHandlerTest private TeradataRecordHandler teradataRecordHandler; private JdbcRecordHandler jdbcRecordHandler; private AmazonS3 amazonS3; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -58,7 +58,7 @@ public void setup() this.teradataRecordHandler = Mockito.mock(TeradataRecordHandler.class); this.recordHandlerMap = Collections.singletonMap("teradata", this.teradataRecordHandler); this.amazonS3 = Mockito.mock(AmazonS3.class); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataRecordHandlerTest.java b/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataRecordHandlerTest.java index 9118bc3492..3422add715 100644 --- a/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataRecordHandlerTest.java +++ b/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataRecordHandlerTest.java @@ -34,7 +34,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.types.Types; @@ -43,6 +42,7 @@ import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; 
import java.sql.PreparedStatement; @@ -59,7 +59,7 @@ public class TeradataRecordHandlerTest private JdbcConnectionFactory jdbcConnectionFactory; private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; private AmazonS3 amazonS3; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; @Before @@ -67,7 +67,7 @@ public void setup() throws Exception { this.amazonS3 = Mockito.mock(AmazonS3.class); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.connection = Mockito.mock(Connection.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandler.java b/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandler.java index 990673f527..5d2200c64e 100644 --- a/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandler.java +++ b/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandler.java @@ -45,7 +45,6 @@ import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.glue.AWSGlue; import com.amazonaws.services.glue.model.Table; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.amazonaws.services.timestreamquery.AmazonTimestreamQuery; import com.amazonaws.services.timestreamquery.model.ColumnInfo; import com.amazonaws.services.timestreamquery.model.Datum; @@ -62,6 +61,7 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Collections; import java.util.List; @@ -110,7 +110,7 @@ protected TimestreamMetadataHandler( AmazonTimestreamWrite 
tsMeta, AWSGlue glue, EncryptionKeyFactory keyFactory, - AWSSecretsManager secretsManager, + SecretsManagerClient secretsManager, AmazonAthena athena, String spillBucket, String spillPrefix, diff --git a/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandler.java b/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandler.java index 7c70089abc..fab8718f48 100644 --- a/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandler.java +++ b/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandler.java @@ -44,8 +44,6 @@ import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import com.amazonaws.services.timestreamquery.AmazonTimestreamQuery; import com.amazonaws.services.timestreamquery.model.Datum; import com.amazonaws.services.timestreamquery.model.QueryRequest; @@ -62,6 +60,7 @@ import org.apache.arrow.vector.types.pojo.Field; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.time.Instant; import java.time.ZoneId; @@ -95,14 +94,14 @@ public TimestreamRecordHandler(java.util.Map configOptions) { this( AmazonS3ClientBuilder.defaultClient(), - AWSSecretsManagerClientBuilder.defaultClient(), + SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), TimestreamClientBuilder.buildQueryClient(SOURCE_TYPE), configOptions); } @VisibleForTesting - protected TimestreamRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, AmazonTimestreamQuery tsQuery, java.util.Map configOptions) + protected TimestreamRecordHandler(AmazonS3 
amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, AmazonTimestreamQuery tsQuery, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, SOURCE_TYPE, configOptions); this.tsQuery = tsQuery; diff --git a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandlerTest.java b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandlerTest.java index 45478f3b84..71b998d042 100644 --- a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandlerTest.java +++ b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandlerTest.java @@ -45,7 +45,6 @@ import com.amazonaws.services.glue.model.Column; import com.amazonaws.services.glue.model.GetTableResult; import com.amazonaws.services.glue.model.StorageDescriptor; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.amazonaws.services.timestreamquery.AmazonTimestreamQuery; import com.amazonaws.services.timestreamquery.model.Datum; import com.amazonaws.services.timestreamquery.model.QueryRequest; @@ -69,6 +68,7 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.ArrayList; import java.util.Collections; @@ -97,7 +97,7 @@ public class TimestreamMetadataHandlerTest private BlockAllocator allocator; @Mock - protected AWSSecretsManager mockSecretsManager; + protected SecretsManagerClient mockSecretsManager; @Mock protected AmazonAthena mockAthena; @Mock diff --git a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandlerTest.java b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandlerTest.java index 1682b4ef53..6b3f79fb57 100644 --- 
a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandlerTest.java +++ b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandlerTest.java @@ -46,7 +46,6 @@ import com.amazonaws.services.s3.model.PutObjectResult; import com.amazonaws.services.s3.model.S3Object; import com.amazonaws.services.s3.model.S3ObjectInputStream; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.amazonaws.services.timestreamquery.AmazonTimestreamQuery; import com.amazonaws.services.timestreamquery.model.QueryRequest; import com.amazonaws.services.timestreamquery.model.QueryResult; @@ -66,6 +65,7 @@ import org.mockito.stubbing.Answer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.ByteArrayInputStream; import java.io.IOException; @@ -116,7 +116,7 @@ public class TimestreamRecordHandlerTest private AmazonTimestreamQuery mockClient; @Mock - private AWSSecretsManager mockSecretsManager; + private SecretsManagerClient mockSecretsManager; @Mock private AmazonAthena mockAthena; diff --git a/athena-tpcds/src/main/java/com/amazonaws/athena/connectors/tpcds/TPCDSMetadataHandler.java b/athena-tpcds/src/main/java/com/amazonaws/athena/connectors/tpcds/TPCDSMetadataHandler.java index 20b90436a6..3e98dc12ee 100644 --- a/athena-tpcds/src/main/java/com/amazonaws/athena/connectors/tpcds/TPCDSMetadataHandler.java +++ b/athena-tpcds/src/main/java/com/amazonaws/athena/connectors/tpcds/TPCDSMetadataHandler.java @@ -37,13 +37,13 @@ import com.amazonaws.athena.connector.lambda.metadata.ListTablesResponse; import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.collect.ImmutableSet; import com.teradata.tpcds.Table; import 
com.teradata.tpcds.column.Column; import org.apache.arrow.util.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Collections; import java.util.HashSet; @@ -89,7 +89,7 @@ public TPCDSMetadataHandler(java.util.Map configOptions) @VisibleForTesting protected TPCDSMetadataHandler( EncryptionKeyFactory keyFactory, - AWSSecretsManager secretsManager, + SecretsManagerClient secretsManager, AmazonAthena athena, String spillBucket, String spillPrefix, diff --git a/athena-tpcds/src/main/java/com/amazonaws/athena/connectors/tpcds/TPCDSRecordHandler.java b/athena-tpcds/src/main/java/com/amazonaws/athena/connectors/tpcds/TPCDSRecordHandler.java index 7a76de942d..12dc7a0667 100644 --- a/athena-tpcds/src/main/java/com/amazonaws/athena/connectors/tpcds/TPCDSRecordHandler.java +++ b/athena-tpcds/src/main/java/com/amazonaws/athena/connectors/tpcds/TPCDSRecordHandler.java @@ -30,8 +30,6 @@ import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import com.teradata.tpcds.Results; import com.teradata.tpcds.Session; import com.teradata.tpcds.Table; @@ -42,6 +40,7 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.IOException; import java.math.BigDecimal; @@ -78,11 +77,11 @@ public class TPCDSRecordHandler public TPCDSRecordHandler(java.util.Map configOptions) { - super(AmazonS3ClientBuilder.defaultClient(), AWSSecretsManagerClientBuilder.defaultClient(), AmazonAthenaClientBuilder.defaultClient(), SOURCE_TYPE, configOptions); + super(AmazonS3ClientBuilder.defaultClient(), 
SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), SOURCE_TYPE, configOptions); } @VisibleForTesting - protected TPCDSRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, java.util.Map configOptions) + protected TPCDSRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, SOURCE_TYPE, configOptions); } diff --git a/athena-tpcds/src/test/java/com/amazonaws/athena/connectors/tpcds/TPCDSMetadataHandlerTest.java b/athena-tpcds/src/test/java/com/amazonaws/athena/connectors/tpcds/TPCDSMetadataHandlerTest.java index 7e39835921..47affa5f1c 100644 --- a/athena-tpcds/src/test/java/com/amazonaws/athena/connectors/tpcds/TPCDSMetadataHandlerTest.java +++ b/athena-tpcds/src/test/java/com/amazonaws/athena/connectors/tpcds/TPCDSMetadataHandlerTest.java @@ -43,7 +43,6 @@ import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.After; @@ -54,6 +53,7 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Collections; import java.util.HashMap; @@ -76,7 +76,7 @@ public class TPCDSMetadataHandlerTest private BlockAllocator allocator; @Mock - private AWSSecretsManager mockSecretsManager; + private SecretsManagerClient mockSecretsManager; @Mock private AmazonAthena mockAthena; diff --git a/athena-tpcds/src/test/java/com/amazonaws/athena/connectors/tpcds/TPCDSRecordHandlerTest.java b/athena-tpcds/src/test/java/com/amazonaws/athena/connectors/tpcds/TPCDSRecordHandlerTest.java 
index 2fc214cee8..2bc6193e70 100644 --- a/athena-tpcds/src/test/java/com/amazonaws/athena/connectors/tpcds/TPCDSRecordHandlerTest.java +++ b/athena-tpcds/src/test/java/com/amazonaws/athena/connectors/tpcds/TPCDSRecordHandlerTest.java @@ -47,7 +47,6 @@ import com.amazonaws.services.s3.model.PutObjectResult; import com.amazonaws.services.s3.model.S3Object; import com.amazonaws.services.s3.model.S3ObjectInputStream; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; import com.google.common.collect.ImmutableMap; import com.google.common.io.ByteStreams; import com.teradata.tpcds.Table; @@ -63,6 +62,7 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.ByteArrayInputStream; import java.io.InputStream; @@ -101,7 +101,7 @@ public class TPCDSRecordHandlerTest private AmazonS3 mockS3; @Mock - private AWSSecretsManager mockSecretsManager; + private SecretsManagerClient mockSecretsManager; @Mock private AmazonAthena mockAthena; diff --git a/athena-udfs/src/main/java/com/amazonaws/athena/connectors/udfs/AthenaUDFHandler.java b/athena-udfs/src/main/java/com/amazonaws/athena/connectors/udfs/AthenaUDFHandler.java index 3c90b60e99..3d9be11baa 100644 --- a/athena-udfs/src/main/java/com/amazonaws/athena/connectors/udfs/AthenaUDFHandler.java +++ b/athena-udfs/src/main/java/com/amazonaws/athena/connectors/udfs/AthenaUDFHandler.java @@ -21,8 +21,8 @@ import com.amazonaws.athena.connector.lambda.handlers.UserDefinedFunctionHandler; import com.amazonaws.athena.connector.lambda.security.CachableSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClient; import com.google.common.annotations.VisibleForTesting; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import javax.crypto.BadPaddingException; import javax.crypto.Cipher; @@ -55,7 +55,7 @@ public class AthenaUDFHandler public 
AthenaUDFHandler() { - this(new CachableSecretsManager(AWSSecretsManagerClient.builder().build())); + this(new CachableSecretsManager(SecretsManagerClient.create())); } @VisibleForTesting diff --git a/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaRecordHandler.java b/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaRecordHandler.java index b315d0e454..899fa11370 100644 --- a/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaRecordHandler.java +++ b/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaRecordHandler.java @@ -37,8 +37,6 @@ import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.amazonaws.services.s3.model.*; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.arrow.util.VisibleForTesting; import org.apache.arrow.vector.holders.*; @@ -47,6 +45,7 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.BufferedReader; import java.io.IOException; @@ -70,12 +69,12 @@ public class VerticaRecordHandler public VerticaRecordHandler(java.util.Map configOptions) { this(AmazonS3ClientBuilder.defaultClient(), - AWSSecretsManagerClientBuilder.defaultClient(), + SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), configOptions); } @VisibleForTesting - protected VerticaRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena amazonAthena, java.util.Map configOptions) + protected VerticaRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena amazonAthena, java.util.Map configOptions) { super(amazonS3, secretsManager, amazonAthena, 
SOURCE_TYPE, configOptions); this.amazonS3 = amazonS3; diff --git a/athena-vertica/src/test/java/com/amazonaws/athena/connectors/vertica/VerticaMetadataHandlerTest.java b/athena-vertica/src/test/java/com/amazonaws/athena/connectors/vertica/VerticaMetadataHandlerTest.java index 2a5adad8a4..125b28cc9c 100644 --- a/athena-vertica/src/test/java/com/amazonaws/athena/connectors/vertica/VerticaMetadataHandlerTest.java +++ b/athena-vertica/src/test/java/com/amazonaws/athena/connectors/vertica/VerticaMetadataHandlerTest.java @@ -53,9 +53,6 @@ import com.amazonaws.services.s3.model.ObjectListing; import com.amazonaws.services.s3.model.Region; import com.amazonaws.services.s3.model.S3ObjectSummary; -import com.amazonaws.services.secretsmanager.AWSSecretsManager; -import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest; -import com.amazonaws.services.secretsmanager.model.GetSecretValueResult; import com.google.common.collect.ImmutableList; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.After; @@ -69,6 +66,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.stringtemplate.v4.ST; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; import java.sql.Connection; import java.sql.DatabaseMetaData; @@ -103,7 +103,7 @@ public class VerticaMetadataHandlerTest extends TestBase private VerticaExportQueryBuilder verticaExportQueryBuilder; private VerticaSchemaUtils verticaSchemaUtils; private Connection connection; - private AWSSecretsManager secretsManager; + private SecretsManagerClient secretsManager; private AmazonAthena athena; private AmazonS3 amazonS3; private FederatedIdentity federatedIdentity; @@ -134,7 +134,7 @@ public void setUp() throws Exception this.queryFactory = Mockito.mock(QueryFactory.class); this.verticaExportQueryBuilder 
= Mockito.mock(VerticaExportQueryBuilder.class); this.connection = Mockito.mock(Connection.class, Mockito.RETURNS_DEEP_STUBS); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.federatedIdentity = Mockito.mock(FederatedIdentity.class); this.databaseMetaData = Mockito.mock(DatabaseMetaData.class); @@ -146,14 +146,14 @@ public void setUp() throws Exception this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.amazonS3 = Mockito.mock(AmazonS3.class); - Mockito.lenient().when(this.secretsManager.getSecretValue(Mockito.eq(new GetSecretValueRequest().withSecretId("testSecret")))).thenReturn(new GetSecretValueResult().withSecretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}")); + Mockito.lenient().when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}").build()); Mockito.when(connection.getMetaData()).thenReturn(databaseMetaData); Mockito.when(amazonS3.getRegion()).thenReturn(Region.US_West_2); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class, Mockito.RETURNS_DEEP_STUBS); this.connection = Mockito.mock(Connection.class, Mockito.RETURNS_DEEP_STUBS); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); - this.secretsManager = Mockito.mock(AWSSecretsManager.class); + this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AmazonAthena.class); this.verticaMetadataHandler = new VerticaMetadataHandler(databaseConnectionConfig, this.jdbcConnectionFactory, com.google.common.collect.ImmutableMap.of(), amazonS3, verticaSchemaUtils); this.federatedIdentity = Mockito.mock(FederatedIdentity.class); diff --git 
a/pom.xml b/pom.xml index 26ff1fdea5..f66381fb09 100644 --- a/pom.xml +++ b/pom.xml @@ -14,6 +14,7 @@ 11 3.13.0 + 2.25.56 1.12.761 1.2.2 1.6.0 From 6d2542473a6d5fbf841f480eec08d6536d159ba5 Mon Sep 17 00:00:00 2001 From: Aimery Methena <159072740+aimethed@users.noreply.github.com> Date: Mon, 22 Jul 2024 16:26:56 -0400 Subject: [PATCH 02/87] V2 migration glue (#2052) Co-authored-by: Trianz-Akshay <108925344+Trianz-Akshay@users.noreply.github.com> Co-authored-by: Jithendar Trianz <106380520+Jithendar12@users.noreply.github.com> --- .../docdb/DocDBMetadataHandler.java | 14 +- .../docdb/DocDBMetadataHandlerTest.java | 4 +- .../docdb/DocDBRecordHandlerTest.java | 4 +- athena-dynamodb/pom.xml | 11 -- .../dynamodb/DynamoDBMetadataHandler.java | 18 +- .../dynamodb/DynamoDBMetadataHandlerTest.java | 129 +++++++----- .../dynamodb/DynamoDBRecordHandlerTest.java | 155 ++++++++------- .../ElasticsearchMetadataHandler.java | 6 +- .../ElasticsearchMetadataHandlerTest.java | 4 +- athena-federation-sdk/pom.xml | 21 +- .../lambda/handlers/GlueMetadataHandler.java | 145 +++++++------- .../handlers/GlueMetadataHandlerTest.java | 184 +++++++++--------- .../connectors/gcs/GcsMetadataHandler.java | 24 +-- .../athena/connectors/gcs/GcsUtil.java | 20 +- .../connectors/gcs/common/PartitionUtil.java | 22 +-- .../gcs/filter/FilterExpressionBuilder.java | 6 +- .../gcs/storage/StorageMetadata.java | 16 +- .../gcs/GcsMetadataHandlerTest.java | 184 +++++++++--------- .../athena/connectors/gcs/GcsTestUtils.java | 9 +- .../gcs/common/PartitionUtilTest.java | 126 +++++++----- .../filter/FilterExpressionBuilderTest.java | 18 +- .../gcs/storage/StorageMetadataTest.java | 50 ++--- athena-google-bigquery/pom.xml | 30 --- athena-hbase/pom.xml | 101 ---------- .../hbase/HbaseMetadataHandler.java | 10 +- .../hbase/HbaseMetadataHandlerTest.java | 4 +- .../hbase/HbaseRecordHandlerTest.java | 4 - athena-kafka/pom.xml | 5 - .../connectors/kafka/GlueRegistryReader.java | 44 ++--- 
.../kafka/KafkaMetadataHandler.java | 88 +++++---- .../kafka/KafkaMetadataHandlerTest.java | 124 ++++++------ .../kafka/KafkaRecordHandlerTest.java | 110 ++++++----- .../connectors/kafka/KafkaUtilsTest.java | 4 - athena-msk/pom.xml | 5 - .../msk/AmazonMskMetadataHandler.java | 93 +++++---- .../connectors/msk/GlueRegistryReader.java | 41 ++-- .../msk/AmazonMskMetadataHandlerTest.java | 118 ++++++----- .../neptune/NeptuneMetadataHandler.java | 23 +-- .../neptune/NeptuneMetadataHandlerTest.java | 74 +++---- .../redis/RedisMetadataHandler.java | 14 +- .../redis/RedisMetadataHandlerTest.java | 4 +- .../redis/integ/RedisIntegTest.java | 117 +++++------ .../timestream/TimestreamMetadataHandler.java | 10 +- .../TimestreamMetadataHandlerTest.java | 80 ++++---- 44 files changed, 1100 insertions(+), 1173 deletions(-) diff --git a/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBMetadataHandler.java b/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBMetadataHandler.java index 381186bf45..0491e381ef 100644 --- a/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBMetadataHandler.java +++ b/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBMetadataHandler.java @@ -43,9 +43,6 @@ import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory; import com.amazonaws.athena.connectors.docdb.qpt.DocDBQueryPassthrough; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.glue.AWSGlue; -import com.amazonaws.services.glue.model.Database; -import com.amazonaws.services.glue.model.Table; import com.google.common.base.Strings; import com.google.common.collect.ImmutableMap; import com.mongodb.client.MongoClient; @@ -57,6 +54,9 @@ import org.bson.Document; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.glue.GlueClient; +import software.amazon.awssdk.services.glue.model.Database; +import 
software.amazon.awssdk.services.glue.model.Table; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.ArrayList; @@ -95,13 +95,13 @@ public class DocDBMetadataHandler //is indeed enabled for use by this connector. private static final String DOCDB_METADATA_FLAG = "docdb-metadata-flag"; //Used to filter out Glue tables which lack a docdb metadata flag. - private static final TableFilter TABLE_FILTER = (Table table) -> table.getParameters().containsKey(DOCDB_METADATA_FLAG); + private static final TableFilter TABLE_FILTER = (Table table) -> table.parameters().containsKey(DOCDB_METADATA_FLAG); //The number of documents to scan when attempting to infer schema from an DocDB collection. private static final int SCHEMA_INFERRENCE_NUM_DOCS = 10; // used to filter out Glue databases which lack the docdb-metadata-flag in the URI. - private static final DatabaseFilter DB_FILTER = (Database database) -> (database.getLocationUri() != null && database.getLocationUri().contains(DOCDB_METADATA_FLAG)); + private static final DatabaseFilter DB_FILTER = (Database database) -> (database.locationUri() != null && database.locationUri().contains(DOCDB_METADATA_FLAG)); - private final AWSGlue glue; + private final GlueClient glue; private final DocDBConnectionFactory connectionFactory; private final DocDBQueryPassthrough queryPassthrough = new DocDBQueryPassthrough(); @@ -114,7 +114,7 @@ public DocDBMetadataHandler(java.util.Map configOptions) @VisibleForTesting protected DocDBMetadataHandler( - AWSGlue glue, + GlueClient glue, DocDBConnectionFactory connectionFactory, EncryptionKeyFactory keyFactory, SecretsManagerClient secretsManager, diff --git a/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/DocDBMetadataHandlerTest.java b/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/DocDBMetadataHandlerTest.java index 9a65cb275d..da334e0092 100644 --- 
a/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/DocDBMetadataHandlerTest.java +++ b/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/DocDBMetadataHandlerTest.java @@ -40,7 +40,6 @@ import com.amazonaws.athena.connector.lambda.metadata.MetadataResponse; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.glue.AWSGlue; import com.google.common.collect.ImmutableList; import com.mongodb.client.FindIterable; import com.mongodb.client.MongoClient; @@ -62,6 +61,7 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.glue.GlueClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.ArrayList; @@ -100,7 +100,7 @@ public class DocDBMetadataHandlerTest private MongoClient mockClient; @Mock - private AWSGlue awsGlue; + private GlueClient awsGlue; @Mock private SecretsManagerClient secretsManager; diff --git a/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandlerTest.java b/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandlerTest.java index ea2a5da993..e1a72b2477 100644 --- a/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandlerTest.java +++ b/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandlerTest.java @@ -41,7 +41,6 @@ import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.glue.AWSGlue; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.model.PutObjectRequest; import com.amazonaws.services.s3.model.PutObjectResult; @@ -70,6 +69,7 @@ import org.mockito.junit.MockitoJUnitRunner; import 
org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.glue.GlueClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.ByteArrayInputStream; @@ -122,7 +122,7 @@ public class DocDBRecordHandlerTest private AmazonAthena mockAthena; @Mock - private AWSGlue awsGlue; + private GlueClient awsGlue; @Mock private SecretsManagerClient secretsManager; diff --git a/athena-dynamodb/pom.xml b/athena-dynamodb/pom.xml index c3e91e6e74..c3841e0e6f 100644 --- a/athena-dynamodb/pom.xml +++ b/athena-dynamodb/pom.xml @@ -8,17 +8,6 @@ 4.0.0 athena-dynamodb 2022.47.1 - - - - software.amazon.awssdk - bom - 2.26.20 - pom - import - - - com.amazonaws diff --git a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBMetadataHandler.java b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBMetadataHandler.java index a82e3c78bd..5bdb64a1e5 100644 --- a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBMetadataHandler.java +++ b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBMetadataHandler.java @@ -57,9 +57,6 @@ import com.amazonaws.athena.connectors.dynamodb.util.DDBTypeUtils; import com.amazonaws.athena.connectors.dynamodb.util.IncrementingValueNameProducer; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.glue.AWSGlue; -import com.amazonaws.services.glue.model.Database; -import com.amazonaws.services.glue.model.Table; import com.amazonaws.util.json.Jackson; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableMap; @@ -74,6 +71,9 @@ import software.amazon.awssdk.services.dynamodb.model.AttributeValue; import software.amazon.awssdk.services.dynamodb.model.ExecuteStatementRequest; import software.amazon.awssdk.services.dynamodb.model.ExecuteStatementResponse; +import software.amazon.awssdk.services.glue.GlueClient; +import 
software.amazon.awssdk.services.glue.model.Database; +import software.amazon.awssdk.services.glue.model.Table; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.ArrayList; @@ -131,15 +131,15 @@ public class DynamoDBMetadataHandler // defines the value that should be present in the Glue Database URI to enable the DB for DynamoDB. static final String DYNAMO_DB_FLAG = "dynamo-db-flag"; // used to filter out Glue tables which lack indications of being used for DDB. - private static final TableFilter TABLE_FILTER = (Table table) -> table.getStorageDescriptor().getLocation().contains(DYNAMODB) - || (table.getParameters() != null && DYNAMODB.equals(table.getParameters().get("classification"))) - || (table.getStorageDescriptor().getParameters() != null && DYNAMODB.equals(table.getStorageDescriptor().getParameters().get("classification"))); + private static final TableFilter TABLE_FILTER = (Table table) -> table.storageDescriptor().location().contains(DYNAMODB) + || (table.parameters() != null && DYNAMODB.equals(table.parameters().get("classification"))) + || (table.storageDescriptor().parameters() != null && DYNAMODB.equals(table.storageDescriptor().parameters().get("classification"))); // used to filter out Glue databases which lack the DYNAMO_DB_FLAG in the URI. 
- private static final DatabaseFilter DB_FILTER = (Database database) -> (database.getLocationUri() != null && database.getLocationUri().contains(DYNAMO_DB_FLAG)); + private static final DatabaseFilter DB_FILTER = (Database database) -> (database.locationUri() != null && database.locationUri().contains(DYNAMO_DB_FLAG)); private final ThrottlingInvoker invoker; private final DynamoDbClient ddbClient; - private final AWSGlue glueClient; + private final GlueClient glueClient; private final DynamoDBTableResolver tableResolver; private final DDBQueryPassthrough queryPassthrough; @@ -164,7 +164,7 @@ public DynamoDBMetadataHandler(java.util.Map configOptions) String spillBucket, String spillPrefix, DynamoDbClient ddbClient, - AWSGlue glueClient, + GlueClient glueClient, java.util.Map configOptions) { super(glueClient, keyFactory, secretsManager, athena, SOURCE_TYPE, spillBucket, spillPrefix, configOptions); diff --git a/athena-dynamodb/src/test/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBMetadataHandlerTest.java b/athena-dynamodb/src/test/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBMetadataHandlerTest.java index df7935a8e2..a6bf58438b 100644 --- a/athena-dynamodb/src/test/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBMetadataHandlerTest.java +++ b/athena-dynamodb/src/test/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBMetadataHandlerTest.java @@ -47,14 +47,6 @@ import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.dynamodbv2.document.ItemUtils; -import com.amazonaws.services.glue.AWSGlue; -import com.amazonaws.services.glue.model.Column; -import com.amazonaws.services.glue.model.Database; -import com.amazonaws.services.glue.model.GetDatabasesResult; -import com.amazonaws.services.glue.model.GetTableResult; -import com.amazonaws.services.glue.model.GetTablesResult; -import com.amazonaws.services.glue.model.StorageDescriptor; -import com.amazonaws.services.glue.model.Table; import com.amazonaws.util.json.Jackson; 
import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; @@ -74,6 +66,16 @@ import org.slf4j.LoggerFactory; import software.amazon.awssdk.enhanced.dynamodb.document.EnhancedDocument; import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.glue.GlueClient; +import software.amazon.awssdk.services.glue.model.Column; +import software.amazon.awssdk.services.glue.model.Database; +import software.amazon.awssdk.services.glue.model.GetDatabasesRequest; +import software.amazon.awssdk.services.glue.model.GetDatabasesResponse; +import software.amazon.awssdk.services.glue.model.GetTablesRequest; +import software.amazon.awssdk.services.glue.model.GetTablesResponse; +import software.amazon.awssdk.services.glue.model.StorageDescriptor; +import software.amazon.awssdk.services.glue.model.Table; +import software.amazon.awssdk.services.glue.paginators.GetDatabasesIterable; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.time.Instant; @@ -114,6 +116,7 @@ import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.*; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; /** @@ -129,7 +132,7 @@ public class DynamoDBMetadataHandlerTest public TestName testName = new TestName(); @Mock - private AWSGlue glueClient; + private GlueClient glueClient; @Mock private SecretsManagerClient secretsManager; @@ -162,7 +165,7 @@ public void tearDown() public void doListSchemaNamesGlueError() throws Exception { - when(glueClient.getDatabases(any())).thenThrow(new AmazonServiceException("")); + when(glueClient.getDatabasesPaginator(any(GetDatabasesRequest.class))).thenThrow(new AmazonServiceException("")); ListSchemasRequest req = new ListSchemasRequest(TEST_IDENTITY, TEST_QUERY_ID, TEST_CATALOG_NAME); ListSchemasResponse res = handler.doListSchemaNames(allocator, req); @@ 
-176,12 +179,16 @@ public void doListSchemaNamesGlueError() public void doListSchemaNamesGlue() throws Exception { - GetDatabasesResult result = new GetDatabasesResult().withDatabaseList( - new Database().withName(DEFAULT_SCHEMA), - new Database().withName("ddb").withLocationUri(DYNAMO_DB_FLAG), - new Database().withName("s3").withLocationUri("blah")); + GetDatabasesResponse response = GetDatabasesResponse.builder() + .databaseList( + Database.builder().name(DEFAULT_SCHEMA).build(), + Database.builder().name("ddb").locationUri(DYNAMO_DB_FLAG).build(), + Database.builder().name("s3").locationUri("blah").build()) + .build(); - when(glueClient.getDatabases(any())).thenReturn(result); + GetDatabasesIterable mockIterable = mock(GetDatabasesIterable.class); + when(mockIterable.stream()).thenReturn(Collections.singletonList(response).stream()); + when(glueClient.getDatabasesPaginator(any(GetDatabasesRequest.class))).thenReturn(mockIterable); ListSchemasRequest req = new ListSchemasRequest(TEST_IDENTITY, TEST_QUERY_ID, TEST_CATALOG_NAME); ListSchemasResponse res = handler.doListSchemaNames(allocator, req); @@ -202,25 +209,37 @@ public void doListTablesGlueAndDynamo() tableNames.add("table2"); tableNames.add("table3"); - GetTablesResult mockResult = new GetTablesResult(); List tableList = new ArrayList<>(); - tableList.add(new Table().withName("table1") - .withParameters(ImmutableMap.of("classification", "dynamodb")) - .withStorageDescriptor(new StorageDescriptor() - .withLocation("some.location"))); - tableList.add(new Table().withName("table2") - .withParameters(ImmutableMap.of()) - .withStorageDescriptor(new StorageDescriptor() - .withLocation("some.location") - .withParameters(ImmutableMap.of("classification", "dynamodb")))); - tableList.add(new Table().withName("table3") - .withParameters(ImmutableMap.of()) - .withStorageDescriptor(new StorageDescriptor() - .withLocation("arn:aws:dynamodb:us-east-1:012345678910:table/table3"))); - tableList.add(new 
Table().withName("notADynamoTable").withParameters(ImmutableMap.of()).withStorageDescriptor( - new StorageDescriptor().withParameters(ImmutableMap.of()).withLocation("some_location"))); - mockResult.setTableList(tableList); - when(glueClient.getTables(any())).thenReturn(mockResult); + tableList.add(Table.builder().name("table1") + .parameters(ImmutableMap.of("classification", "dynamodb")) + .storageDescriptor(StorageDescriptor.builder() + .location("some.location") + .build()) + .build()); + tableList.add(Table.builder().name("table2") + .parameters(ImmutableMap.of()) + .storageDescriptor(StorageDescriptor.builder() + .location("some.location") + .parameters(ImmutableMap.of("classification", "dynamodb")) + .build()) + .build()); + tableList.add(Table.builder().name("table3") + .parameters(ImmutableMap.of()) + .storageDescriptor(StorageDescriptor.builder() + .location("arn:aws:dynamodb:us-east-1:012345678910:table/table3") + .build()) + .build()); + tableList.add(Table.builder().name("notADynamoTable") + .parameters(ImmutableMap.of()) + .storageDescriptor(StorageDescriptor.builder() + .location("some_location") + .parameters(ImmutableMap.of()) + .build()) + .build()); + GetTablesResponse mockResponse = GetTablesResponse.builder() + .tableList(tableList) + .build(); + when(glueClient.getTables(any(GetTablesRequest.class))).thenReturn(mockResponse); ListTablesRequest req = new ListTablesRequest(TEST_IDENTITY, TEST_QUERY_ID, TEST_CATALOG_NAME, DEFAULT_SCHEMA, null, UNLIMITED_PAGE_SIZE_VALUE); @@ -257,7 +276,7 @@ public void doListPaginatedTables() public void doGetTable() throws Exception { - when(glueClient.getTable(any())).thenThrow(new AmazonServiceException("")); + when(glueClient.getTable(any(software.amazon.awssdk.services.glue.model.GetTableRequest.class))).thenThrow(new AmazonServiceException("")); GetTableRequest req = new GetTableRequest(TEST_IDENTITY, TEST_QUERY_ID, TEST_CATALOG_NAME, TEST_TABLE_NAME, Collections.emptyMap()); GetTableResponse res = 
handler.doGetTable(allocator, req); @@ -273,7 +292,7 @@ public void doGetTable() public void doGetEmptyTable() throws Exception { - when(glueClient.getTable(any())).thenThrow(new AmazonServiceException("")); + when(glueClient.getTable(any(software.amazon.awssdk.services.glue.model.GetTableRequest.class))).thenThrow(new AmazonServiceException("")); GetTableRequest req = new GetTableRequest(TEST_IDENTITY, TEST_QUERY_ID, TEST_CATALOG_NAME, TEST_TABLE_2_NAME, Collections.emptyMap()); GetTableResponse res = handler.doGetTable(allocator, req); @@ -288,7 +307,7 @@ public void doGetEmptyTable() public void testCaseInsensitiveResolve() throws Exception { - when(glueClient.getTable(any())).thenThrow(new AmazonServiceException("")); + when(glueClient.getTable(any(software.amazon.awssdk.services.glue.model.GetTableRequest.class))).thenThrow(new AmazonServiceException("")); GetTableRequest req = new GetTableRequest(TEST_IDENTITY, TEST_QUERY_ID, TEST_CATALOG_NAME, TEST_TABLE_2_NAME, Collections.emptyMap()); GetTableResponse res = handler.doGetTable(allocator, req); @@ -594,20 +613,21 @@ public void validateSourceTableNamePropagation() throws Exception { List columns = new ArrayList<>(); - columns.add(new Column().withName("col1").withType("int")); - columns.add(new Column().withName("col2").withType("bigint")); - columns.add(new Column().withName("col3").withType("string")); + columns.add(Column.builder().name("col1").type("int").build()); + columns.add(Column.builder().name("col2").type("bigint").build()); + columns.add(Column.builder().name("col3").type("string").build()); Map param = ImmutableMap.of( SOURCE_TABLE_PROPERTY, TEST_TABLE, COLUMN_NAME_MAPPING_PROPERTY, "col1=Col1 , col2=Col2 ,col3=Col3", DATETIME_FORMAT_MAPPING_PROPERTY, "col1=datetime1,col3=datetime3 "); - Table table = new Table() - .withParameters(param) - .withPartitionKeys() - .withStorageDescriptor(new StorageDescriptor().withColumns(columns)); - GetTableResult mockResult = new 
GetTableResult().withTable(table); - when(glueClient.getTable(any())).thenReturn(mockResult); + Table table = Table.builder() + .parameters(param) + .storageDescriptor(StorageDescriptor.builder().columns(columns).build()) + .partitionKeys(Collections.EMPTY_SET) + .build(); + software.amazon.awssdk.services.glue.model.GetTableResponse tableResponse = software.amazon.awssdk.services.glue.model.GetTableResponse.builder().table(table).build(); + when(glueClient.getTable(any(software.amazon.awssdk.services.glue.model.GetTableRequest.class))).thenReturn(tableResponse); TableName tableName = new TableName(DEFAULT_SCHEMA, "glueTableForTestTable"); GetTableRequest getTableRequest = new GetTableRequest(TEST_IDENTITY, TEST_QUERY_ID, TEST_CATALOG_NAME, tableName, Collections.emptyMap()); @@ -635,20 +655,21 @@ public void doGetTableLayoutScanWithTypeOverride() throws Exception { List columns = new ArrayList<>(); - columns.add(new Column().withName("col1").withType("int")); - columns.add(new Column().withName("col2").withType("timestamptz")); - columns.add(new Column().withName("col3").withType("string")); + columns.add(Column.builder().name("col1").type("int").build()); + columns.add(Column.builder().name("col2").type("timestamptz").build()); + columns.add(Column.builder().name("col3").type("string").build()); Map param = ImmutableMap.of( SOURCE_TABLE_PROPERTY, TEST_TABLE, COLUMN_NAME_MAPPING_PROPERTY, "col1=Col1", DATETIME_FORMAT_MAPPING_PROPERTY, "col1=datetime1,col3=datetime3 "); - Table table = new Table() - .withParameters(param) - .withPartitionKeys() - .withStorageDescriptor(new StorageDescriptor().withColumns(columns)); - GetTableResult mockResult = new GetTableResult().withTable(table); - when(glueClient.getTable(any())).thenReturn(mockResult); + Table table = Table.builder() + .parameters(param) + .partitionKeys(Collections.EMPTY_SET) + .storageDescriptor(StorageDescriptor.builder().columns(columns).build()) + .build(); + 
software.amazon.awssdk.services.glue.model.GetTableResponse tableResponse = software.amazon.awssdk.services.glue.model.GetTableResponse.builder().table(table).build(); + when(glueClient.getTable(any(software.amazon.awssdk.services.glue.model.GetTableRequest.class))).thenReturn(tableResponse); TableName tableName = new TableName(DEFAULT_SCHEMA, "glueTableForTestTable"); GetTableRequest getTableRequest = new GetTableRequest(TEST_IDENTITY, TEST_QUERY_ID, TEST_CATALOG_NAME, tableName, Collections.emptyMap()); diff --git a/athena-dynamodb/src/test/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBRecordHandlerTest.java b/athena-dynamodb/src/test/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBRecordHandlerTest.java index 68362c087d..cff9e0159e 100644 --- a/athena-dynamodb/src/test/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBRecordHandlerTest.java +++ b/athena-dynamodb/src/test/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBRecordHandlerTest.java @@ -39,12 +39,6 @@ import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; import com.amazonaws.athena.connectors.dynamodb.util.DDBTypeUtils; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.glue.AWSGlue; -import com.amazonaws.services.glue.model.Column; -import com.amazonaws.services.glue.model.EntityNotFoundException; -import com.amazonaws.services.glue.model.GetTableResult; -import com.amazonaws.services.glue.model.StorageDescriptor; -import com.amazonaws.services.glue.model.Table; import com.amazonaws.services.s3.AmazonS3; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; @@ -59,7 +53,6 @@ import org.junit.Before; import org.junit.Rule; import org.junit.Test; - import org.junit.rules.TestName; import org.junit.runner.RunWith; import org.mockito.Mock; @@ -68,6 +61,11 @@ import org.slf4j.LoggerFactory; import software.amazon.awssdk.enhanced.dynamodb.document.EnhancedDocument; import 
software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.glue.GlueClient; +import software.amazon.awssdk.services.glue.model.Column; +import software.amazon.awssdk.services.glue.model.EntityNotFoundException; +import software.amazon.awssdk.services.glue.model.StorageDescriptor; +import software.amazon.awssdk.services.glue.model.Table; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.time.LocalDate; @@ -121,7 +119,7 @@ public class DynamoDBRecordHandlerTest private DynamoDBMetadataHandler metadataHandler; @Mock - private AWSGlue glueClient; + private GlueClient glueClient; @Mock private SecretsManagerClient secretsManager; @@ -398,25 +396,26 @@ public void testDateTimeSupportFromGlueTable() throws Exception TimeZone.setDefault(TimeZone.getTimeZone("UTC")); List columns = new ArrayList<>(); - columns.add(new Column().withName("col0").withType("string")); - columns.add(new Column().withName("col1").withType("timestamp")); - columns.add(new Column().withName("col2").withType("timestamp")); - columns.add(new Column().withName("col3").withType("date")); - columns.add(new Column().withName("col4").withType("date")); - columns.add(new Column().withName("col5").withType("timestamptz")); - columns.add(new Column().withName("col6").withType("timestamptz")); - columns.add(new Column().withName("col7").withType("timestamptz")); + columns.add(Column.builder().name("col0").type("string").build()); + columns.add(Column.builder().name("col1").type("timestamp").build()); + columns.add(Column.builder().name("col2").type("timestamp").build()); + columns.add(Column.builder().name("col3").type("date").build()); + columns.add(Column.builder().name("col4").type("date").build()); + columns.add(Column.builder().name("col5").type("timestamptz").build()); + columns.add(Column.builder().name("col6").type("timestamptz").build()); + columns.add(Column.builder().name("col7").type("timestamptz").build()); Map 
param = ImmutableMap.of( SOURCE_TABLE_PROPERTY, TEST_TABLE3, COLUMN_NAME_MAPPING_PROPERTY, "col1=Col1 , col2=Col2 ,col3=Col3, col4=Col4,col5=Col5,col6=Col6,col7=Col7", DATETIME_FORMAT_MAPPING_PROPERTY, "col1=yyyyMMdd'S'HHmmss,col3=dd/MM/yyyy "); - Table table = new Table() - .withParameters(param) - .withPartitionKeys() - .withStorageDescriptor(new StorageDescriptor().withColumns(columns)); - GetTableResult mockResult = new GetTableResult().withTable(table); - when(glueClient.getTable(any())).thenReturn(mockResult); + Table table = Table.builder() + .parameters(param) + .partitionKeys(Collections.EMPTY_SET) + .storageDescriptor(StorageDescriptor.builder().columns(columns).build()) + .build(); + software.amazon.awssdk.services.glue.model.GetTableResponse tableResponse = software.amazon.awssdk.services.glue.model.GetTableResponse.builder().table(table).build(); + when(glueClient.getTable(any(software.amazon.awssdk.services.glue.model.GetTableRequest.class))).thenReturn(tableResponse); TableName tableName = new TableName(DEFAULT_SCHEMA, TEST_TABLE3); GetTableRequest getTableRequest = new GetTableRequest(TEST_IDENTITY, TEST_QUERY_ID, TEST_CATALOG_NAME, tableName, Collections.emptyMap()); @@ -463,17 +462,18 @@ public void testDateTimeSupportFromGlueTable() throws Exception public void testStructWithNullFromGlueTable() throws Exception { List columns = new ArrayList<>(); - columns.add(new Column().withName("col0").withType("string")); - columns.add(new Column().withName("col1").withType("struct")); + columns.add(Column.builder().name("col0").type("string").build()); + columns.add(Column.builder().name("col1").type("struct").build()); Map param = ImmutableMap.of( SOURCE_TABLE_PROPERTY, TEST_TABLE4, COLUMN_NAME_MAPPING_PROPERTY, "col0=Col0,col1=Col1,col2=Col2"); - Table table = new Table() - .withParameters(param) - .withPartitionKeys() - .withStorageDescriptor(new StorageDescriptor().withColumns(columns)); - GetTableResult mockResult = new 
GetTableResult().withTable(table); - when(glueClient.getTable(any())).thenReturn(mockResult); + Table table = Table.builder() + .parameters(param) + .partitionKeys(Collections.EMPTY_SET) + .storageDescriptor(StorageDescriptor.builder().columns(columns).build()) + .build(); + software.amazon.awssdk.services.glue.model.GetTableResponse tableResponse = software.amazon.awssdk.services.glue.model.GetTableResponse.builder().table(table).build(); + when(glueClient.getTable(any(software.amazon.awssdk.services.glue.model.GetTableRequest.class))).thenReturn(tableResponse); TableName tableName = new TableName(DEFAULT_SCHEMA, TEST_TABLE4); GetTableRequest getTableRequest = new GetTableRequest(TEST_IDENTITY, TEST_QUERY_ID, TEST_CATALOG_NAME, tableName, Collections.emptyMap()); @@ -520,7 +520,7 @@ public void testStructWithNullFromGlueTable() throws Exception @Test public void testStructWithNullFromDdbTable() throws Exception { - when(glueClient.getTable(any())).thenThrow(new EntityNotFoundException("")); + when(glueClient.getTable(any(software.amazon.awssdk.services.glue.model.GetTableRequest.class))).thenThrow(EntityNotFoundException.builder().message("").build()); TableName tableName = new TableName(DEFAULT_SCHEMA, TEST_TABLE4); GetTableRequest getTableRequest = new GetTableRequest(TEST_IDENTITY, TEST_QUERY_ID, TEST_CATALOG_NAME, tableName, Collections.emptyMap()); @@ -571,19 +571,20 @@ public void testMapWithSchemaFromGlueTable() throws Exception } List columns = new ArrayList<>(); - columns.add(new Column().withName("col0").withType("string")); - columns.add(new Column().withName("outermap").withType("MAP>")); - columns.add(new Column().withName("structcol").withType("MAP>")); + columns.add(Column.builder().name("col0").type("string").build()); + columns.add(Column.builder().name("outermap").type("MAP>").build()); + columns.add(Column.builder().name("structcol").type("MAP>").build()); Map param = ImmutableMap.of( SOURCE_TABLE_PROPERTY, TEST_TABLE5, 
COLUMN_NAME_MAPPING_PROPERTY, "col0=Col0,col1=Col1,col2=Col2"); - Table table = new Table() - .withParameters(param) - .withPartitionKeys() - .withStorageDescriptor(new StorageDescriptor().withColumns(columns)); - GetTableResult mockResult = new GetTableResult().withTable(table); - when(glueClient.getTable(any())).thenReturn(mockResult); + Table table = Table.builder() + .parameters(param) + .partitionKeys(Collections.EMPTY_SET) + .storageDescriptor(StorageDescriptor.builder().columns(columns).build()) + .build(); + software.amazon.awssdk.services.glue.model.GetTableResponse tableResponse = software.amazon.awssdk.services.glue.model.GetTableResponse.builder().table(table).build(); + when(glueClient.getTable(any(software.amazon.awssdk.services.glue.model.GetTableRequest.class))).thenReturn(tableResponse); TableName tableName = new TableName(DEFAULT_SCHEMA, TEST_TABLE5); GetTableRequest getTableRequest = new GetTableRequest(TEST_IDENTITY, TEST_QUERY_ID, TEST_CATALOG_NAME, tableName, Collections.emptyMap()); @@ -624,19 +625,20 @@ public void testMapWithSchemaFromGlueTable() throws Exception public void testStructWithSchemaFromGlueTable() throws Exception { List columns = new ArrayList<>(); - columns.add(new Column().withName("col0").withType("string")); - columns.add(new Column().withName("outermap").withType("struct>")); - columns.add(new Column().withName("structcol").withType("struct>")); + columns.add(Column.builder().name("col0").type("string").build()); + columns.add(Column.builder().name("outermap").type("struct>").build()); + columns.add(Column.builder().name("structcol").type("struct>").build()); Map param = ImmutableMap.of( SOURCE_TABLE_PROPERTY, TEST_TABLE6, COLUMN_NAME_MAPPING_PROPERTY, "col0=Col0,col1=Col1,col2=Col2"); - Table table = new Table() - .withParameters(param) - .withPartitionKeys() - .withStorageDescriptor(new StorageDescriptor().withColumns(columns)); - GetTableResult mockResult = new GetTableResult().withTable(table); - 
when(glueClient.getTable(any())).thenReturn(mockResult); + Table table = Table.builder() + .parameters(param) + .partitionKeys(Collections.EMPTY_SET) + .storageDescriptor(StorageDescriptor.builder().columns(columns).build()) + .build(); + software.amazon.awssdk.services.glue.model.GetTableResponse tableResponse = software.amazon.awssdk.services.glue.model.GetTableResponse.builder().table(table).build(); + when(glueClient.getTable(any(software.amazon.awssdk.services.glue.model.GetTableRequest.class))).thenReturn(tableResponse); TableName tableName = new TableName(DEFAULT_SCHEMA, TEST_TABLE6); GetTableRequest getTableRequest = new GetTableRequest(TEST_IDENTITY, TEST_QUERY_ID, TEST_CATALOG_NAME, tableName, Collections.emptyMap()); @@ -678,20 +680,21 @@ public void testStructWithSchemaFromGlueTable() throws Exception public void testListWithSchemaFromGlueTable() throws Exception { List columns = new ArrayList<>(); - columns.add(new Column().withName("col0").withType("string")); - columns.add(new Column().withName("stringList").withType("ARRAY ")); - columns.add(new Column().withName("intList").withType("ARRAY ")); - columns.add(new Column().withName("listStructCol").withType("array>")); + columns.add(Column.builder().name("col0").type("string").build()); + columns.add(Column.builder().name("stringList").type("ARRAY ").build()); + columns.add(Column.builder().name("intList").type("ARRAY ").build()); + columns.add(Column.builder().name("listStructCol").type("array>").build()); Map param = ImmutableMap.of( SOURCE_TABLE_PROPERTY, TEST_TABLE7, COLUMN_NAME_MAPPING_PROPERTY, "col0=Col0,col1=Col1,col2=Col2"); - Table table = new Table() - .withParameters(param) - .withPartitionKeys() - .withStorageDescriptor(new StorageDescriptor().withColumns(columns)); - GetTableResult mockResult = new GetTableResult().withTable(table); - when(glueClient.getTable(any())).thenReturn(mockResult); + Table table = Table.builder() + .parameters(param) + .partitionKeys(Collections.EMPTY_SET) + 
.storageDescriptor(StorageDescriptor.builder().columns(columns).build()) + .build(); + software.amazon.awssdk.services.glue.model.GetTableResponse tableResponse = software.amazon.awssdk.services.glue.model.GetTableResponse.builder().table(table).build(); + when(glueClient.getTable(any(software.amazon.awssdk.services.glue.model.GetTableRequest.class))).thenReturn(tableResponse); TableName tableName = new TableName(DEFAULT_SCHEMA, TEST_TABLE7); GetTableRequest getTableRequest = new GetTableRequest(TEST_IDENTITY, TEST_QUERY_ID, TEST_CATALOG_NAME, tableName, Collections.emptyMap()); @@ -760,18 +763,19 @@ public void testNumMapWithSchemaFromGlueTable() throws Exception } List columns = new ArrayList<>(); - columns.add(new Column().withName("col0").withType("string")); - columns.add(new Column().withName("nummap").withType("map")); + columns.add(Column.builder().name("col0").type("string").build()); + columns.add(Column.builder().name("nummap").type("map").build()); Map param = ImmutableMap.of( SOURCE_TABLE_PROPERTY, TEST_TABLE8, COLUMN_NAME_MAPPING_PROPERTY, "col0=Col0,col1=Col1,col2=Col2"); - Table table = new Table() - .withParameters(param) - .withPartitionKeys() - .withStorageDescriptor(new StorageDescriptor().withColumns(columns)); - GetTableResult mockResult = new GetTableResult().withTable(table); - when(glueClient.getTable(any())).thenReturn(mockResult); + Table table = Table.builder() + .parameters(param) + .partitionKeys(Collections.EMPTY_SET) + .storageDescriptor(StorageDescriptor.builder().columns(columns).build()) + .build(); + software.amazon.awssdk.services.glue.model.GetTableResponse mockResult = software.amazon.awssdk.services.glue.model.GetTableResponse.builder().table(table).build(); + when(glueClient.getTable(any(software.amazon.awssdk.services.glue.model.GetTableRequest.class))).thenReturn(mockResult); TableName tableName = new TableName(DEFAULT_SCHEMA, TEST_TABLE8); GetTableRequest getTableRequest = new GetTableRequest(TEST_IDENTITY, TEST_QUERY_ID, 
TEST_CATALOG_NAME, tableName, Collections.emptyMap()); @@ -824,18 +828,19 @@ public void testNumMapWithSchemaFromGlueTable() throws Exception public void testNumStructWithSchemaFromGlueTable() throws Exception { List columns = new ArrayList<>(); - columns.add(new Column().withName("col0").withType("string")); - columns.add(new Column().withName("nummap").withType("struct")); + columns.add(Column.builder().name("col0").type("string").build()); + columns.add(Column.builder().name("nummap").type("struct").build()); Map param = ImmutableMap.of( SOURCE_TABLE_PROPERTY, TEST_TABLE8, COLUMN_NAME_MAPPING_PROPERTY, "col0=Col0,col1=Col1,col2=Col2"); - Table table = new Table() - .withParameters(param) - .withPartitionKeys() - .withStorageDescriptor(new StorageDescriptor().withColumns(columns)); - GetTableResult mockResult = new GetTableResult().withTable(table); - when(glueClient.getTable(any())).thenReturn(mockResult); + Table table = Table.builder() + .parameters(param) + .partitionKeys(Collections.EMPTY_SET) + .storageDescriptor(StorageDescriptor.builder().columns(columns).build()) + .build(); + software.amazon.awssdk.services.glue.model.GetTableResponse mockResult = software.amazon.awssdk.services.glue.model.GetTableResponse.builder().table(table).build(); + when(glueClient.getTable(any(software.amazon.awssdk.services.glue.model.GetTableRequest.class))).thenReturn(mockResult); TableName tableName = new TableName(DEFAULT_SCHEMA, TEST_TABLE8); GetTableRequest getTableRequest = new GetTableRequest(TEST_IDENTITY, TEST_QUERY_ID, TEST_CATALOG_NAME, tableName, Collections.emptyMap()); diff --git a/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchMetadataHandler.java b/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchMetadataHandler.java index eb781ef5b9..35c7ce671e 100644 --- a/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchMetadataHandler.java 
+++ b/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchMetadataHandler.java @@ -42,7 +42,6 @@ import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory; import com.amazonaws.athena.connectors.elasticsearch.qpt.ElasticsearchQueryPassthrough; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.glue.AWSGlue; import com.google.common.collect.ImmutableMap; import org.apache.arrow.util.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Field; @@ -53,6 +52,7 @@ import org.elasticsearch.client.indices.GetIndexResponse; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.glue.GlueClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.IOException; @@ -109,7 +109,7 @@ public class ElasticsearchMetadataHandler protected static final String INDEX_KEY = "index"; - private final AWSGlue awsGlue; + private final GlueClient awsGlue; private final AwsRestHighLevelClientFactory clientFactory; private final ElasticsearchDomainMapProvider domainMapProvider; @@ -130,7 +130,7 @@ public ElasticsearchMetadataHandler(Map configOptions) @VisibleForTesting protected ElasticsearchMetadataHandler( - AWSGlue awsGlue, + GlueClient awsGlue, EncryptionKeyFactory keyFactory, SecretsManagerClient awsSecretsManager, AmazonAthena athena, diff --git a/athena-elasticsearch/src/test/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchMetadataHandlerTest.java b/athena-elasticsearch/src/test/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchMetadataHandlerTest.java index 9478de2c5f..eed4d06414 100644 --- a/athena-elasticsearch/src/test/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchMetadataHandlerTest.java +++ b/athena-elasticsearch/src/test/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchMetadataHandlerTest.java @@ -29,7 +29,6 @@ import 
com.amazonaws.athena.connector.lambda.security.FederatedIdentity; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.glue.AWSGlue; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; @@ -53,6 +52,7 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.glue.GlueClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.IOException; @@ -90,7 +90,7 @@ public class ElasticsearchMetadataHandlerTest private BlockAllocatorImpl allocator; @Mock - private AWSGlue awsGlue; + private GlueClient awsGlue; @Mock private SecretsManagerClient awsSecretsManager; diff --git a/athena-federation-sdk/pom.xml b/athena-federation-sdk/pom.xml index 932614edbb..07722a343d 100644 --- a/athena-federation-sdk/pom.xml +++ b/athena-federation-sdk/pom.xml @@ -117,6 +117,10 @@ commons-logging commons-logging + + software.amazon.awssdk + netty-nio-client + @@ -147,9 +151,20 @@ - com.amazonaws - aws-java-sdk-glue - ${aws-sdk.version} + software.amazon.awssdk + glue + ${aws-sdk-v2.version} + + + software.amazon.awssdk + netty-nio-client + + + + + software.amazon.awssdk + apache-client + ${aws-sdk-v2.version} com.amazonaws diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/GlueMetadataHandler.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/GlueMetadataHandler.java index c639f4f197..c492cb208e 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/GlueMetadataHandler.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/GlueMetadataHandler.java @@ -20,7 +20,6 @@ * #L% */ -import com.amazonaws.ClientConfiguration; 
import com.amazonaws.athena.connector.lambda.data.BlockAllocator; import com.amazonaws.athena.connector.lambda.data.SchemaBuilder; import com.amazonaws.athena.connector.lambda.domain.TableName; @@ -34,16 +33,6 @@ import com.amazonaws.athena.connector.lambda.metadata.glue.GlueFieldLexer; import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.glue.AWSGlue; -import com.amazonaws.services.glue.AWSGlueClientBuilder; -import com.amazonaws.services.glue.model.Column; -import com.amazonaws.services.glue.model.Database; -import com.amazonaws.services.glue.model.GetDatabasesRequest; -import com.amazonaws.services.glue.model.GetDatabasesResult; -import com.amazonaws.services.glue.model.GetTableResult; -import com.amazonaws.services.glue.model.GetTablesRequest; -import com.amazonaws.services.glue.model.GetTablesResult; -import com.amazonaws.services.glue.model.Table; import com.google.common.base.Splitter; import com.google.common.base.Strings; import com.google.common.collect.ImmutableMap; @@ -53,8 +42,18 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.http.apache.ApacheHttpClient; +import software.amazon.awssdk.services.glue.GlueClient; +import software.amazon.awssdk.services.glue.model.Column; +import software.amazon.awssdk.services.glue.model.Database; +import software.amazon.awssdk.services.glue.model.GetDatabasesRequest; +import software.amazon.awssdk.services.glue.model.GetTablesRequest; +import software.amazon.awssdk.services.glue.model.GetTablesResponse; +import software.amazon.awssdk.services.glue.model.Table; +import software.amazon.awssdk.services.glue.paginators.GetDatabasesIterable; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import java.time.Duration; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; @@ 
-137,7 +136,7 @@ public abstract class GlueMetadataHandler // emulate behavior from prior versions. public static final String GLUE_TABLE_CONTAINS_PREVIOUSLY_UNSUPPORTED_TYPE = "glueTableContainsPreviouslyUnsupportedType"; - private final AWSGlue awsGlue; + private final GlueClient awsGlue; /** * Basic constructor which is recommended when extending this class. @@ -156,8 +155,10 @@ public GlueMetadataHandler(String sourceType, java.util.Map conf boolean disabled = configOptions.get(DISABLE_GLUE) != null && !"false".equalsIgnoreCase(configOptions.get(DISABLE_GLUE)); // null if the current instance does not want to leverage Glue for metadata - awsGlue = disabled ? null : (AWSGlueClientBuilder.standard() - .withClientConfiguration(new ClientConfiguration().withConnectionTimeout(CONNECT_TIMEOUT)) + awsGlue = disabled ? null : (GlueClient.builder() + .httpClientBuilder(ApacheHttpClient + .builder() + .connectionTimeout(Duration.ofMillis(CONNECT_TIMEOUT))) .build()); } @@ -168,7 +169,7 @@ public GlueMetadataHandler(String sourceType, java.util.Map conf * @param sourceType The source type, used in diagnostic logging. * @param configOptions The configOptions for this MetadataHandler. 
*/ - public GlueMetadataHandler(AWSGlue awsGlue, String sourceType, java.util.Map configOptions) + public GlueMetadataHandler(GlueClient awsGlue, String sourceType, java.util.Map configOptions) { super(sourceType, configOptions); this.awsGlue = awsGlue; @@ -187,7 +188,7 @@ public GlueMetadataHandler(AWSGlue awsGlue, String sourceType, java.util.Map schemas = new ArrayList<>(); - String nextToken = null; - do { - getDatabasesRequest.setNextToken(nextToken); - GetDatabasesResult result = awsGlue.getDatabases(getDatabasesRequest); - - for (Database next : result.getDatabaseList()) { - if (filter == null || filter.filter(next)) { - schemas.add(next.getName()); - } - } - - nextToken = result.getNextToken(); - } - while (nextToken != null); - + GetDatabasesRequest getDatabasesRequest = GetDatabasesRequest.builder() + .catalogId(getCatalog(request)) + .build(); + + GetDatabasesIterable responses = awsGlue.getDatabasesPaginator(getDatabasesRequest); + List schemas = responses.stream() + .flatMap(response -> response.databaseList().stream()) + .filter(database -> filter == null || filter.filter(database)) + .map(Database::name) + .collect(Collectors.toList()); + return new ListSchemasResponse(request.getCatalogName(), schemas); } @@ -309,31 +302,29 @@ public ListTablesResponse doListTables(BlockAllocator blockAllocator, ListTables protected ListTablesResponse doListTables(BlockAllocator blockAllocator, ListTablesRequest request, TableFilter filter) throws Exception { - GetTablesRequest getTablesRequest = new GetTablesRequest(); - getTablesRequest.setCatalogId(getCatalog(request)); - getTablesRequest.setDatabaseName(request.getSchemaName()); - Set tables = new HashSet<>(); String nextToken = request.getNextToken(); int pageSize = request.getPageSize(); do { - getTablesRequest.setNextToken(nextToken); + GetTablesRequest.Builder getTablesRequest = GetTablesRequest.builder() + .catalogId(getCatalog(request)) + .databaseName(request.getSchemaName()) + .nextToken(nextToken); if 
(pageSize != UNLIMITED_PAGE_SIZE_VALUE) { // Paginated requests will include the maxResults argument determined by the minimum value between the // pageSize and the maximum results supported by Glue (as defined in the Glue API docs). int maxResults = Math.min(pageSize, GET_TABLES_REQUEST_MAX_RESULTS); - getTablesRequest.setMaxResults(maxResults); + getTablesRequest.maxResults(maxResults); pageSize -= maxResults; } - GetTablesResult result = awsGlue.getTables(getTablesRequest); - - for (Table next : result.getTableList()) { - if (filter == null || filter.filter(next)) { - tables.add(new TableName(request.getSchemaName(), next.getName())); - } - } - - nextToken = result.getNextToken(); + GetTablesResponse response = awsGlue.getTables(getTablesRequest.build()); + tables.addAll(response.tableList() + .stream() + .filter(table -> filter == null || filter.filter(table)) + .map(table -> new TableName(request.getSchemaName(), table.name())) + .collect(Collectors.toSet())); + + nextToken = response.nextToken(); } while (nextToken != null && (pageSize == UNLIMITED_PAGE_SIZE_VALUE || pageSize > 0)); @@ -387,21 +378,23 @@ protected GetTableResponse doGetTable(BlockAllocator blockAllocator, GetTableReq throws Exception { TableName tableName = request.getTableName(); - com.amazonaws.services.glue.model.GetTableRequest getTableRequest = new com.amazonaws.services.glue.model.GetTableRequest(); - getTableRequest.setCatalogId(getCatalog(request)); - getTableRequest.setDatabaseName(tableName.getSchemaName()); - getTableRequest.setName(tableName.getTableName()); + //Full class name required due to name overlap with athena + software.amazon.awssdk.services.glue.model.GetTableRequest getTableRequest = software.amazon.awssdk.services.glue.model.GetTableRequest.builder() + .catalogId(getCatalog(request)) + .databaseName(tableName.getSchemaName()) + .name(tableName.getTableName()) + .build(); - GetTableResult result = awsGlue.getTable(getTableRequest); - Table table = result.getTable(); + 
software.amazon.awssdk.services.glue.model.GetTableResponse response = awsGlue.getTable(getTableRequest); + Table table = response.table(); if (filter != null && !filter.filter(table)) { throw new RuntimeException("No matching table found " + request.getTableName()); } SchemaBuilder schemaBuilder = SchemaBuilder.newBuilder(); - if (table.getParameters() != null) { - table.getParameters() + if (table.parameters() != null) { + table.parameters() .entrySet() .forEach(next -> schemaBuilder.addMetadata(next.getKey(), next.getValue())); } @@ -412,35 +405,37 @@ protected GetTableResponse doGetTable(BlockAllocator blockAllocator, GetTableReq Map datetimeFormatMappingWithColumnName = new HashMap<>(); Set partitionCols = new HashSet<>(); - if (table.getPartitionKeys() != null) { - partitionCols = table.getPartitionKeys() - .stream().map(next -> columnNameMapping.getOrDefault(next.getName(), next.getName())).collect(Collectors.toSet()); + if (table.partitionKeys() != null) { + partitionCols = table.partitionKeys() + .stream() + .map(next -> columnNameMapping.getOrDefault(next.name(), next.name())) + .collect(Collectors.toSet()); } // partition columns should be added to the schema if they exist - List allColumns = Stream.of(table.getStorageDescriptor().getColumns(), table.getPartitionKeys() == null ? new ArrayList() : table.getPartitionKeys()) + List allColumns = Stream.of(table.storageDescriptor().columns(), table.partitionKeys() == null ? 
new ArrayList() : table.partitionKeys()) .flatMap(x -> x.stream()) .collect(Collectors.toList()); boolean glueTableContainsPreviouslyUnsupportedType = false; for (Column next : allColumns) { - String rawColumnName = next.getName(); + String rawColumnName = next.name(); String mappedColumnName = columnNameMapping.getOrDefault(rawColumnName, rawColumnName); // apply any type override provided in typeOverrideMapping from metadata // this is currently only used for timestamp with timezone support - logger.info("Column {} with registered type {}", rawColumnName, next.getType()); - Field arrowField = convertField(mappedColumnName, next.getType()); + logger.info("Column {} with registered type {}", rawColumnName, next.type()); + Field arrowField = convertField(mappedColumnName, next.type()); schemaBuilder.addField(arrowField); // Add non-null non-empty comments to metadata - if (next.getComment() != null && !next.getComment().trim().isEmpty()) { - schemaBuilder.addMetadata(mappedColumnName, next.getComment()); + if (next.comment() != null && !next.comment().trim().isEmpty()) { + schemaBuilder.addMetadata(mappedColumnName, next.comment()); } if (dateTimeFormatMapping.containsKey(rawColumnName)) { datetimeFormatMappingWithColumnName.put(mappedColumnName, dateTimeFormatMapping.get(rawColumnName)); } // Indicate that we found a `set` or `decimal` type so that we can set this metadata on the schemaBuilder later on - if (glueTableContainsPreviouslyUnsupportedType == false && isPreviouslyUnsupported(next.getType(), arrowField)) { + if (glueTableContainsPreviouslyUnsupportedType == false && isPreviouslyUnsupported(next.type(), arrowField)) { glueTableContainsPreviouslyUnsupportedType = true; } } @@ -449,8 +444,8 @@ protected GetTableResponse doGetTable(BlockAllocator blockAllocator, GetTableReq populateSourceTableNameIfAvailable(table, schemaBuilder); - if (table.getViewOriginalText() != null && !table.getViewOriginalText().isEmpty()) { - 
schemaBuilder.addMetadata(VIEW_METADATA_FIELD, table.getViewOriginalText()); + if (table.viewOriginalText() != null && !table.viewOriginalText().isEmpty()) { + schemaBuilder.addMetadata(VIEW_METADATA_FIELD, table.viewOriginalText()); } schemaBuilder.addMetadata(GLUE_TABLE_CONTAINS_PREVIOUSLY_UNSUPPORTED_TYPE, String.valueOf(glueTableContainsPreviouslyUnsupportedType)); @@ -515,12 +510,12 @@ public interface DatabaseFilter */ protected static void populateSourceTableNameIfAvailable(Table table, SchemaBuilder schemaBuilder) { - String sourceTableProperty = table.getParameters().get(SOURCE_TABLE_PROPERTY); + String sourceTableProperty = table.parameters().get(SOURCE_TABLE_PROPERTY); if (sourceTableProperty != null) { // table property exists so nothing to do (assumes all table properties were already copied) return; } - String location = table.getStorageDescriptor().getLocation(); + String location = table.storageDescriptor().location(); if (location != null) { Matcher matcher = TABLE_ARN_REGEX.matcher(location); if (matcher.matches()) { @@ -550,7 +545,7 @@ protected static String getSourceTableName(Schema schema) */ protected static Map getColumnNameMapping(Table table) { - String columnNameMappingParam = table.getParameters().get(COLUMN_NAME_MAPPING_PROPERTY); + String columnNameMappingParam = table.parameters().get(COLUMN_NAME_MAPPING_PROPERTY); if (!Strings.isNullOrEmpty(columnNameMappingParam)) { return MAP_SPLITTER.split(columnNameMappingParam); } @@ -567,7 +562,7 @@ protected static Map getColumnNameMapping(Table table) */ private Map getDateTimeFormatMapping(Table table) { - String datetimeFormatMappingParam = table.getParameters().get(DATETIME_FORMAT_MAPPING_PROPERTY); + String datetimeFormatMappingParam = table.parameters().get(DATETIME_FORMAT_MAPPING_PROPERTY); if (!Strings.isNullOrEmpty(datetimeFormatMappingParam)) { return MAP_SPLITTER.split(datetimeFormatMappingParam); } diff --git 
a/athena-federation-sdk/src/test/java/com/amazonaws/athena/connector/lambda/handlers/GlueMetadataHandlerTest.java b/athena-federation-sdk/src/test/java/com/amazonaws/athena/connector/lambda/handlers/GlueMetadataHandlerTest.java index 20c0e5819d..816456ef73 100644 --- a/athena-federation-sdk/src/test/java/com/amazonaws/athena/connector/lambda/handlers/GlueMetadataHandlerTest.java +++ b/athena-federation-sdk/src/test/java/com/amazonaws/athena/connector/lambda/handlers/GlueMetadataHandlerTest.java @@ -42,19 +42,20 @@ import com.amazonaws.athena.connector.lambda.security.IdentityUtil; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.glue.AWSGlue; -import com.amazonaws.services.glue.model.Column; -import com.amazonaws.services.glue.model.Database; -import com.amazonaws.services.glue.model.GetDatabasesRequest; -import com.amazonaws.services.glue.model.GetDatabasesResult; -import com.amazonaws.services.glue.model.GetTableResult; -import com.amazonaws.services.glue.model.GetTablesRequest; -import com.amazonaws.services.glue.model.GetTablesResult; -import com.amazonaws.services.glue.model.StorageDescriptor; -import com.amazonaws.services.glue.model.Table; import com.amazonaws.services.lambda.runtime.Context; import com.google.common.collect.ImmutableList; +import software.amazon.awssdk.services.glue.GlueClient; +import software.amazon.awssdk.services.glue.model.Column; +import software.amazon.awssdk.services.glue.model.Database; +import software.amazon.awssdk.services.glue.model.GetDatabaseResponse; +import software.amazon.awssdk.services.glue.model.GetDatabasesRequest; +import software.amazon.awssdk.services.glue.model.GetDatabasesResponse; +import software.amazon.awssdk.services.glue.model.GetTablesRequest; +import software.amazon.awssdk.services.glue.model.GetTablesResponse; +import software.amazon.awssdk.services.glue.model.StorageDescriptor; +import 
software.amazon.awssdk.services.glue.model.Table; +import software.amazon.awssdk.services.glue.paginators.GetDatabasesIterable; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import org.apache.arrow.vector.types.Types; @@ -116,11 +117,11 @@ public class GlueMetadataHandlerTest // consider the order of the tables deterministic (i.e. pagination will work irrespective of the order that the // tables are returned from the source). private final List
unPaginatedTables = new ImmutableList.Builder
() - .add(new Table().withName("table3")) - .add(new Table().withName("table2")) - .add(new Table().withName("table5")) - .add(new Table().withName("table4")) - .add(new Table().withName("table1")) + .add(Table.builder().name("table3").build()) + .add(Table.builder().name("table2").build()) + .add(Table.builder().name("table5").build()) + .add(Table.builder().name("table4").build()) + .add(Table.builder().name("table1").build()) .build(); // The following response is expected be returned from doListTables when the pagination pageSize is greater than @@ -138,7 +139,7 @@ public class GlueMetadataHandlerTest public TestName testName = new TestName(); @Mock - private AWSGlue mockGlue; + private GlueClient mockGlue; @Mock private Context mockContext; @@ -189,33 +190,39 @@ public GetDataSourceCapabilitiesResponse doGetDataSourceCapabilities(BlockAlloca .thenAnswer((InvocationOnMock invocationOnMock) -> { GetTablesRequest request = (GetTablesRequest) invocationOnMock.getArguments()[0]; - String nextToken = request.getNextToken(); - int pageSize = request.getMaxResults() == null ? UNLIMITED_PAGE_SIZE_VALUE : request.getMaxResults(); - assertEquals(accountId, request.getCatalogId()); - assertEquals(schema, request.getDatabaseName()); - GetTablesResult mockResult = mock(GetTablesResult.class); + String nextToken = request.nextToken(); + int pageSize = request.maxResults() == null ? UNLIMITED_PAGE_SIZE_VALUE : request.maxResults(); + assertEquals(accountId, request.catalogId()); + assertEquals(schema, request.databaseName()); + GetTablesResponse response; if (pageSize == UNLIMITED_PAGE_SIZE_VALUE) { // Simulate full list of tables returned from Glue. - when(mockResult.getTableList()).thenReturn(unPaginatedTables); - when(mockResult.getNextToken()).thenReturn(null); + response = GetTablesResponse.builder() + .tableList(unPaginatedTables) + .nextToken(null) + .build(); } else { // Simulate paginated list of tables returned from Glue. List
paginatedTables = unPaginatedTables.stream() - .sorted(Comparator.comparing(Table::getName)) - .filter(table -> nextToken == null || table.getName().compareTo(nextToken) >= 0) + .sorted(Comparator.comparing(Table::name)) + .filter(table -> nextToken == null || table.name().compareTo(nextToken) >= 0) .limit(pageSize + 1) .collect(Collectors.toList()); if (paginatedTables.size() > pageSize) { - when(mockResult.getNextToken()).thenReturn(paginatedTables.get(pageSize).getName()); - when(mockResult.getTableList()).thenReturn(paginatedTables.subList(0, pageSize)); + response = GetTablesResponse.builder() + .tableList(paginatedTables.subList(0, pageSize)) + .nextToken(paginatedTables.get(pageSize).name()) + .build(); } else { - when(mockResult.getNextToken()).thenReturn(null); - when(mockResult.getTableList()).thenReturn(paginatedTables); + response = GetTablesResponse.builder() + .tableList(paginatedTables) + .nextToken(null) + .build(); } } - return mockResult; + return response; }); } @@ -224,6 +231,7 @@ public void tearDown() throws Exception { allocator.close(); + mockGlue.close(); logger.info("{}: exit ", testName.getMethodName()); } @@ -232,25 +240,18 @@ public void doListSchemaNames() throws Exception { List databases = new ArrayList<>(); - databases.add(new Database().withName("db1")); - databases.add(new Database().withName("db2")); + databases.add(Database.builder().name("db1").build()); + databases.add(Database.builder().name("db2").build()); - when(mockGlue.getDatabases(nullable(GetDatabasesRequest.class))) + when(mockGlue.getDatabasesPaginator(nullable(GetDatabasesRequest.class))) .thenAnswer((InvocationOnMock invocationOnMock) -> { GetDatabasesRequest request = (GetDatabasesRequest) invocationOnMock.getArguments()[0]; - assertEquals(accountId, request.getCatalogId()); - GetDatabasesResult mockResult = mock(GetDatabasesResult.class); - if (request.getNextToken() == null) { - when(mockResult.getDatabaseList()).thenReturn(databases); - 
when(mockResult.getNextToken()).thenReturn("next"); - } - else { - //only return real info on 1st call - when(mockResult.getDatabaseList()).thenReturn(new ArrayList<>()); - when(mockResult.getNextToken()).thenReturn(null); - } - return mockResult; + assertEquals(accountId, request.catalogId()); + GetDatabasesIterable mockIterable = mock(GetDatabasesIterable.class); + GetDatabasesResponse response = GetDatabasesResponse.builder().databaseList(databases).build(); + when(mockIterable.stream()).thenReturn(Collections.singletonList(response).stream()); + return mockIterable; }); ListSchemasRequest req = new ListSchemasRequest(IdentityUtil.fakeIdentity(), queryId, catalog); @@ -258,10 +259,8 @@ public void doListSchemaNames() logger.info("doListSchemas - {}", res.getSchemas()); - assertEquals(databases.stream().map(next -> next.getName()).collect(Collectors.toList()), + assertEquals(databases.stream().map(next -> next.name()).collect(Collectors.toList()), new ArrayList<>(res.getSchemas())); - - verify(mockGlue, times(2)).getDatabases(nullable(GetDatabasesRequest.class)); } @Test @@ -332,38 +331,32 @@ public void doGetTable() expectedParams.put(DATETIME_FORMAT_MAPPING_PROPERTY, "col2=someformat2, col1=someformat1 "); List columns = new ArrayList<>(); - columns.add(new Column().withName("col1").withType("int").withComment("comment")); - columns.add(new Column().withName("col2").withType("bigint").withComment("comment")); - columns.add(new Column().withName("col3").withType("string").withComment("comment")); - columns.add(new Column().withName("col4").withType("timestamp").withComment("comment")); - columns.add(new Column().withName("col5").withType("date").withComment("comment")); - columns.add(new Column().withName("col6").withType("timestamptz").withComment("comment")); - columns.add(new Column().withName("col7").withType("timestamptz").withComment("comment")); - - List partitionKeys = new ArrayList<>(); - columns.add(new 
Column().withName("partition_col1").withType("int").withComment("comment")); - - Table mockTable = mock(Table.class); - StorageDescriptor mockSd = mock(StorageDescriptor.class); - - Mockito.lenient().when(mockTable.getName()).thenReturn(table); - when(mockTable.getStorageDescriptor()).thenReturn(mockSd); - when(mockTable.getParameters()).thenReturn(expectedParams); - when(mockSd.getColumns()).thenReturn(columns); - - when(mockGlue.getTable(nullable(com.amazonaws.services.glue.model.GetTableRequest.class))) + columns.add(Column.builder().name("col1").type("int").comment("comment").build()); + columns.add(Column.builder().name("col2").type("bigint").comment("comment").build()); + columns.add(Column.builder().name("col3").type("string").comment("comment").build()); + columns.add(Column.builder().name("col4").type("timestamp").comment("comment").build()); + columns.add(Column.builder().name("col5").type("date").comment("comment").build()); + columns.add(Column.builder().name("col6").type("timestamptz").comment("comment").build()); + columns.add(Column.builder().name("col7").type("timestamptz").comment("comment").build()); + columns.add(Column.builder().name("partition_col1").type("int").comment("comment").build()); + + StorageDescriptor sd = StorageDescriptor.builder().columns(columns).build(); + Table returnTable = Table.builder().storageDescriptor(sd).name(table).parameters(expectedParams).build(); + + when(mockGlue.getTable(nullable(software.amazon.awssdk.services.glue.model.GetTableRequest.class))) .thenAnswer((InvocationOnMock invocationOnMock) -> { - com.amazonaws.services.glue.model.GetTableRequest request = - (com.amazonaws.services.glue.model.GetTableRequest) invocationOnMock.getArguments()[0]; + software.amazon.awssdk.services.glue.model.GetTableRequest request = + (software.amazon.awssdk.services.glue.model.GetTableRequest) invocationOnMock.getArguments()[0]; - assertEquals(accountId, request.getCatalogId()); - assertEquals(schema, 
request.getDatabaseName()); - assertEquals(table, request.getName()); + assertEquals(accountId, request.catalogId()); + assertEquals(schema, request.databaseName()); + assertEquals(table, request.name()); - GetTableResult mockResult = mock(GetTableResult.class); - when(mockResult.getTable()).thenReturn(mockTable); - return mockResult; + software.amazon.awssdk.services.glue.model.GetTableResponse tableResponse = software.amazon.awssdk.services.glue.model.GetTableResponse.builder() + .table(returnTable) + .build(); + return tableResponse; }); GetTableRequest req = new GetTableRequest(IdentityUtil.fakeIdentity(), queryId, catalog, new TableName(schema, table), Collections.emptyMap()); @@ -403,8 +396,13 @@ public void populateSourceTableFromLocation() { Map params = new HashMap<>(); List partitions = Arrays.asList("aws", "aws-cn", "aws-us-gov"); for (String partition : partitions) { - StorageDescriptor storageDescriptor = new StorageDescriptor().withLocation(String.format("arn:%s:dynamodb:us-east-1:012345678910:table/My-Table", partition)); - Table table = new Table().withParameters(params).withStorageDescriptor(storageDescriptor); + StorageDescriptor storageDescriptor = StorageDescriptor.builder() + .location(String.format("arn:%s:dynamodb:us-east-1:012345678910:table/My-Table", partition)) + .build(); + Table table = Table.builder() + .parameters(params) + .storageDescriptor(storageDescriptor) + .build(); SchemaBuilder schemaBuilder = new SchemaBuilder(); populateSourceTableNameIfAvailable(table, schemaBuilder); Schema schema = schemaBuilder.build(); @@ -424,29 +422,25 @@ public void doGetTableEmptyComment() expectedParams.put("col1", "col1"); List columns = new ArrayList<>(); - columns.add(new Column().withName("col1").withType("int").withComment(" ")); - - Table mockTable = mock(Table.class); - StorageDescriptor mockSd = mock(StorageDescriptor.class); + columns.add(Column.builder().name("col1").type("int").comment(" ").build()); - 
Mockito.lenient().when(mockTable.getName()).thenReturn(table); - when(mockTable.getStorageDescriptor()).thenReturn(mockSd); - when(mockTable.getParameters()).thenReturn(expectedParams); - when(mockSd.getColumns()).thenReturn(columns); + StorageDescriptor sd = StorageDescriptor.builder().columns(columns).build(); + Table resultTable = Table.builder().storageDescriptor(sd).parameters(expectedParams).build(); - when(mockGlue.getTable(nullable(com.amazonaws.services.glue.model.GetTableRequest.class))) + when(mockGlue.getTable(nullable(software.amazon.awssdk.services.glue.model.GetTableRequest.class))) .thenAnswer((InvocationOnMock invocationOnMock) -> { - com.amazonaws.services.glue.model.GetTableRequest request = - (com.amazonaws.services.glue.model.GetTableRequest) invocationOnMock.getArguments()[0]; + software.amazon.awssdk.services.glue.model.GetTableRequest request = + (software.amazon.awssdk.services.glue.model.GetTableRequest) invocationOnMock.getArguments()[0]; - assertEquals(accountId, request.getCatalogId()); - assertEquals(schema, request.getDatabaseName()); - assertEquals(table, request.getName()); + assertEquals(accountId, request.catalogId()); + assertEquals(schema, request.databaseName()); + assertEquals(table, request.name()); - GetTableResult mockResult = mock(GetTableResult.class); - when(mockResult.getTable()).thenReturn(mockTable); - return mockResult; + software.amazon.awssdk.services.glue.model.GetTableResponse response = software.amazon.awssdk.services.glue.model.GetTableResponse.builder() + .table(resultTable) + .build(); + return response; }); GetTableRequest req = new GetTableRequest(IdentityUtil.fakeIdentity(), queryId, catalog, new TableName(schema, table), Collections.emptyMap()); diff --git a/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsMetadataHandler.java b/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsMetadataHandler.java index 1fcb52c7b1..466a9708c5 100644 --- 
a/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsMetadataHandler.java +++ b/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsMetadataHandler.java @@ -40,10 +40,6 @@ import com.amazonaws.athena.connectors.gcs.common.PartitionUtil; import com.amazonaws.athena.connectors.gcs.storage.StorageMetadata; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.glue.AWSGlue; -import com.amazonaws.services.glue.model.Column; -import com.amazonaws.services.glue.model.Database; -import com.amazonaws.services.glue.model.Table; import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.arrow.memory.BufferAllocator; import org.apache.arrow.util.VisibleForTesting; @@ -51,6 +47,10 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.glue.GlueClient; +import software.amazon.awssdk.services.glue.model.Column; +import software.amazon.awssdk.services.glue.model.Database; +import software.amazon.awssdk.services.glue.model.Table; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.IOException; @@ -80,11 +80,11 @@ public class GcsMetadataHandler */ private static final String SOURCE_TYPE = "gcs"; private static final CharSequence GCS_FLAG = "google-cloud-storage-flag"; - private static final DatabaseFilter DB_FILTER = (Database database) -> (database.getLocationUri() != null && database.getLocationUri().contains(GCS_FLAG)); + private static final DatabaseFilter DB_FILTER = (Database database) -> (database.locationUri() != null && database.locationUri().contains(GCS_FLAG)); // used to filter out Glue tables which lack indications of being used for GCS. 
- private static final TableFilter TABLE_FILTER = (Table table) -> table.getStorageDescriptor().getLocation().startsWith(GCS_LOCATION_PREFIX); + private static final TableFilter TABLE_FILTER = (Table table) -> table.storageDescriptor().location().startsWith(GCS_LOCATION_PREFIX); private final StorageMetadata datasource; - private final AWSGlue glueClient; + private final GlueClient glueClient; private final BufferAllocator allocator; public GcsMetadataHandler(BufferAllocator allocator, java.util.Map configOptions) throws IOException @@ -104,7 +104,7 @@ protected GcsMetadataHandler( AmazonAthena athena, String spillBucket, String spillPrefix, - AWSGlue glueClient, BufferAllocator allocator, + GlueClient glueClient, BufferAllocator allocator, java.util.Map configOptions) throws IOException { super(glueClient, keyFactory, awsSecretsManager, athena, SOURCE_TYPE, spillBucket, spillPrefix, configOptions); @@ -174,9 +174,9 @@ public GetTableResponse doGetTable(BlockAllocator blockAllocator, GetTableReques //fetch schema from dataset api Schema schema = datasource.buildTableSchema(table, allocator); Map columnNameMapping = getColumnNameMapping(table); - List partitionKeys = table.getPartitionKeys() == null ? com.google.common.collect.ImmutableList.of() : table.getPartitionKeys(); + List partitionKeys = table.partitionKeys() == null ? 
com.google.common.collect.ImmutableList.of() : table.partitionKeys(); Set partitionCols = partitionKeys.stream() - .map(next -> columnNameMapping.getOrDefault(next.getName(), next.getName())).collect(Collectors.toSet()); + .map(next -> columnNameMapping.getOrDefault(next.name(), next.name())).collect(Collectors.toSet()); return new GetTableResponse(request.getCatalogName(), request.getTableName(), schema, partitionCols); } } @@ -246,14 +246,14 @@ public GetSplitsResponse doGetSplits(BlockAllocator allocator, GetSplitsRequest //getting storage file list List fileList = datasource.getStorageSplits(locationUri); SpillLocation spillLocation = makeSpillLocation(request); - LOGGER.info("Split list for {}.{} is \n{}", table.getDatabaseName(), table.getName(), fileList); + LOGGER.info("Split list for {}.{} is \n{}", table.databaseName(), table.name(), fileList); //creating splits based folder String storageSplitJson = new ObjectMapper().writeValueAsString(fileList); LOGGER.info("MetadataHandler=GcsMetadataHandler|Method=doGetSplits|Message=StorageSplit JSON\n{}", storageSplitJson); Split.Builder splitBuilder = Split.newBuilder(spillLocation, makeEncryptionKey()) - .add(FILE_FORMAT, table.getParameters().get(CLASSIFICATION_GLUE_TABLE_PARAM)) + .add(FILE_FORMAT, table.parameters().get(CLASSIFICATION_GLUE_TABLE_PARAM)) .add(STORAGE_SPLIT_JSON, storageSplitJson); // set partition column name and value in split diff --git a/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsUtil.java b/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsUtil.java index 121ea96a15..aea6586626 100644 --- a/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsUtil.java +++ b/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsUtil.java @@ -22,15 +22,14 @@ import com.amazonaws.athena.connector.lambda.data.DateTimeFormatterUtil; import com.amazonaws.athena.connector.lambda.domain.TableName; import 
com.amazonaws.athena.connector.lambda.security.CachableSecretsManager; -import com.amazonaws.services.glue.AWSGlue; -import com.amazonaws.services.glue.model.GetTableRequest; -import com.amazonaws.services.glue.model.GetTableResult; -import com.amazonaws.services.glue.model.Table; import com.sun.jna.platform.unix.LibC; import org.apache.arrow.vector.FieldVector; import org.apache.arrow.vector.types.pojo.ArrowType; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.glue.GlueClient; +import software.amazon.awssdk.services.glue.model.GetTableRequest; +import software.amazon.awssdk.services.glue.model.Table; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import javax.net.ssl.TrustManager; @@ -143,14 +142,15 @@ public static String createUri(String path) * @param awsGlue AWS Glue client * @return Table object */ - public static Table getGlueTable(TableName tableName, AWSGlue awsGlue) + public static Table getGlueTable(TableName tableName, GlueClient awsGlue) { - GetTableRequest getTableRequest = new GetTableRequest(); - getTableRequest.setDatabaseName(tableName.getSchemaName()); - getTableRequest.setName(tableName.getTableName()); + GetTableRequest getTableRequest = GetTableRequest.builder() + .databaseName(tableName.getSchemaName()) + .name(tableName.getTableName()) + .build(); - GetTableResult result = awsGlue.getTable(getTableRequest); - return result.getTable(); + software.amazon.awssdk.services.glue.model.GetTableResponse response = awsGlue.getTable(getTableRequest); + return response.table(); } // The value returned here is going to block.offerValue, which eventually invokes BlockUtils.setValue() diff --git a/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/common/PartitionUtil.java b/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/common/PartitionUtil.java index efd0bcf9dd..6de7deffc5 100644 --- 
a/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/common/PartitionUtil.java +++ b/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/common/PartitionUtil.java @@ -19,12 +19,12 @@ */ package com.amazonaws.athena.connectors.gcs.common; -import com.amazonaws.services.glue.model.Column; -import com.amazonaws.services.glue.model.Table; import org.apache.arrow.vector.FieldVector; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.glue.model.Column; +import software.amazon.awssdk.services.glue.model.Table; import java.net.URI; import java.net.URISyntaxException; @@ -72,9 +72,9 @@ private PartitionUtil() */ public static Map getPartitionColumnData(Table table, String partitionFolder) { - List partitionKeys = table.getPartitionKeys() == null ? com.google.common.collect.ImmutableList.of() : table.getPartitionKeys(); + List partitionKeys = table.partitionKeys() == null ? com.google.common.collect.ImmutableList.of() : table.partitionKeys(); return getRegExExpression(table).map(folderNameRegEx -> - getPartitionColumnData(table.getParameters().get(PARTITION_PATTERN_KEY), partitionFolder, folderNameRegEx, partitionKeys)) + getPartitionColumnData(table.parameters().get(PARTITION_PATTERN_KEY), partitionFolder, folderNameRegEx, partitionKeys)) .orElse(com.google.common.collect.ImmutableMap.of()); } @@ -93,7 +93,7 @@ protected static Map getPartitionColumnData(String partitionPatt Matcher partitionPatternMatcher = PARTITION_PATTERN.matcher(partitionPattern); Matcher partitionFolderMatcher = Pattern.compile(folderNameRegEx).matcher(partitionFolder); java.util.TreeSet partitionColumnsSet = partitionColumns.stream() - .map(c -> c.getName()) + .map(c -> c.name()) .collect(Collectors.toCollection(() -> new java.util.TreeSet<>(String.CASE_INSENSITIVE_ORDER))); while (partitionFolderMatcher.find()) { for (int j = 1; j <= partitionFolderMatcher.groupCount() && 
partitionPatternMatcher.find(); j++) { @@ -117,8 +117,8 @@ protected static Map getPartitionColumnData(String partitionPatt private static void validatePartitionColumnTypes(List columns) { for (Column column : columns) { - String columnType = column.getType().toLowerCase(); - LOGGER.info("validatePartitionColumnTypes - Field type of {} is {}", column.getName(), columnType); + String columnType = column.type().toLowerCase(); + LOGGER.info("validatePartitionColumnTypes - Field type of {} is {}", column.name(), columnType); switch (columnType) { case "string": case "varchar": @@ -140,9 +140,9 @@ private static void validatePartitionColumnTypes(List columns) */ protected static Optional getRegExExpression(Table table) { - List partitionColumns = table.getPartitionKeys() == null ? com.google.common.collect.ImmutableList.of() : table.getPartitionKeys(); + List partitionColumns = table.partitionKeys() == null ? com.google.common.collect.ImmutableList.of() : table.partitionKeys(); validatePartitionColumnTypes(partitionColumns); - String partitionPattern = table.getParameters().get(PARTITION_PATTERN_KEY); + String partitionPattern = table.parameters().get(PARTITION_PATTERN_KEY); // Check to see if there is a partition pattern configured for the Table by the user // if not, it returns empty value if (partitionPattern == null || StringUtils.isBlank(partitionPattern)) { @@ -170,8 +170,8 @@ protected static Optional getRegExExpression(Table table) public static URI getPartitionsFolderLocationUri(Table table, List fieldVectors, int readerPosition) throws URISyntaxException { String locationUri; - String tableLocation = table.getStorageDescriptor().getLocation(); - String partitionPattern = table.getParameters().get(PARTITION_PATTERN_KEY); + String tableLocation = table.storageDescriptor().location(); + String partitionPattern = table.parameters().get(PARTITION_PATTERN_KEY); if (null != partitionPattern) { for (FieldVector fieldVector : fieldVectors) { 
fieldVector.getReader().setPosition(readerPosition); diff --git a/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/filter/FilterExpressionBuilder.java b/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/filter/FilterExpressionBuilder.java index 9218895c35..13b7780ff6 100644 --- a/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/filter/FilterExpressionBuilder.java +++ b/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/filter/FilterExpressionBuilder.java @@ -21,9 +21,9 @@ import com.amazonaws.athena.connector.lambda.domain.predicate.Constraints; import com.amazonaws.athena.connector.lambda.domain.predicate.ValueSet; -import com.amazonaws.services.glue.model.Column; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.glue.model.Column; import java.util.List; import java.util.Map; @@ -50,8 +50,8 @@ public static Map>> getConstraintsForPartitionedCol { LOGGER.info("Constraint summaries: \n{}", constraints.getSummary()); return partitionColumns.stream().collect(Collectors.toMap( - column -> column.getName(), - column -> singleValuesStringSetFromValueSet(constraints.getSummary().get(column.getName())), + column -> column.name(), + column -> singleValuesStringSetFromValueSet(constraints.getSummary().get(column.name())), // Also we are forced to use Optional here because Collectors.toMap() doesn't allow null values to // be passed into the merge function (it asserts that the values are not null) // We shouldn't have duplicates but just merge the sets if we do. 
diff --git a/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/storage/StorageMetadata.java b/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/storage/StorageMetadata.java index dd48cba8d7..426654f2ca 100644 --- a/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/storage/StorageMetadata.java +++ b/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/storage/StorageMetadata.java @@ -26,9 +26,6 @@ import com.amazonaws.athena.connectors.gcs.GcsUtil; import com.amazonaws.athena.connectors.gcs.common.PartitionUtil; import com.amazonaws.athena.connectors.gcs.filter.FilterExpressionBuilder; -import com.amazonaws.services.glue.AWSGlue; -import com.amazonaws.services.glue.model.Column; -import com.amazonaws.services.glue.model.Table; import com.google.api.gax.paging.Page; import com.google.auth.oauth2.GoogleCredentials; import com.google.cloud.storage.Blob; @@ -47,6 +44,9 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.glue.GlueClient; +import software.amazon.awssdk.services.glue.model.Column; +import software.amazon.awssdk.services.glue.model.Table; import java.io.ByteArrayInputStream; import java.io.IOException; @@ -132,17 +132,17 @@ public List getStorageSplits(URI locationUri) * @return A list of {@link Map} instances * @throws URISyntaxException Throws if any occurs during parsing Uri */ - public List> getPartitionFolders(Schema schema, TableName tableInfo, Constraints constraints, AWSGlue awsGlue) + public List> getPartitionFolders(Schema schema, TableName tableInfo, Constraints constraints, GlueClient awsGlue) throws URISyntaxException { LOGGER.info("Getting partition folder(s) for table {}.{}", tableInfo.getSchemaName(), tableInfo.getTableName()); Table table = GcsUtil.getGlueTable(tableInfo, awsGlue); // Build expression only based on partition keys - List partitionColumns = table.getPartitionKeys() == null ? 
com.google.common.collect.ImmutableList.of() : table.getPartitionKeys(); + List partitionColumns = table.partitionKeys() == null ? com.google.common.collect.ImmutableList.of() : table.partitionKeys(); // getConstraintsForPartitionedColumns gives us a case insensitive mapping of column names to their value set Map>> columnValueConstraintMap = FilterExpressionBuilder.getConstraintsForPartitionedColumns(partitionColumns, constraints); LOGGER.info("columnValueConstraintMap for the request of {}.{} is \n{}", tableInfo.getSchemaName(), tableInfo.getTableName(), columnValueConstraintMap); - URI storageLocation = new URI(table.getStorageDescriptor().getLocation()); + URI storageLocation = new URI(table.storageDescriptor().location()); LOGGER.info("Listing object in location {} under the bucket {}", storageLocation.getAuthority(), storageLocation.getPath()); // Trim leading / String path = storageLocation.getPath().replaceFirst("^/", ""); @@ -226,9 +226,9 @@ private boolean partitionConstraintsSatisfied(Map partitionMap, public Schema buildTableSchema(Table table, BufferAllocator allocator) throws URISyntaxException { SchemaBuilder schemaBuilder = SchemaBuilder.newBuilder(); - String locationUri = table.getStorageDescriptor().getLocation(); + String locationUri = table.storageDescriptor().location(); URI storageLocation = new URI(locationUri); - List fieldList = getFields(storageLocation.getAuthority(), storageLocation.getPath(), table.getParameters().get(CLASSIFICATION_GLUE_TABLE_PARAM), allocator); + List fieldList = getFields(storageLocation.getAuthority(), storageLocation.getPath(), table.parameters().get(CLASSIFICATION_GLUE_TABLE_PARAM), allocator); LOGGER.debug("Schema Fields\n{}", fieldList); for (Field field : fieldList) { diff --git a/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsMetadataHandlerTest.java b/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsMetadataHandlerTest.java index 3f9e4ab632..4e51b66f99 100644 --- 
a/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsMetadataHandlerTest.java +++ b/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsMetadataHandlerTest.java @@ -41,15 +41,6 @@ import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; import com.amazonaws.athena.connectors.gcs.storage.StorageMetadata; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.glue.AWSGlue; -import com.amazonaws.services.glue.AWSGlueClientBuilder; -import com.amazonaws.services.glue.model.Column; -import com.amazonaws.services.glue.model.Database; -import com.amazonaws.services.glue.model.GetDatabasesResult; -import com.amazonaws.services.glue.model.GetTableResult; -import com.amazonaws.services.glue.model.GetTablesResult; -import com.amazonaws.services.glue.model.StorageDescriptor; -import com.amazonaws.services.glue.model.Table; import com.google.api.gax.paging.Page; import com.google.auth.oauth2.GoogleCredentials; import com.google.auth.oauth2.ServiceAccountCredentials; @@ -74,6 +65,16 @@ import org.mockito.Mockito; import org.mockito.MockitoAnnotations; import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.services.glue.GlueClient; +import software.amazon.awssdk.services.glue.model.Column; +import software.amazon.awssdk.services.glue.model.Database; +import software.amazon.awssdk.services.glue.model.GetDatabasesRequest; +import software.amazon.awssdk.services.glue.model.GetDatabasesResponse; +import software.amazon.awssdk.services.glue.model.GetTablesRequest; +import software.amazon.awssdk.services.glue.model.GetTablesResponse; +import software.amazon.awssdk.services.glue.model.StorageDescriptor; +import software.amazon.awssdk.services.glue.model.Table; +import software.amazon.awssdk.services.glue.paginators.GetDatabasesIterable; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; import 
software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; @@ -128,7 +129,7 @@ public class GcsMetadataHandlerTest private BlockAllocator blockAllocator; private FederatedIdentity federatedIdentity; @Mock - private AWSGlue awsGlue; + private GlueClient awsGlue; @Mock private SecretsManagerClient secretsManager; @Mock @@ -140,7 +141,6 @@ public class GcsMetadataHandlerTest private MockedStatic mockedServiceAccountCredentials; private MockedStatic mockedServiceGoogleCredentials; private MockedStatic mockedAWSSecretsManagerClientBuilder; - private MockedStatic mockedAWSGlueClientBuilder; @Before public void setUp() throws Exception @@ -150,7 +150,6 @@ public void setUp() throws Exception mockedServiceAccountCredentials = mockStatic(ServiceAccountCredentials.class); mockedServiceGoogleCredentials = mockStatic(GoogleCredentials.class); mockedAWSSecretsManagerClientBuilder = mockStatic(SecretsManagerClient.class); - mockedAWSGlueClientBuilder = mockStatic(AWSGlueClientBuilder.class); Storage storage = mock(Storage.class); Blob blob = mock(Blob.class); @@ -176,7 +175,6 @@ public void setUp() throws Exception .secretString("{\"gcs_credential_keys\": \"test\"}") .build(); Mockito.when(secretsManager.getSecretValue(Mockito.isA(GetSecretValueRequest.class))).thenReturn(getSecretValueResponse); - Mockito.when(AWSGlueClientBuilder.defaultClient()).thenReturn(awsGlue); gcsMetadataHandler = new GcsMetadataHandler(new LocalKeyFactory(), secretsManager, athena, "spillBucket", "spillPrefix", awsGlue, allocator, ImmutableMap.of()); blockAllocator = new BlockAllocatorImpl(); federatedIdentity = Mockito.mock(FederatedIdentity.class); @@ -189,18 +187,20 @@ public void tearDown() mockedServiceAccountCredentials.close(); mockedServiceGoogleCredentials.close(); mockedAWSSecretsManagerClientBuilder.close(); - mockedAWSGlueClientBuilder.close(); } @Test public void testDoListSchemaNames() throws Exception { - GetDatabasesResult result = new 
GetDatabasesResult().withDatabaseList( - new Database().withName(DATABASE_NAME).withLocationUri(S3_GOOGLE_CLOUD_STORAGE_FLAG), - new Database().withName(DATABASE_NAME1).withLocationUri(S3_GOOGLE_CLOUD_STORAGE_FLAG)); + GetDatabasesResponse response = GetDatabasesResponse.builder().databaseList( + Database.builder().name(DATABASE_NAME).locationUri(S3_GOOGLE_CLOUD_STORAGE_FLAG).build(), + Database.builder().name(DATABASE_NAME1).locationUri(S3_GOOGLE_CLOUD_STORAGE_FLAG).build() + ).build(); ListSchemasRequest listSchemasRequest = new ListSchemasRequest(federatedIdentity, QUERY_ID, CATALOG); - Mockito.when(awsGlue.getDatabases(any())).thenReturn(result); + GetDatabasesIterable mockIterable = mock(GetDatabasesIterable.class); + when(mockIterable.stream()).thenReturn(Collections.singletonList(response).stream()); + when(awsGlue.getDatabasesPaginator(any(GetDatabasesRequest.class))).thenReturn(mockIterable); ListSchemasResponse schemaNamesResponse = gcsMetadataHandler.doListSchemaNames(blockAllocator, listSchemasRequest); List expectedSchemaNames = new ArrayList<>(); expectedSchemaNames.add(DATABASE_NAME); @@ -219,19 +219,22 @@ public void testDoListSchemaNamesThrowsException() throws Exception @Test public void testDoListTables() throws Exception { - GetTablesResult getTablesResult = new GetTablesResult(); List
tableList = new ArrayList<>(); - tableList.add(new Table().withName(TABLE_1) - .withParameters(ImmutableMap.of(CLASSIFICATION_GLUE_TABLE_PARAM, PARQUET)) - .withStorageDescriptor(new StorageDescriptor() - .withLocation(LOCATION))); - tableList.add(new Table().withName(TABLE_2) - .withParameters(ImmutableMap.of()) - .withStorageDescriptor(new StorageDescriptor() - .withLocation(LOCATION) - .withParameters(ImmutableMap.of(CLASSIFICATION_GLUE_TABLE_PARAM, PARQUET)))); - getTablesResult.setTableList(tableList); - Mockito.when(awsGlue.getTables(any())).thenReturn(getTablesResult); + tableList.add(Table.builder().name(TABLE_1) + .parameters(ImmutableMap.of(CLASSIFICATION_GLUE_TABLE_PARAM, PARQUET)) + .storageDescriptor(StorageDescriptor.builder() + .location(LOCATION) + .build()) + .build()); + tableList.add(Table.builder().name(TABLE_2) + .parameters(ImmutableMap.of()) + .storageDescriptor(StorageDescriptor.builder() + .location(LOCATION) + .parameters(ImmutableMap.of(CLASSIFICATION_GLUE_TABLE_PARAM, PARQUET)) + .build()) + .build()); + GetTablesResponse getTablesResponse = GetTablesResponse.builder().tableList(tableList).build(); + Mockito.when(awsGlue.getTables(any(GetTablesRequest.class))).thenReturn(getTablesResponse); ListTablesRequest listTablesRequest = new ListTablesRequest(federatedIdentity, QUERY_ID, CATALOG, SCHEMA_NAME, TEST_TOKEN, 50); ListTablesResponse tableNamesResponse = gcsMetadataHandler.doListTables(blockAllocator, listTablesRequest); assertEquals(2, tableNamesResponse.getTables().size()); @@ -255,20 +258,24 @@ public void doGetTable() metadataSchema.put("dataFormat", PARQUET); Schema schema = new Schema(asList(field), metadataSchema); GetTableRequest getTableRequest = new GetTableRequest(federatedIdentity, QUERY_ID, "gcs", new TableName(SCHEMA_NAME, "testtable"), Collections.emptyMap()); - Table table = new Table(); - table.setName(TABLE_1); - table.setDatabaseName(DATABASE_NAME); - 
table.setParameters(ImmutableMap.of(CLASSIFICATION_GLUE_TABLE_PARAM, PARQUET)); - table.setStorageDescriptor(new StorageDescriptor() - .withLocation(LOCATION).withColumns(new Column().withName("name").withType("String"))); - table.setCatalogId(CATALOG); List columns = ImmutableList.of( createColumn("name", "String") ); - table.setPartitionKeys(columns); - GetTableResult getTableResult = new GetTableResult(); - getTableResult.setTable(table); - Mockito.when(awsGlue.getTable(any())).thenReturn(getTableResult); + Table table = Table.builder() + .name(TABLE_1) + .databaseName(DATABASE_NAME) + .parameters(ImmutableMap.of(CLASSIFICATION_GLUE_TABLE_PARAM, PARQUET)) + .storageDescriptor(StorageDescriptor.builder() + .location(LOCATION) + .columns(Column.builder().name("name").type("String").build()) + .build()) + .catalogId(CATALOG) + .partitionKeys(columns) + .build(); + software.amazon.awssdk.services.glue.model.GetTableResponse getTableResponse = software.amazon.awssdk.services.glue.model.GetTableResponse.builder() + .table(table) + .build(); + Mockito.when(awsGlue.getTable(any(software.amazon.awssdk.services.glue.model.GetTableRequest.class))).thenReturn(getTableResponse); StorageMetadata storageMetadata = mock(StorageMetadata.class); FieldUtils.writeField(gcsMetadataHandler, "datasource", storageMetadata, true); Mockito.when(storageMetadata.buildTableSchema(any(), any())).thenReturn(schema); @@ -284,24 +291,28 @@ public void testGetPartitions() throws Exception .addField("year", new ArrowType.Utf8()) .addField("month", new ArrowType.Utf8()) .addField("day", new ArrowType.Utf8()).build(); - Table table = new Table(); - table.setName(TABLE_1); - table.setDatabaseName(DATABASE_NAME); - table.setParameters(ImmutableMap.of(CLASSIFICATION_GLUE_TABLE_PARAM, PARQUET, - PARTITION_PATTERN_KEY, "year=${year}/birth_month${month}/${day}") - ); - table.setStorageDescriptor(new StorageDescriptor() - .withLocation(LOCATION).withColumns(new Column())); - table.setCatalogId(CATALOG); 
List columns = ImmutableList.of( createColumn("year", "varchar"), createColumn("month", "varchar"), createColumn("day", "varchar") ); - table.setPartitionKeys(columns); - GetTableResult getTableResult = new GetTableResult(); - getTableResult.setTable(table); - Mockito.when(awsGlue.getTable(any())).thenReturn(getTableResult); + Table table = Table.builder() + .name(TABLE_1) + .databaseName(DATABASE_NAME) + .parameters(ImmutableMap.of(CLASSIFICATION_GLUE_TABLE_PARAM, PARQUET, + PARTITION_PATTERN_KEY, "year=${year}/birth_month${month}/${day}") + ) + .storageDescriptor(StorageDescriptor.builder() + .location(LOCATION) + .columns(Column.builder().build()) + .build()) + .catalogId(CATALOG) + .partitionKeys(columns) + .build(); + software.amazon.awssdk.services.glue.model.GetTableResponse getTableResponse = software.amazon.awssdk.services.glue.model.GetTableResponse.builder() + .table(table) + .build(); + Mockito.when(awsGlue.getTable(any(software.amazon.awssdk.services.glue.model.GetTableRequest.class))).thenReturn(getTableResponse); GetTableLayoutRequest getTableLayoutRequest = Mockito.mock(GetTableLayoutRequest.class); Mockito.when(getTableLayoutRequest.getTableName()).thenReturn(new TableName(DATABASE_NAME, TABLE_1)); Mockito.when(getTableLayoutRequest.getSchema()).thenReturn(schema); @@ -321,17 +332,15 @@ public void testDoGetSplits() throws Exception QUERY_ID, CATALOG, TABLE_NAME, partitions, ImmutableList.of("year"), new Constraints(new HashMap<>(), Collections.emptyList(), Collections.emptyList(), DEFAULT_NO_LIMIT, Collections.emptyMap()), null); QueryStatusChecker queryStatusChecker = mock(QueryStatusChecker.class); - GetTableResult getTableResult = mock(GetTableResult.class); - StorageDescriptor storageDescriptor = mock(StorageDescriptor.class); - when(storageDescriptor.getLocation()).thenReturn(LOCATION); - Table table = mock(Table.class); - when(table.getStorageDescriptor()).thenReturn(storageDescriptor); - 
when(table.getParameters()).thenReturn(ImmutableMap.of(PARTITION_PATTERN_KEY, "year=${year}/", CLASSIFICATION_GLUE_TABLE_PARAM, PARQUET)); - when(awsGlue.getTable(any())).thenReturn(getTableResult); - when(getTableResult.getTable()).thenReturn(table); - List columns = ImmutableList.of( - createColumn("year", "varchar") - ); + StorageDescriptor storageDescriptor = StorageDescriptor.builder().location(LOCATION).build(); + Table table = Table.builder() + .storageDescriptor(storageDescriptor) + .parameters(ImmutableMap.of(PARTITION_PATTERN_KEY, "year=${year}/", CLASSIFICATION_GLUE_TABLE_PARAM, PARQUET)) + .build(); + software.amazon.awssdk.services.glue.model.GetTableResponse getTableResponse = software.amazon.awssdk.services.glue.model.GetTableResponse.builder() + .table(table) + .build(); + when(awsGlue.getTable(any(software.amazon.awssdk.services.glue.model.GetTableRequest.class))).thenReturn(getTableResponse); GetSplitsResponse response = gcsMetadataHandler.doGetSplits(blockAllocator, request); assertEquals(2, response.getSplits().size()); @@ -358,18 +367,17 @@ public void testDoGetSplitsProperty() throws Exception QUERY_ID, CATALOG, TABLE_NAME, partitions, ImmutableList.of("yearCol", "monthCol"), new Constraints(Collections.emptyMap(), Collections.emptyList(), Collections.emptyList(), DEFAULT_NO_LIMIT, Collections.emptyMap()), null); QueryStatusChecker queryStatusChecker = mock(QueryStatusChecker.class); - GetTableResult getTableResult = mock(GetTableResult.class); - StorageDescriptor storageDescriptor = mock(StorageDescriptor.class); - when(storageDescriptor.getLocation()).thenReturn(LOCATION); - Table table = mock(Table.class); - when(table.getStorageDescriptor()).thenReturn(storageDescriptor); - when(table.getParameters()).thenReturn(ImmutableMap.of(PARTITION_PATTERN_KEY, "year=${yearCol}/month${monthCol}/", CLASSIFICATION_GLUE_TABLE_PARAM, PARQUET)); - when(awsGlue.getTable(any())).thenReturn(getTableResult); - 
when(getTableResult.getTable()).thenReturn(table); - List columns = ImmutableList.of( - createColumn("yearCol", "varchar"), - createColumn("monthCol", "varchar") - ); + StorageDescriptor storageDescriptor = StorageDescriptor.builder() + .location(LOCATION) + .build(); + Table table = Table.builder() + .storageDescriptor(storageDescriptor) + .parameters(ImmutableMap.of(PARTITION_PATTERN_KEY, "year=${yearCol}/month${monthCol}/", CLASSIFICATION_GLUE_TABLE_PARAM, PARQUET)) + .build(); + software.amazon.awssdk.services.glue.model.GetTableResponse getTableResponse = software.amazon.awssdk.services.glue.model.GetTableResponse.builder() + .table(table) + .build(); + when(awsGlue.getTable(any(software.amazon.awssdk.services.glue.model.GetTableRequest.class))).thenReturn(getTableResponse); GetSplitsResponse response = gcsMetadataHandler.doGetSplits(blockAllocator, request); assertEquals(4, response.getSplits().size()); assertEquals(ImmutableList.of("2016", "2017", "2018", "2019"), response.getSplits().stream().map(split -> split.getProperties().get("yearCol")).sorted().collect(Collectors.toList())); @@ -384,17 +392,17 @@ public void testDoGetSplitsException() throws Exception QUERY_ID, CATALOG, TABLE_NAME, partitions, ImmutableList.of("gcs_file_format"), new Constraints(Collections.emptyMap(), Collections.emptyList(), Collections.emptyList(), DEFAULT_NO_LIMIT, Collections.emptyMap()), null); QueryStatusChecker queryStatusChecker = mock(QueryStatusChecker.class); - GetTableResult getTableResult = mock(GetTableResult.class); - StorageDescriptor storageDescriptor = mock(StorageDescriptor.class); - when(storageDescriptor.getLocation()).thenReturn(LOCATION); - Table table = mock(Table.class); - when(table.getStorageDescriptor()).thenReturn(storageDescriptor); - when(table.getParameters()).thenReturn(ImmutableMap.of(PARTITION_PATTERN_KEY, "year=${gcs_file_format}/", CLASSIFICATION_GLUE_TABLE_PARAM, PARQUET)); - when(awsGlue.getTable(any())).thenReturn(getTableResult); - 
when(getTableResult.getTable()).thenReturn(table); - List columns = ImmutableList.of( - createColumn("gcs_file_format", "varchar") - ); + StorageDescriptor storageDescriptor = StorageDescriptor.builder() + .location(LOCATION) + .build(); + Table table = Table.builder() + .storageDescriptor(storageDescriptor) + .parameters(ImmutableMap.of(PARTITION_PATTERN_KEY, "year=${gcs_file_format}/", CLASSIFICATION_GLUE_TABLE_PARAM, PARQUET)) + .build(); + software.amazon.awssdk.services.glue.model.GetTableResponse getTableResponse = software.amazon.awssdk.services.glue.model.GetTableResponse.builder() + .table(table) + .build(); + when(awsGlue.getTable(any(software.amazon.awssdk.services.glue.model.GetTableRequest.class))).thenReturn(getTableResponse); gcsMetadataHandler.doGetSplits(blockAllocator, request); } } diff --git a/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsTestUtils.java b/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsTestUtils.java index c99e26807d..50cae17b44 100644 --- a/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsTestUtils.java +++ b/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsTestUtils.java @@ -20,8 +20,8 @@ package com.amazonaws.athena.connectors.gcs; import com.amazonaws.athena.connector.lambda.data.SchemaBuilder; -import com.amazonaws.services.glue.model.Column; import com.google.common.collect.ImmutableMap; +import software.amazon.awssdk.services.glue.model.Column; import org.apache.arrow.memory.BufferAllocator; import org.apache.arrow.memory.RootAllocator; import org.apache.arrow.vector.BitVector; @@ -163,9 +163,10 @@ public static VectorSchemaRoot getVectorSchemaRoot() public static Column createColumn(String name, String type) { - Column column = new Column(); - column.setName(name); - column.setType(type); + Column column = Column.builder() + .name(name) + .type(type) + .build(); return column; } public static Map createSummaryWithLValueRangeEqual(String fieldName, ArrowType 
fieldType, Object fieldValue) diff --git a/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/common/PartitionUtilTest.java b/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/common/PartitionUtilTest.java index 87e845716f..2af0a10c03 100644 --- a/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/common/PartitionUtilTest.java +++ b/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/common/PartitionUtilTest.java @@ -19,13 +19,13 @@ */ package com.amazonaws.athena.connectors.gcs.common; -import com.amazonaws.services.glue.model.Column; -import com.amazonaws.services.glue.model.StorageDescriptor; -import com.amazonaws.services.glue.model.Table; import org.junit.Before; import org.junit.Test; -import java.util.AbstractMap; +import software.amazon.awssdk.services.glue.model.Column; +import software.amazon.awssdk.services.glue.model.StorageDescriptor; +import software.amazon.awssdk.services.glue.model.Table; + import java.util.List; import java.util.Map; import java.util.Optional; @@ -36,32 +36,32 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; public class PartitionUtilTest { - private Table table; + private StorageDescriptor storageDescriptor; + private List defaultColumns; @Before public void setup() { - StorageDescriptor storageDescriptor = mock(StorageDescriptor.class); - when(storageDescriptor.getLocation()).thenReturn("gs://mydatalake1test/birthday/"); - table = mock(Table.class); - when(table.getStorageDescriptor()).thenReturn(storageDescriptor); - - List columns = com.google.common.collect.ImmutableList.of( + storageDescriptor = StorageDescriptor.builder() + .location("gs://mydatalake1test/birthday/") + .build(); + defaultColumns = com.google.common.collect.ImmutableList.of( createColumn("year", "bigint"), createColumn("month", "int") ); - 
when(table.getPartitionKeys()).thenReturn(columns); } @Test(expected = IllegalArgumentException.class) public void testFolderNameRegExPatterExpectException() { - when(table.getParameters()).thenReturn(com.google.common.collect.ImmutableMap.of(PARTITION_PATTERN_KEY, "year=${year}/birth_month${month}/${day}")); + Table table = Table.builder() + .storageDescriptor(storageDescriptor) + .partitionKeys(defaultColumns) + .parameters(com.google.common.collect.ImmutableMap.of(PARTITION_PATTERN_KEY, "year=${year}/birth_month${month}/${day}")) + .build(); Optional optionalRegEx = PartitionUtil.getRegExExpression(table); assertTrue(optionalRegEx.isPresent()); } @@ -69,7 +69,11 @@ public void testFolderNameRegExPatterExpectException() @Test(expected = IllegalArgumentException.class) public void testFolderNameRegExPatter() { - when(table.getParameters()).thenReturn(com.google.common.collect.ImmutableMap.of(PARTITION_PATTERN_KEY, "year=${year}/birth_month${month}/")); + Table table = Table.builder() + .storageDescriptor(storageDescriptor) + .partitionKeys(defaultColumns) + .parameters(com.google.common.collect.ImmutableMap.of(PARTITION_PATTERN_KEY, "year=${year}/birth_month${month}/")) + .build(); Optional optionalRegEx = PartitionUtil.getRegExExpression(table); assertTrue(optionalRegEx.isPresent()); assertFalse("Expression shouldn't contain a '{' character", optionalRegEx.get().contains("{")); @@ -87,7 +91,11 @@ public void dynamicFolderExpressionWithDigits() "year=2001/birth_month01/", "month01/" ); - when(table.getParameters()).thenReturn(com.google.common.collect.ImmutableMap.of(PARTITION_PATTERN_KEY, "year=${year}/birth_month${month}/")); + Table table = Table.builder() + .storageDescriptor(storageDescriptor) + .partitionKeys(defaultColumns) + .parameters(com.google.common.collect.ImmutableMap.of(PARTITION_PATTERN_KEY, "year=${year}/birth_month${month}/")) + .build(); Optional optionalRegEx = PartitionUtil.getRegExExpression(table); assertTrue(optionalRegEx.isPresent()); 
Pattern folderMatchPattern = Pattern.compile(optionalRegEx.get()); @@ -112,12 +120,14 @@ public void dynamicFolderExpressionWithDefaultsDates() "creation_dt=2012-01-01/", "month01/" ); - // mock - when(table.getParameters()).thenReturn(com.google.common.collect.ImmutableMap.of(PARTITION_PATTERN_KEY, "creation_dt=${creation_dt}/")); List columns = com.google.common.collect.ImmutableList.of( createColumn("creation_dt", "date") ); - when(table.getPartitionKeys()).thenReturn(columns); + Table table = Table.builder() + .storageDescriptor(storageDescriptor) + .parameters(com.google.common.collect.ImmutableMap.of(PARTITION_PATTERN_KEY, "creation_dt=${creation_dt}/")) + .partitionKeys(columns) + .build(); // build regex Optional optionalRegEx = PartitionUtil.getRegExExpression(table); assertTrue(optionalRegEx.isPresent()); @@ -144,12 +154,14 @@ public void dynamicFolderExpressionWithQuotedVarchar() // failed "state='UP'/", "month01/" ); - // mock - when(table.getParameters()).thenReturn(com.google.common.collect.ImmutableMap.of(PARTITION_PATTERN_KEY, "state='${stateName}'/")); List columns = com.google.common.collect.ImmutableList.of( createColumn("stateName", "string") ); - when(table.getPartitionKeys()).thenReturn(columns); + Table table = Table.builder() + .storageDescriptor(storageDescriptor) + .parameters(com.google.common.collect.ImmutableMap.of(PARTITION_PATTERN_KEY, "state='${stateName}'/")) + .partitionKeys(columns) + .build(); // build regex Optional optionalRegEx = PartitionUtil.getRegExExpression(table); assertTrue(optionalRegEx.isPresent()); @@ -175,12 +187,13 @@ public void dynamicFolderExpressionWithUnquotedVarchar() "state=UP/", "month01/" ); - // mock - when(table.getParameters()).thenReturn(com.google.common.collect.ImmutableMap.of(PARTITION_PATTERN_KEY, "state=${stateName}/")); List columns = com.google.common.collect.ImmutableList.of( createColumn("stateName", "string") ); - when(table.getPartitionKeys()).thenReturn(columns); + Table table = 
Table.builder() + .parameters(com.google.common.collect.ImmutableMap.of(PARTITION_PATTERN_KEY, "state=${stateName}/")) + .partitionKeys(columns) + .build(); // build regex Optional optionalRegEx = PartitionUtil.getRegExExpression(table); assertTrue(optionalRegEx.isPresent()); @@ -199,11 +212,14 @@ public void dynamicFolderExpressionWithUnquotedVarchar() public void testGetHivePartitions() { String partitionPatten = "year=${year}/birth_month${month}/"; - // mock - when(table.getParameters()).thenReturn(com.google.common.collect.ImmutableMap.of(PARTITION_PATTERN_KEY, partitionPatten )); + Table table = Table.builder() + .storageDescriptor(storageDescriptor) + .partitionKeys(defaultColumns) + .parameters(com.google.common.collect.ImmutableMap.of(PARTITION_PATTERN_KEY, partitionPatten)) + .build(); Optional optionalRegEx = PartitionUtil.getRegExExpression(table); assertTrue(optionalRegEx.isPresent()); - Map partitions = PartitionUtil.getPartitionColumnData(partitionPatten, "year=2000/birth_month09/", optionalRegEx.get(), table.getPartitionKeys()); + Map partitions = PartitionUtil.getPartitionColumnData(partitionPatten, "year=2000/birth_month09/", optionalRegEx.get(), table.partitionKeys()); assertFalse("Map of partition values is empty", partitions.isEmpty()); assertEquals("Partitions map size is more than 2", 2, partitions.size()); // Assert partition 1 @@ -216,20 +232,21 @@ public void testGetHivePartitions() @Test(expected = IllegalArgumentException.class) public void testGetHiveNonHivePartitions() { - // mock List columns = com.google.common.collect.ImmutableList.of( createColumn("year", "bigint"), createColumn("month", "int"), createColumn("day", "int") ); - // mock - when(table.getPartitionKeys()).thenReturn(columns); - String partitionPatten = "year=${year}/birth_month${month}/${day}"; - when(table.getParameters()).thenReturn(com.google.common.collect.ImmutableMap.of(PARTITION_PATTERN_KEY, partitionPatten + "/")); + String partitionPattern = 
"year=${year}/birth_month${month}/${day}"; + Table table = Table.builder() + .storageDescriptor(storageDescriptor) + .parameters(com.google.common.collect.ImmutableMap.of(PARTITION_PATTERN_KEY, partitionPattern + "/")) + .partitionKeys(columns) + .build(); Optional optionalRegEx = PartitionUtil.getRegExExpression(table); assertTrue(optionalRegEx.isPresent()); - Map partitions = PartitionUtil.getPartitionColumnData(partitionPatten, "year=2000/birth_month09/12/", - optionalRegEx.get(), table.getPartitionKeys()); + Map partitions = PartitionUtil.getPartitionColumnData(partitionPattern, "year=2000/birth_month09/12/", + optionalRegEx.get(), table.partitionKeys()); assertFalse("List of column prefix is empty", partitions.isEmpty()); assertEquals("Partition size is more than 3", 3, partitions.size()); // Assert partition 1 @@ -251,8 +268,11 @@ public void testGetPartitionFolders() createColumn("month", "int"), createColumn("day", "int") ); - when(table.getPartitionKeys()).thenReturn(columns); String partitionPattern = "year=${year}/birth_month${month}/${day}"; + Table table = Table.builder() + .storageDescriptor(storageDescriptor) + .partitionKeys(columns) + .build(); // list of folders in a bucket List bucketFolders = com.google.common.collect.ImmutableList.of( @@ -271,7 +291,7 @@ public void testGetPartitionFolders() Pattern folderMatchingPattern = Pattern.compile(optionalRegEx.get()); for (String folder : bucketFolders) { if (folderMatchingPattern.matcher(folder).matches()) { - Map partitions = PartitionUtil.getPartitionColumnData(partitionPattern, folder, optionalRegEx.get(), table.getPartitionKeys()); + Map partitions = PartitionUtil.getPartitionColumnData(partitionPattern, folder, optionalRegEx.get(), table.partitionKeys()); assertFalse("List of storage partitions is empty", partitions.isEmpty()); assertEquals("Partition size is more than 3", 3, partitions.size()); } @@ -286,10 +306,12 @@ public void testHivePartition() createColumn("statename", "string"), 
createColumn("zipcode", "varchar") ); - when(table.getPartitionKeys()).thenReturn(columns); String partitionPattern = "StateName=${statename}/ZipCode=${zipcode}"; - // mock - when(table.getParameters()).thenReturn(com.google.common.collect.ImmutableMap.of(PARTITION_PATTERN_KEY, partitionPattern + "/")); + Table table = Table.builder() + .storageDescriptor(storageDescriptor) + .parameters(com.google.common.collect.ImmutableMap.of(PARTITION_PATTERN_KEY, partitionPattern + "/")) + .partitionKeys(columns) + .build(); // list of folders in a bucket List bucketFolders = com.google.common.collect.ImmutableList.of( "StateName=WB/ZipCode=700099/", @@ -311,7 +333,7 @@ public void testHivePartition() folder = folder.substring(1); } if (folderMatchingPattern.matcher(folder).matches()) { - Map partitions = PartitionUtil.getPartitionColumnData(partitionPattern, folder, optionalRegEx.get(), table.getPartitionKeys()); + Map partitions = PartitionUtil.getPartitionColumnData(partitionPattern, folder, optionalRegEx.get(), table.partitionKeys()); assertFalse("List of storage partitions is empty", partitions.isEmpty()); assertEquals("Partition size is more than 2", 2, partitions.size()); matchCount++; @@ -329,10 +351,12 @@ public void testNonHivePartition() createColumn("district", "varchar"), createColumn("zipcode", "string") ); - when(table.getPartitionKeys()).thenReturn(columns); String partitionPattern = "${statename}/${district}/${zipcode}"; - // mock - when(table.getParameters()).thenReturn(com.google.common.collect.ImmutableMap.of(PARTITION_PATTERN_KEY, partitionPattern + "/")); + Table table = Table.builder() + .storageDescriptor(storageDescriptor) + .parameters(com.google.common.collect.ImmutableMap.of(PARTITION_PATTERN_KEY, partitionPattern + "/")) + .partitionKeys(columns) + .build(); // list of folders in a bucket List bucketFolders = com.google.common.collect.ImmutableList.of( "WB/Kolkata/700099/", @@ -351,7 +375,7 @@ public void testNonHivePartition() int matchCount = 0; 
for (String folder : bucketFolders) { if (folderMatchingPattern.matcher(folder).matches()) { - Map partitions = PartitionUtil.getPartitionColumnData(partitionPattern, folder, optionalRegEx.get(), table.getPartitionKeys()); + Map partitions = PartitionUtil.getPartitionColumnData(partitionPattern, folder, optionalRegEx.get(), table.partitionKeys()); assertFalse("List of storage partitions is empty", partitions.isEmpty()); assertEquals("Partition size is more than 3", 3, partitions.size()); matchCount++; @@ -369,10 +393,12 @@ public void testMixedLayoutStringOnlyPartition() createColumn("district", "varchar"), createColumn("zipcode", "string") ); - when(table.getPartitionKeys()).thenReturn(columns); String partitionPattern = "StateName=${statename}/District${district}/${zipcode}"; - // mock - when(table.getParameters()).thenReturn(com.google.common.collect.ImmutableMap.of(PARTITION_PATTERN_KEY, partitionPattern + "/")); + Table table = Table.builder() + .storageDescriptor(storageDescriptor) + .parameters(com.google.common.collect.ImmutableMap.of(PARTITION_PATTERN_KEY, partitionPattern + "/")) + .partitionKeys(columns) + .build(); // list of folders in a bucket List bucketFolders = com.google.common.collect.ImmutableList.of( "StateName=WB/DistrictKolkata/700099/", @@ -391,7 +417,7 @@ public void testMixedLayoutStringOnlyPartition() int matchCount = 0; for (String folder : bucketFolders) { if (folderMatchingPattern.matcher(folder).matches()) { - Map partitions = PartitionUtil.getPartitionColumnData(partitionPattern, folder, optionalRegEx.get(), table.getPartitionKeys()); + Map partitions = PartitionUtil.getPartitionColumnData(partitionPattern, folder, optionalRegEx.get(), table.partitionKeys()); assertFalse("List of storage partitions is empty", partitions.isEmpty()); assertEquals("Partition size is more than 3", 3, partitions.size()); matchCount++; diff --git a/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/filter/FilterExpressionBuilderTest.java 
b/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/filter/FilterExpressionBuilderTest.java index b114faa5b0..610aa37ec9 100644 --- a/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/filter/FilterExpressionBuilderTest.java +++ b/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/filter/FilterExpressionBuilderTest.java @@ -19,33 +19,19 @@ */ package com.amazonaws.athena.connectors.gcs.filter; -import com.amazonaws.athena.connector.lambda.data.Block; -import com.amazonaws.athena.connector.lambda.data.BlockAllocatorImpl; -import com.amazonaws.athena.connector.lambda.data.SchemaBuilder; import com.amazonaws.athena.connector.lambda.domain.predicate.Constraints; -import com.amazonaws.athena.connector.lambda.domain.predicate.Marker; -import com.amazonaws.athena.connector.lambda.domain.predicate.Range; -import com.amazonaws.athena.connector.lambda.domain.predicate.SortedRangeSet; -import com.amazonaws.athena.connector.lambda.domain.predicate.ValueSet; import com.amazonaws.athena.connectors.gcs.GcsTestUtils; -import com.amazonaws.services.glue.model.Column; -import org.apache.arrow.vector.complex.reader.FieldReader; import org.apache.arrow.vector.types.pojo.ArrowType; -import org.apache.arrow.vector.types.pojo.Field; -import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Test; import org.junit.runner.RunWith; -import org.mockito.Mockito; import org.mockito.junit.MockitoJUnitRunner; - +import software.amazon.awssdk.services.glue.model.Column; import java.util.Collections; -import java.util.List; import java.util.Map; import static com.amazonaws.athena.connector.lambda.domain.predicate.Constraints.DEFAULT_NO_LIMIT; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.mockito.ArgumentMatchers.anyString; @RunWith(MockitoJUnitRunner.class) public class FilterExpressionBuilderTest @@ -54,7 +40,7 @@ public class FilterExpressionBuilderTest public void testGetExpressions() { Map>> result = 
FilterExpressionBuilder.getConstraintsForPartitionedColumns( - com.google.common.collect.ImmutableList.of(new Column().withName("year")), + com.google.common.collect.ImmutableList.of(Column.builder().name("year").build()), new Constraints(GcsTestUtils.createSummaryWithLValueRangeEqual("year", new ArrowType.Utf8(), "1"), Collections.emptyList(), Collections.emptyList(), DEFAULT_NO_LIMIT, Collections.emptyMap())); assertEquals(result.size(), 1); diff --git a/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/storage/StorageMetadataTest.java b/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/storage/StorageMetadataTest.java index 55a8139b01..b49ffec104 100644 --- a/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/storage/StorageMetadataTest.java +++ b/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/storage/StorageMetadataTest.java @@ -23,12 +23,6 @@ import com.amazonaws.athena.connector.lambda.domain.TableName; import com.amazonaws.athena.connector.lambda.domain.predicate.Constraints; import com.amazonaws.athena.connectors.gcs.GenericGcsTest; -import com.amazonaws.services.glue.AWSGlue; -import com.amazonaws.services.glue.AWSGlueClient; -import com.amazonaws.services.glue.model.Column; -import com.amazonaws.services.glue.model.GetTableResult; -import com.amazonaws.services.glue.model.StorageDescriptor; -import com.amazonaws.services.glue.model.Table; import com.google.api.gax.paging.Page; import com.google.auth.oauth2.GoogleCredentials; import com.google.auth.oauth2.ServiceAccountCredentials; @@ -53,6 +47,10 @@ import org.mockito.Mockito; import org.mockito.MockitoAnnotations; import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.services.glue.GlueClient; +import software.amazon.awssdk.services.glue.model.Column; +import software.amazon.awssdk.services.glue.model.StorageDescriptor; +import software.amazon.awssdk.services.glue.model.Table; import java.util.ArrayList; import java.util.Collection; @@ 
-141,13 +139,15 @@ private void storageMock() throws Exception @Test public void testBuildTableSchema() throws Exception { - Table table = new Table(); - table.setName("birthday"); - table.setDatabaseName("default"); - table.setParameters(ImmutableMap.of("classification", "parquet")); - table.setStorageDescriptor(new StorageDescriptor() - .withLocation("gs://mydatalake1test/birthday/")); - table.setCatalogId("catalog"); + Table table = Table.builder() + .name("birthday") + .databaseName("default") + .parameters(ImmutableMap.of("classification", "parquet")) + .storageDescriptor(StorageDescriptor.builder() + .location("gs://mydatalake1test/birthday/") + .build()) + .catalogId("catalog") + .build(); storageMetadata = mock(StorageMetadata.class); storageMock(); when(storageMetadata.buildTableSchema(any(), any())).thenCallRealMethod(); @@ -166,7 +166,7 @@ public void testGetPartitionFolders() throws Exception { //single partition getStorageList(ImmutableList.of("year=2000/birthday.parquet", "year=2000/", "year=2000/birthday1.parquet")); - AWSGlue glue = Mockito.mock(AWSGlueClient.class); + GlueClient glue = Mockito.mock(GlueClient.class); List fieldList = ImmutableList.of(new Field("year", FieldType.nullable(new ArrowType.Int(64, true)), null)); List partKeys = ImmutableList.of(createColumn("year", "varchar")); Schema schema = getSchema(glue, fieldList, partKeys, "year=${year}/"); @@ -235,19 +235,23 @@ public void testGetPartitionFolders() throws Exception } @NotNull - private Schema getSchema(AWSGlue glue, List fieldList, List partKeys, String partitionPattern) + private Schema getSchema(GlueClient glue, List fieldList, List partKeys, String partitionPattern) { Map metadataSchema = new HashMap<>(); metadataSchema.put("dataFormat", "parquet"); Schema schema = new Schema(fieldList, metadataSchema); - GetTableResult getTablesResult = new GetTableResult(); - getTablesResult.setTable(new Table().withName(TABLE_1) - 
.withParameters(ImmutableMap.of(CLASSIFICATION_GLUE_TABLE_PARAM, PARQUET, - PARTITION_PATTERN_KEY, partitionPattern)) - .withPartitionKeys(partKeys) - .withStorageDescriptor(new StorageDescriptor() - .withLocation(LOCATION))); - Mockito.when(glue.getTable(any())).thenReturn(getTablesResult); + software.amazon.awssdk.services.glue.model.GetTableResponse getTableResponse = software.amazon.awssdk.services.glue.model.GetTableResponse.builder() + .table(Table.builder() + .name(TABLE_1) + .parameters(ImmutableMap.of(CLASSIFICATION_GLUE_TABLE_PARAM, PARQUET, + PARTITION_PATTERN_KEY, partitionPattern)) + .partitionKeys(partKeys) + .storageDescriptor(StorageDescriptor.builder() + .location(LOCATION) + .build()) + .build()) + .build(); + Mockito.when(glue.getTable(any(software.amazon.awssdk.services.glue.model.GetTableRequest.class))).thenReturn(getTableResponse); return schema; } diff --git a/athena-google-bigquery/pom.xml b/athena-google-bigquery/pom.xml index 40b248e893..c68fc6d1a8 100644 --- a/athena-google-bigquery/pom.xml +++ b/athena-google-bigquery/pom.xml @@ -25,20 +25,6 @@ jna-platform 5.14.0 - - com.amazonaws - athena-jdbc - 2022.47.1 - test-jar - test - - - - com.amazonaws - aws-java-sdk-rds - ${aws-sdk.version} - test - software.amazon.awscdk @@ -67,22 +53,6 @@ - - io.grpc - grpc-api - 1.63.0 - - - com.google.cloud - google-cloud-resourcemanager - 1.46.0 - - - nl.jqno.equalsverifier - equalsverifier - 3.16.1 - test - org.mockito mockito-inline diff --git a/athena-hbase/pom.xml b/athena-hbase/pom.xml index 3f802a0914..6b6d2d04d3 100644 --- a/athena-hbase/pom.xml +++ b/athena-hbase/pom.xml @@ -47,83 +47,6 @@ ${aws-sdk.version} test - - org.apache.avro - avro - 1.11.3 - - - com.google.protobuf - protobuf-java - ${protobuf3.version} - - - org.apache.directory.api - api-ldap-model - 2.1.6 - - - org.apache.hadoop - hadoop-common - 3.4.0 - - - org.eclipse.jetty - jetty-webapp - - - org.eclipse.jetty - jetty-server - - - org.eclipse.jetty - jetty-xml - - - 
org.eclipse.jetty - jetty-servlet - - - org.apache.avro - avro - - - org.codehaus.jackson - jackson-mapper-asl - - - org.codehaus.jackson - jackson-xc - - - org.codehaus.jettison - jettison - - - org.eclipse.jetty - jetty-io - - - log4j - log4j - - - com.google.protobuf - protobuf-java - - - - - org.apache.hbase - hbase-common - ${hbase.version} - - - org.apache.hadoop - hadoop-common - - - org.apache.hbase hbase-client @@ -144,30 +67,6 @@ - - - org.apache.httpcomponents - httpclient - ${apache.httpclient.version} - - - - commons-logging - commons-logging - - - - - org.slf4j - slf4j-api - ${slf4j-log4j.version} - - - org.slf4j - jcl-over-slf4j - ${slf4j-log4j.version} - runtime - com.amazonaws aws-lambda-java-log4j2 diff --git a/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseMetadataHandler.java b/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseMetadataHandler.java index ab3cca488e..ab90b920c0 100644 --- a/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseMetadataHandler.java +++ b/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseMetadataHandler.java @@ -45,8 +45,6 @@ import com.amazonaws.athena.connectors.hbase.connection.HbaseConnectionFactory; import com.amazonaws.athena.connectors.hbase.qpt.HbaseQueryPassthrough; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.glue.AWSGlue; -import com.amazonaws.services.glue.model.Table; import com.google.common.collect.ImmutableMap; import org.apache.arrow.util.VisibleForTesting; import org.apache.arrow.vector.types.Types; @@ -57,6 +55,8 @@ import org.apache.hadoop.hbase.TableName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.glue.GlueClient; +import software.amazon.awssdk.services.glue.model.Table; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.IOException; @@ -101,12 +101,12 @@ public class HbaseMetadataHandler 
//is indeed enabled for use by this connector. private static final String HBASE_METADATA_FLAG = "hbase-metadata-flag"; //Used to filter out Glue tables which lack HBase metadata flag. - private static final TableFilter TABLE_FILTER = (Table table) -> table.getParameters().containsKey(HBASE_METADATA_FLAG); + private static final TableFilter TABLE_FILTER = (Table table) -> table.parameters().containsKey(HBASE_METADATA_FLAG); //Used to denote the 'type' of this connector for diagnostic purposes. private static final String SOURCE_TYPE = "hbase"; //The number of rows to scan when attempting to infer schema from an HBase table. private static final int NUM_ROWS_TO_SCAN = 10; - private final AWSGlue awsGlue; + private final GlueClient awsGlue; private final HbaseConnectionFactory connectionFactory; private final HbaseQueryPassthrough queryPassthrough = new HbaseQueryPassthrough(); @@ -120,7 +120,7 @@ public HbaseMetadataHandler(java.util.Map configOptions) @VisibleForTesting protected HbaseMetadataHandler( - AWSGlue awsGlue, + GlueClient awsGlue, EncryptionKeyFactory keyFactory, SecretsManagerClient secretsManager, AmazonAthena athena, diff --git a/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/HbaseMetadataHandlerTest.java b/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/HbaseMetadataHandlerTest.java index 04593b066c..9b9fcf39ec 100644 --- a/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/HbaseMetadataHandlerTest.java +++ b/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/HbaseMetadataHandlerTest.java @@ -43,7 +43,6 @@ import com.amazonaws.athena.connectors.hbase.connection.HbaseConnectionFactory; import com.amazonaws.athena.connectors.hbase.connection.ResultProcessor; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.glue.AWSGlue; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.hadoop.hbase.HRegionInfo; @@ 
-62,6 +61,7 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.glue.GlueClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.IOException; @@ -99,7 +99,7 @@ public class HbaseMetadataHandlerTest private HbaseConnectionFactory mockConnFactory; @Mock - private AWSGlue awsGlue; + private GlueClient awsGlue; @Mock private SecretsManagerClient secretsManager; diff --git a/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/HbaseRecordHandlerTest.java b/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/HbaseRecordHandlerTest.java index ac9c06c6ee..47aefec079 100644 --- a/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/HbaseRecordHandlerTest.java +++ b/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/HbaseRecordHandlerTest.java @@ -44,10 +44,6 @@ import com.amazonaws.athena.connectors.hbase.connection.HbaseConnectionFactory; import com.amazonaws.athena.connectors.hbase.connection.ResultProcessor; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.glue.AWSGlue; -import com.amazonaws.services.glue.model.Column; -import com.amazonaws.services.glue.model.GetTableResult; -import com.amazonaws.services.glue.model.StorageDescriptor; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.model.PutObjectRequest; import com.amazonaws.services.s3.model.PutObjectResult; diff --git a/athena-kafka/pom.xml b/athena-kafka/pom.xml index dcf243e26e..19baf1e780 100644 --- a/athena-kafka/pom.xml +++ b/athena-kafka/pom.xml @@ -130,11 +130,6 @@ ${testng.version} test - - com.amazonaws - aws-java-sdk-glue - ${aws-sdk.version} - org.junit.support testng-engine diff --git a/athena-kafka/src/main/java/com/amazonaws/athena/connectors/kafka/GlueRegistryReader.java 
b/athena-kafka/src/main/java/com/amazonaws/athena/connectors/kafka/GlueRegistryReader.java index 2ea3ef43ba..c5463d8ad5 100644 --- a/athena-kafka/src/main/java/com/amazonaws/athena/connectors/kafka/GlueRegistryReader.java +++ b/athena-kafka/src/main/java/com/amazonaws/athena/connectors/kafka/GlueRegistryReader.java @@ -19,16 +19,15 @@ */ package com.amazonaws.athena.connectors.kafka; -import com.amazonaws.services.glue.AWSGlue; -import com.amazonaws.services.glue.AWSGlueClientBuilder; -import com.amazonaws.services.glue.model.GetSchemaRequest; -import com.amazonaws.services.glue.model.GetSchemaResult; -import com.amazonaws.services.glue.model.GetSchemaVersionRequest; -import com.amazonaws.services.glue.model.GetSchemaVersionResult; -import com.amazonaws.services.glue.model.SchemaId; -import com.amazonaws.services.glue.model.SchemaVersionNumber; import com.fasterxml.jackson.databind.DeserializationFeature; import com.fasterxml.jackson.databind.ObjectMapper; +import software.amazon.awssdk.services.glue.GlueClient; +import software.amazon.awssdk.services.glue.model.GetSchemaRequest; +import software.amazon.awssdk.services.glue.model.GetSchemaResponse; +import software.amazon.awssdk.services.glue.model.GetSchemaVersionRequest; +import software.amazon.awssdk.services.glue.model.GetSchemaVersionResponse; +import software.amazon.awssdk.services.glue.model.SchemaId; +import software.amazon.awssdk.services.glue.model.SchemaVersionNumber; public class GlueRegistryReader { @@ -46,15 +45,16 @@ public class GlueRegistryReader * @param glueSchemaName * @return */ - public GetSchemaVersionResult getSchemaVersionResult(String glueRegistryName, String glueSchemaName) + public GetSchemaVersionResponse getSchemaVersionResult(String glueRegistryName, String glueSchemaName) { - AWSGlue glue = AWSGlueClientBuilder.defaultClient(); - SchemaId sid = new SchemaId().withRegistryName(glueRegistryName).withSchemaName(glueSchemaName); - GetSchemaResult schemaResult = glue.getSchema(new 
GetSchemaRequest().withSchemaId(sid)); - SchemaVersionNumber svn = new SchemaVersionNumber().withVersionNumber(schemaResult.getLatestSchemaVersion()); - return glue.getSchemaVersion(new GetSchemaVersionRequest() - .withSchemaId(sid) - .withSchemaVersionNumber(svn) + GlueClient glue = GlueClient.create(); + SchemaId sid = SchemaId.builder().registryName(glueRegistryName).schemaName(glueSchemaName).build(); + GetSchemaResponse schemaResponse = glue.getSchema(GetSchemaRequest.builder().schemaId(sid).build()); + SchemaVersionNumber svn = SchemaVersionNumber.builder().versionNumber(schemaResponse.latestSchemaVersion()).build(); + return glue.getSchemaVersion(GetSchemaVersionRequest.builder() + .schemaId(sid) + .schemaVersionNumber(svn) + .build() ); } /** @@ -69,17 +69,17 @@ public GetSchemaVersionResult getSchemaVersionResult(String glueRegistryName, St */ public T getGlueSchema(String glueRegistryName, String glueSchemaName, Class clazz) throws Exception { - GetSchemaVersionResult result = getSchemaVersionResult(glueRegistryName, glueSchemaName); - return objectMapper.readValue(result.getSchemaDefinition(), clazz); + GetSchemaVersionResponse result = getSchemaVersionResult(glueRegistryName, glueSchemaName); + return objectMapper.readValue(result.schemaDefinition(), clazz); } public String getGlueSchemaType(String glueRegistryName, String glueSchemaName) { - GetSchemaVersionResult result = getSchemaVersionResult(glueRegistryName, glueSchemaName); - return result.getDataFormat(); + GetSchemaVersionResponse result = getSchemaVersionResult(glueRegistryName, glueSchemaName); + return result.dataFormatAsString(); } public String getSchemaDef(String glueRegistryName, String glueSchemaName) { - GetSchemaVersionResult result = getSchemaVersionResult(glueRegistryName, glueSchemaName); - return result.getSchemaDefinition(); + GetSchemaVersionResponse result = getSchemaVersionResult(glueRegistryName, glueSchemaName); + return result.schemaDefinition(); } } diff --git 
a/athena-kafka/src/main/java/com/amazonaws/athena/connectors/kafka/KafkaMetadataHandler.java b/athena-kafka/src/main/java/com/amazonaws/athena/connectors/kafka/KafkaMetadataHandler.java index f7a399a4f0..9dd62a9cc7 100644 --- a/athena-kafka/src/main/java/com/amazonaws/athena/connectors/kafka/KafkaMetadataHandler.java +++ b/athena-kafka/src/main/java/com/amazonaws/athena/connectors/kafka/KafkaMetadataHandler.java @@ -41,15 +41,6 @@ import com.amazonaws.athena.connectors.kafka.dto.SplitParameters; import com.amazonaws.athena.connectors.kafka.dto.TopicPartitionPiece; import com.amazonaws.athena.connectors.kafka.dto.TopicSchema; -import com.amazonaws.services.glue.AWSGlue; -import com.amazonaws.services.glue.AWSGlueClientBuilder; -import com.amazonaws.services.glue.model.GetRegistryRequest; -import com.amazonaws.services.glue.model.GetRegistryResult; -import com.amazonaws.services.glue.model.ListRegistriesRequest; -import com.amazonaws.services.glue.model.ListRegistriesResult; -import com.amazonaws.services.glue.model.ListSchemasResult; -import com.amazonaws.services.glue.model.RegistryId; -import com.amazonaws.services.glue.model.RegistryListItem; import com.google.common.annotations.VisibleForTesting; import com.google.protobuf.Descriptors; import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema; @@ -60,6 +51,13 @@ import org.apache.kafka.common.TopicPartition; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.glue.GlueClient; +import software.amazon.awssdk.services.glue.model.GetRegistryRequest; +import software.amazon.awssdk.services.glue.model.GetRegistryResponse; +import software.amazon.awssdk.services.glue.model.ListRegistriesRequest; +import software.amazon.awssdk.services.glue.model.ListRegistriesResponse; +import software.amazon.awssdk.services.glue.model.RegistryId; +import software.amazon.awssdk.services.glue.model.RegistryListItem; import java.util.ArrayList; import java.util.Collections; @@ -99,24 
+97,28 @@ public KafkaMetadataHandler(Consumer kafkaConsumer, java.util.Ma private Stream filteredRegistriesStream(Stream registries) { return registries - .filter(r -> r.getDescription() != null && r.getDescription().contains(REGISTRY_MARKER)) - .map(r -> r.getRegistryName()); + .filter(r -> r.description() != null && r.description().contains(REGISTRY_MARKER)) + .map(r -> r.registryName()); } - private ListRegistriesResult listRegistriesFromGlue(AWSGlue glue, String nextToken) + private ListRegistriesResponse listRegistriesFromGlue(GlueClient glue, String nextToken) { - ListRegistriesRequest listRequest = new ListRegistriesRequest().withMaxResults(maxGluePageSize); - listRequest = (nextToken == null) ? listRequest : listRequest.withNextToken(nextToken); - return glue.listRegistries(listRequest); + ListRegistriesRequest.Builder listRequest = ListRegistriesRequest.builder().maxResults(maxGluePageSize); + if (nextToken != null) { + listRequest.nextToken(nextToken); + } + return glue.listRegistries(listRequest.build()); } - private ListSchemasResult listSchemasFromGlue(AWSGlue glue, String glueRegistryName, int pageSize, String nextToken) + private software.amazon.awssdk.services.glue.model.ListSchemasResponse listSchemasFromGlue(GlueClient glue, String glueRegistryName, int pageSize, String nextToken) { - com.amazonaws.services.glue.model.ListSchemasRequest listRequest = new com.amazonaws.services.glue.model.ListSchemasRequest() - .withRegistryId(new RegistryId().withRegistryName(glueRegistryName)) - .withMaxResults(Math.min(pageSize, maxGluePageSize)); - listRequest = (nextToken == null) ? 
listRequest : listRequest.withNextToken(nextToken); - return glue.listSchemas(listRequest); + software.amazon.awssdk.services.glue.model.ListSchemasRequest.Builder listRequestBuilder = software.amazon.awssdk.services.glue.model.ListSchemasRequest.builder() + .registryId(RegistryId.builder().registryName(glueRegistryName).build()) + .maxResults(Math.min(pageSize, maxGluePageSize)); + if (nextToken != null) { + listRequestBuilder.nextToken(nextToken); + } + return glue.listSchemas(listRequestBuilder.build()); } /** @@ -130,10 +132,10 @@ private ListSchemasResult listSchemasFromGlue(AWSGlue glue, String glueRegistryN public ListSchemasResponse doListSchemaNames(BlockAllocator blockAllocator, ListSchemasRequest listSchemasRequest) { LOGGER.info("doListSchemaNames called with Catalog: {}", listSchemasRequest.getCatalogName()); - AWSGlue glue = AWSGlueClientBuilder.defaultClient(); + GlueClient glue = GlueClient.create(); - Stream allFilteredRegistries = PaginatedRequestIterator.stream((pageToken) -> listRegistriesFromGlue(glue, pageToken), ListRegistriesResult::getNextToken) - .flatMap(result -> filteredRegistriesStream(result.getRegistries().stream())); + Stream allFilteredRegistries = PaginatedRequestIterator.stream((pageToken) -> listRegistriesFromGlue(glue, pageToken), ListRegistriesResponse::nextToken) + .flatMap(result -> filteredRegistriesStream(result.registries().stream())); ListSchemasResponse result = new ListSchemasResponse(listSchemasRequest.getCatalogName(), allFilteredRegistries.collect(Collectors.toList())); LOGGER.debug("doListSchemaNames result: {}", result); return result; @@ -142,12 +144,12 @@ public ListSchemasResponse doListSchemaNames(BlockAllocator blockAllocator, List private String resolveGlueRegistryName(String glueRegistryName) { try { - AWSGlue glue = AWSGlueClientBuilder.defaultClient(); - GetRegistryResult getRegistryResult = glue.getRegistry(new GetRegistryRequest().withRegistryId(new RegistryId().withRegistryName(glueRegistryName))); - 
if (!(getRegistryResult.getDescription() != null && getRegistryResult.getDescription().contains(REGISTRY_MARKER))) { + GlueClient glue = GlueClient.create(); + GetRegistryResponse getRegistryResult = glue.getRegistry(GetRegistryRequest.builder().registryId(RegistryId.builder().registryName(glueRegistryName).build()).build()); + if (!(getRegistryResult.description() != null && getRegistryResult.description().contains(REGISTRY_MARKER))) { throw new Exception(String.format("Found a registry with a matching name [%s] but not marked for AthenaFederationKafka", glueRegistryName)); } - return getRegistryResult.getRegistryName(); + return getRegistryResult.registryName(); } catch (Exception ex) { LOGGER.info("resolveGlueRegistryName falling back to case insensitive search for: {}. Exception: {}", glueRegistryName, ex); @@ -168,7 +170,7 @@ public ListTablesResponse doListTables(BlockAllocator blockAllocator, ListTables LOGGER.info("doListTables: {}", federationListTablesRequest); String glueRegistryNameResolved = resolveGlueRegistryName(federationListTablesRequest.getSchemaName()); LOGGER.info("Resolved Glue registry name to: {}", glueRegistryNameResolved); - AWSGlue glue = AWSGlueClientBuilder.defaultClient(); + GlueClient glue = GlueClient.create(); // In this situation we want to loop through all the pages to return up to the MAX_RESULTS size // And only do this if we don't have a token passed in, otherwise if we have a token that takes precedence // over the fact that the page size was set to unlimited. 
@@ -176,10 +178,10 @@ public ListTablesResponse doListTables(BlockAllocator blockAllocator, ListTables federationListTablesRequest.getNextToken() == null) { LOGGER.info("Request page size is UNLIMITED_PAGE_SIZE_VALUE"); - List allTableNames = PaginatedRequestIterator.stream((pageToken) -> listSchemasFromGlue(glue, glueRegistryNameResolved, maxGluePageSize, pageToken), ListSchemasResult::getNextToken) + List allTableNames = PaginatedRequestIterator.stream((pageToken) -> listSchemasFromGlue(glue, glueRegistryNameResolved, maxGluePageSize, pageToken), software.amazon.awssdk.services.glue.model.ListSchemasResponse::nextToken) .flatMap(currentResult -> - currentResult.getSchemas().stream() - .map(schemaListItem -> schemaListItem.getSchemaName()) + currentResult.schemas().stream() + .map(schemaListItem -> schemaListItem.schemaName()) .map(glueSchemaName -> new TableName(glueRegistryNameResolved, glueSchemaName)) ) .limit(MAX_RESULTS + 1) @@ -195,22 +197,22 @@ public ListTablesResponse doListTables(BlockAllocator blockAllocator, ListTables } // Otherwise don't retrieve all pages, just pass through the page token. 
- ListSchemasResult listSchemasResultFromGlue = listSchemasFromGlue( + software.amazon.awssdk.services.glue.model.ListSchemasResponse listSchemasResultFromGlue = listSchemasFromGlue( glue, glueRegistryNameResolved, federationListTablesRequest.getPageSize(), federationListTablesRequest.getNextToken()); // Convert the glue response into our own federation response - List tableNames = listSchemasResultFromGlue.getSchemas() + List tableNames = listSchemasResultFromGlue.schemas() .stream() - .map(schemaListItem -> schemaListItem.getSchemaName()) + .map(schemaListItem -> schemaListItem.schemaName()) .map(glueSchemaName -> new TableName(glueRegistryNameResolved, glueSchemaName)) .collect(Collectors.toList()); // Pass through whatever token we got from Glue to the user ListTablesResponse result = new ListTablesResponse( federationListTablesRequest.getCatalogName(), tableNames, - listSchemasResultFromGlue.getNextToken()); + listSchemasResultFromGlue.nextToken()); LOGGER.debug("doListTables [paginated] result: {}", result); return result; } @@ -218,11 +220,11 @@ public ListTablesResponse doListTables(BlockAllocator blockAllocator, ListTables private String findGlueRegistryNameIgnoringCasing(String glueRegistryNameIn) { LOGGER.debug("findGlueRegistryNameIgnoringCasing {}", glueRegistryNameIn); - AWSGlue glue = AWSGlueClientBuilder.defaultClient(); + GlueClient glue = GlueClient.create(); // Try to find the registry ignoring the case - String result = PaginatedRequestIterator.stream((pageToken) -> listRegistriesFromGlue(glue, pageToken), ListRegistriesResult::getNextToken) - .flatMap(currentResult -> filteredRegistriesStream(currentResult.getRegistries().stream())) + String result = PaginatedRequestIterator.stream((pageToken) -> listRegistriesFromGlue(glue, pageToken), ListRegistriesResponse::nextToken) + .flatMap(currentResult -> filteredRegistriesStream(currentResult.registries().stream())) .filter(r -> r.equalsIgnoreCase(glueRegistryNameIn)) .findAny() .orElseThrow(() -> 
new RuntimeException(String.format("Could not find Glue Registry: %s", glueRegistryNameIn))); @@ -234,12 +236,12 @@ private String findGlueRegistryNameIgnoringCasing(String glueRegistryNameIn) private String findGlueSchemaNameIgnoringCasing(String glueRegistryNameIn, String glueSchemaNameIn) { LOGGER.debug("findGlueSchemaNameIgnoringCasing {} {}", glueRegistryNameIn, glueSchemaNameIn); - AWSGlue glue = AWSGlueClientBuilder.defaultClient(); + GlueClient glue = GlueClient.create(); // List all schemas under the input registry // Find the schema name ignoring the case in this page - String result = PaginatedRequestIterator.stream((pageToken) -> listSchemasFromGlue(glue, glueRegistryNameIn, maxGluePageSize, pageToken), ListSchemasResult::getNextToken) - .flatMap(currentResult -> currentResult.getSchemas().stream()) - .map(schemaListItem -> schemaListItem.getSchemaName()) + String result = PaginatedRequestIterator.stream((pageToken) -> listSchemasFromGlue(glue, glueRegistryNameIn, maxGluePageSize, pageToken), software.amazon.awssdk.services.glue.model.ListSchemasResponse::nextToken) + .flatMap(currentResult -> currentResult.schemas().stream()) + .map(schemaListItem -> schemaListItem.schemaName()) .filter(glueSchemaName -> glueSchemaName.equalsIgnoreCase(glueSchemaNameIn)) .findAny() .orElseThrow(() -> new RuntimeException(String.format("Could not find Glue Schema: %s", glueSchemaNameIn))); diff --git a/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaMetadataHandlerTest.java b/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaMetadataHandlerTest.java index 0232fc5ce4..efc0d38b5a 100644 --- a/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaMetadataHandlerTest.java +++ b/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaMetadataHandlerTest.java @@ -27,12 +27,16 @@ import com.amazonaws.athena.connector.lambda.domain.predicate.Constraints; import 
com.amazonaws.athena.connector.lambda.metadata.*; import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; -import com.amazonaws.services.glue.AWSGlue; -import com.amazonaws.services.glue.AWSGlueClientBuilder; -import com.amazonaws.services.glue.model.GetSchemaResult; -import com.amazonaws.services.glue.model.GetSchemaVersionResult; -import com.amazonaws.services.glue.model.ListRegistriesResult; -import com.amazonaws.services.glue.model.RegistryListItem; + +import software.amazon.awssdk.services.glue.GlueClient; +import software.amazon.awssdk.services.glue.model.GetSchemaRequest; +import software.amazon.awssdk.services.glue.model.GetSchemaResponse; +import software.amazon.awssdk.services.glue.model.GetSchemaVersionRequest; +import software.amazon.awssdk.services.glue.model.GetSchemaVersionResponse; +import software.amazon.awssdk.services.glue.model.ListRegistriesRequest; +import software.amazon.awssdk.services.glue.model.ListRegistriesResponse; +import software.amazon.awssdk.services.glue.model.RegistryListItem; + import org.apache.kafka.clients.consumer.MockConsumer; import org.apache.kafka.clients.consumer.OffsetResetStrategy; import org.apache.kafka.common.PartitionInfo; @@ -71,10 +75,10 @@ public class KafkaMetadataHandlerTest { private List partitionCols; private Constraints constraints; - private MockedStatic awsGlueClientBuilder; + private MockedStatic awsGlueClientBuilder; @Mock - AWSGlue awsGlue; + GlueClient glueClient; MockConsumer consumer; @@ -113,8 +117,8 @@ public void setUp() { consumer.updateEndOffsets(partitionsEnd); consumer.updatePartitions("testTopic", partitionInfoList); - awsGlueClientBuilder = Mockito.mockStatic(AWSGlueClientBuilder.class); - awsGlueClientBuilder.when(()-> AWSGlueClientBuilder.defaultClient()).thenReturn(awsGlue); + awsGlueClientBuilder = Mockito.mockStatic(GlueClient.class); + awsGlueClientBuilder.when(()-> GlueClient.create()).thenReturn(glueClient); kafkaMetadataHandler = new KafkaMetadataHandler(consumer, 
configOptions); } @@ -127,9 +131,13 @@ public void tearDown() { @Test public void testDoListSchemaNames() { - Mockito.when(awsGlue.listRegistries(any())).thenAnswer(x -> (new ListRegistriesResult()).withRegistries( - (new RegistryListItem()).withRegistryName("Asdf").withDescription("something something {AthenaFederationKafka} something")) - ); + Mockito.when(glueClient.listRegistries(any(ListRegistriesRequest.class))).thenAnswer(x -> (ListRegistriesResponse.builder() + .registries((RegistryListItem.builder()) + .registryName("Asdf") + .description("something something {AthenaFederationKafka} something") + .build()) + .build() + )); ListSchemasRequest listSchemasRequest = new ListSchemasRequest(federatedIdentity, QUERY_ID, "default"); ListSchemasResponse listSchemasResponse = kafkaMetadataHandler.doListSchemaNames(blockAllocator, listSchemasRequest); @@ -150,27 +158,29 @@ public void testDoListSchemaNamesThrowsException() { public void testDoGetTable() throws Exception { String arn = "defaultarn", schemaName = "defaultschemaname", schemaVersionId = "defaultversionid"; Long latestSchemaVersion = 123L; - GetSchemaResult getSchemaResult = new GetSchemaResult(); - GetSchemaVersionResult getSchemaVersionResult = new GetSchemaVersionResult(); - getSchemaResult.setSchemaArn(arn); - getSchemaResult.setSchemaName(schemaName); - getSchemaResult.setLatestSchemaVersion(latestSchemaVersion); - getSchemaVersionResult.setSchemaArn(arn); - getSchemaVersionResult.setSchemaVersionId(schemaVersionId); - getSchemaVersionResult.setDataFormat("json"); - getSchemaVersionResult.setSchemaDefinition("{\n" + - "\t\"topicName\": \"testtable\",\n" + - "\t\"message\": {\n" + - "\t\t\"dataFormat\": \"json\",\n" + - "\t\t\"fields\": [{\n" + - "\t\t\t\"name\": \"intcol\",\n" + - "\t\t\t\"mapping\": \"intcol\",\n" + - "\t\t\t\"type\": \"INTEGER\"\n" + - "\t\t}]\n" + - "\t}\n" + - "}"); - Mockito.when(awsGlue.getSchema(any())).thenReturn(getSchemaResult); - 
Mockito.when(awsGlue.getSchemaVersion(any())).thenReturn(getSchemaVersionResult); + GetSchemaResponse getSchemaResponse = GetSchemaResponse.builder() + .schemaArn(arn) + .schemaName(schemaName) + .latestSchemaVersion(latestSchemaVersion) + .build(); + GetSchemaVersionResponse getSchemaVersionResponse = GetSchemaVersionResponse.builder() + .schemaArn(arn) + .schemaVersionId(schemaVersionId) + .dataFormat("json") + .schemaDefinition("{\n" + + "\t\"topicName\": \"testtable\",\n" + + "\t\"message\": {\n" + + "\t\t\"dataFormat\": \"json\",\n" + + "\t\t\"fields\": [{\n" + + "\t\t\t\"name\": \"intcol\",\n" + + "\t\t\t\"mapping\": \"intcol\",\n" + + "\t\t\t\"type\": \"INTEGER\"\n" + + "\t\t}]\n" + + "\t}\n" + + "}") + .build(); + Mockito.when(glueClient.getSchema(any(GetSchemaRequest.class))).thenReturn(getSchemaResponse); + Mockito.when(glueClient.getSchemaVersion(any(GetSchemaVersionRequest.class))).thenReturn(getSchemaVersionResponse); GetTableRequest getTableRequest = new GetTableRequest(federatedIdentity, QUERY_ID, "kafka", new TableName("default", "testtable"), Collections.emptyMap()); GetTableResponse getTableResponse = kafkaMetadataHandler.doGetTable(blockAllocator, getTableRequest); assertEquals(1, getTableResponse.getSchema().getFields().size()); @@ -181,28 +191,30 @@ public void testDoGetSplits() throws Exception { String arn = "defaultarn", schemaName = "defaultschemaname", schemaVersionId = "defaultversionid"; Long latestSchemaVersion = 123L; - GetSchemaResult getSchemaResult = new GetSchemaResult(); - GetSchemaVersionResult getSchemaVersionResult = new GetSchemaVersionResult(); - getSchemaResult.setSchemaArn(arn); - getSchemaResult.setSchemaName(schemaName); - getSchemaResult.setLatestSchemaVersion(latestSchemaVersion); - getSchemaVersionResult.setSchemaArn(arn); - getSchemaVersionResult.setSchemaVersionId(schemaVersionId); - getSchemaVersionResult.setDataFormat("json"); - getSchemaVersionResult.setSchemaDefinition("{\n" + - "\t\"topicName\": 
\"testTopic\",\n" + - "\t\"message\": {\n" + - "\t\t\"dataFormat\": \"json\",\n" + - "\t\t\"fields\": [{\n" + - "\t\t\t\"name\": \"intcol\",\n" + - "\t\t\t\"mapping\": \"intcol\",\n" + - "\t\t\t\"type\": \"INTEGER\"\n" + - "\t\t}]\n" + - "\t}\n" + - "}"); - - Mockito.when(awsGlue.getSchema(any())).thenReturn(getSchemaResult); - Mockito.when(awsGlue.getSchemaVersion(any())).thenReturn(getSchemaVersionResult); + GetSchemaResponse getSchemaResponse = GetSchemaResponse.builder() + .schemaArn(arn) + .schemaName(schemaName) + .latestSchemaVersion(latestSchemaVersion) + .build(); + GetSchemaVersionResponse getSchemaVersionResponse = GetSchemaVersionResponse.builder() + .schemaArn(arn) + .schemaVersionId(schemaVersionId) + .dataFormat("json") + .schemaDefinition("{\n" + + "\t\"topicName\": \"testTopic\",\n" + + "\t\"message\": {\n" + + "\t\t\"dataFormat\": \"json\",\n" + + "\t\t\"fields\": [{\n" + + "\t\t\t\"name\": \"intcol\",\n" + + "\t\t\t\"mapping\": \"intcol\",\n" + + "\t\t\t\"type\": \"INTEGER\"\n" + + "\t\t}]\n" + + "\t}\n" + + "}") + .build(); + + Mockito.when(glueClient.getSchema(any(GetSchemaRequest.class))).thenReturn(getSchemaResponse); + Mockito.when(glueClient.getSchemaVersion(any(GetSchemaVersionRequest.class))).thenReturn(getSchemaVersionResponse); GetSplitsRequest request = new GetSplitsRequest( federatedIdentity, diff --git a/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaRecordHandlerTest.java b/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaRecordHandlerTest.java index 5395640caa..fb1ee2176c 100644 --- a/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaRecordHandlerTest.java +++ b/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaRecordHandlerTest.java @@ -33,10 +33,6 @@ import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; import com.amazonaws.athena.connectors.kafka.dto.*; import com.amazonaws.services.athena.AmazonAthena; -import 
com.amazonaws.services.glue.AWSGlue; -import com.amazonaws.services.glue.AWSGlueClientBuilder; -import com.amazonaws.services.glue.model.GetSchemaResult; -import com.amazonaws.services.glue.model.GetSchemaVersionResult; import com.amazonaws.services.s3.AmazonS3; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; @@ -61,6 +57,11 @@ import org.mockito.junit.MockitoJUnitRunner; import org.mockito.MockedStatic; import org.mockito.Mockito; +import software.amazon.awssdk.services.glue.GlueClient; +import software.amazon.awssdk.services.glue.model.GetSchemaRequest; +import software.amazon.awssdk.services.glue.model.GetSchemaResponse; +import software.amazon.awssdk.services.glue.model.GetSchemaVersionRequest; +import software.amazon.awssdk.services.glue.model.GetSchemaVersionResponse; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Collections; @@ -79,10 +80,10 @@ public class KafkaRecordHandlerTest { private static final ObjectMapper objectMapper = new ObjectMapper(); - private MockedStatic awsGlueClientBuilder; + private MockedStatic awsGlueClientBuilder; @Mock - AWSGlue awsGlue; + GlueClient awsGlue; @Mock AmazonS3 amazonS3; @@ -151,8 +152,8 @@ public void setUp() throws Exception { allocator = new BlockAllocatorImpl(); mockedKafkaUtils = Mockito.mockStatic(KafkaUtils.class, Mockito.CALLS_REAL_METHODS); kafkaRecordHandler = new KafkaRecordHandler(amazonS3, awsSecretsManager, athena, com.google.common.collect.ImmutableMap.of()); - awsGlueClientBuilder = Mockito.mockStatic(AWSGlueClientBuilder.class); - awsGlueClientBuilder.when(()-> AWSGlueClientBuilder.defaultClient()).thenReturn(awsGlue); + awsGlueClientBuilder = Mockito.mockStatic(GlueClient.class); + awsGlueClientBuilder.when(()-> GlueClient.create()).thenReturn(awsGlue); } @After @@ -178,8 +179,8 @@ public void testForConsumeDataFromTopic() throws Exception { mockedKafkaUtils.when(() -> 
KafkaUtils.getKafkaConsumer(schema, com.google.common.collect.ImmutableMap.of())).thenReturn(consumer); mockedKafkaUtils.when(() -> KafkaUtils.createSplitParam(anyMap())).thenReturn(splitParameters); - Mockito.when(awsGlue.getSchema(any())).thenReturn(getSchemaResult()); - Mockito.when(awsGlue.getSchemaVersion(any())).thenReturn(getJsonSchemaVersionResult()); + Mockito.when(awsGlue.getSchema(any(GetSchemaRequest.class))).thenReturn(getSchemaResponse()); + Mockito.when(awsGlue.getSchemaVersion(any(GetSchemaVersionRequest.class))).thenReturn(getJsonSchemaVersionResponse()); QueryStatusChecker queryStatusChecker = mock(QueryStatusChecker.class); when(queryStatusChecker.isQueryRunning()).thenReturn(true); @@ -206,8 +207,8 @@ public void testForConsumeAvroDataFromTopic() throws Exception { mockedKafkaUtils.when(() -> KafkaUtils.getAvroKafkaConsumer(com.google.common.collect.ImmutableMap.of())).thenReturn(avroConsumer); mockedKafkaUtils.when(() -> KafkaUtils.createSplitParam(anyMap())).thenReturn(splitParameters); - Mockito.when(awsGlue.getSchema(any())).thenReturn(getSchemaResult()); - Mockito.when(awsGlue.getSchemaVersion(any())).thenReturn(getAvroSchemaVersionResult()); + Mockito.when(awsGlue.getSchema(any(GetSchemaRequest.class))).thenReturn(getSchemaResponse()); + Mockito.when(awsGlue.getSchemaVersion(any(GetSchemaVersionRequest.class))).thenReturn(getAvroSchemaVersionResponse()); QueryStatusChecker queryStatusChecker = mock(QueryStatusChecker.class); when(queryStatusChecker.isQueryRunning()).thenReturn(true); @@ -235,8 +236,8 @@ public void testForConsumeProtobufDataFromTopic() throws Exception { mockedKafkaUtils.when(() -> KafkaUtils.getProtobufKafkaConsumer(com.google.common.collect.ImmutableMap.of())).thenReturn(protobufConsumer); mockedKafkaUtils.when(() -> KafkaUtils.createSplitParam(anyMap())).thenReturn(splitParameters); - Mockito.when(awsGlue.getSchema(any())).thenReturn(getSchemaResult()); - 
Mockito.when(awsGlue.getSchemaVersion(any())).thenReturn(getProtobufSchemaVersionResult()); + Mockito.when(awsGlue.getSchema(any(GetSchemaRequest.class))).thenReturn(getSchemaResponse()); + Mockito.when(awsGlue.getSchemaVersion(any(GetSchemaVersionRequest.class))).thenReturn(getProtobufSchemaVersionResponse()); QueryStatusChecker queryStatusChecker = mock(QueryStatusChecker.class); when(queryStatusChecker.isQueryRunning()).thenReturn(true); @@ -264,8 +265,8 @@ public void testForQueryStatusChecker() throws Exception { mockedKafkaUtils.when(() -> KafkaUtils.getKafkaConsumer(schema, com.google.common.collect.ImmutableMap.of())).thenReturn(consumer); mockedKafkaUtils.when(() -> KafkaUtils.createSplitParam(anyMap())).thenReturn(splitParameters); - Mockito.when(awsGlue.getSchema(any())).thenReturn(getSchemaResult()); - Mockito.when(awsGlue.getSchemaVersion(any())).thenReturn(getJsonSchemaVersionResult()); + Mockito.when(awsGlue.getSchema(any(GetSchemaRequest.class))).thenReturn(getSchemaResponse()); + Mockito.when(awsGlue.getSchemaVersion(any(GetSchemaVersionRequest.class))).thenReturn(getJsonSchemaVersionResponse()); QueryStatusChecker queryStatusChecker = mock(QueryStatusChecker.class); when(queryStatusChecker.isQueryRunning()).thenReturn(false); @@ -294,8 +295,8 @@ public void testForEndOffsetIsZero() throws Exception { mockedKafkaUtils.when(() -> KafkaUtils.getKafkaConsumer(schema, com.google.common.collect.ImmutableMap.of())).thenReturn(consumer); mockedKafkaUtils.when(() -> KafkaUtils.createSplitParam(anyMap())).thenReturn(splitParameters); - Mockito.when(awsGlue.getSchema(any())).thenReturn(getSchemaResult()); - Mockito.when(awsGlue.getSchemaVersion(any())).thenReturn(getJsonSchemaVersionResult()); + Mockito.when(awsGlue.getSchema(any(GetSchemaRequest.class))).thenReturn(getSchemaResponse()); + Mockito.when(awsGlue.getSchemaVersion(any(GetSchemaVersionRequest.class))).thenReturn(getJsonSchemaVersionResponse()); ReadRecordsRequest request = 
createReadRecordsRequest(schema); kafkaRecordHandler.readWithConstraint(null, request, null); @@ -320,8 +321,8 @@ public void testForContinuousEmptyDataFromTopic() throws Exception { mockedKafkaUtils.when(() -> KafkaUtils.getKafkaConsumer(schema, com.google.common.collect.ImmutableMap.of())).thenReturn(consumer); mockedKafkaUtils.when(() -> KafkaUtils.createSplitParam(anyMap())).thenReturn(splitParameters); - Mockito.when(awsGlue.getSchema(any())).thenReturn(getSchemaResult()); - Mockito.when(awsGlue.getSchemaVersion(any())).thenReturn(getJsonSchemaVersionResult()); + Mockito.when(awsGlue.getSchema(any(GetSchemaRequest.class))).thenReturn(getSchemaResponse()); + Mockito.when(awsGlue.getSchemaVersion(any(GetSchemaVersionRequest.class))).thenReturn(getJsonSchemaVersionResponse()); QueryStatusChecker queryStatusChecker = mock(QueryStatusChecker.class); when(queryStatusChecker.isQueryRunning()).thenReturn(true); @@ -492,49 +493,52 @@ private ProtobufSchema createProtobufTopicSchema() { return protobufSchema; } - private GetSchemaResult getSchemaResult() { - + private GetSchemaResponse getSchemaResponse() { String arn = "defaultArn", schemaName = "defaultSchemaName"; Long latestSchemaVersion = 123L; - GetSchemaResult getSchemaResult = new GetSchemaResult(); - getSchemaResult.setSchemaArn(arn); - getSchemaResult.setSchemaName(schemaName); - getSchemaResult.setLatestSchemaVersion(latestSchemaVersion); - return getSchemaResult; + GetSchemaResponse getSchemaResponse = GetSchemaResponse.builder() + .schemaArn(arn) + .schemaName(schemaName) + .latestSchemaVersion(latestSchemaVersion) + .build(); + return getSchemaResponse; } - private GetSchemaVersionResult getJsonSchemaVersionResult() { + private GetSchemaVersionResponse getJsonSchemaVersionResponse() { String arn = "defaultArn", schemaVersionId = "defaultVersionId"; - GetSchemaVersionResult getJsonSchemaVersionResult = new GetSchemaVersionResult(); - getJsonSchemaVersionResult.setSchemaArn(arn); - 
getJsonSchemaVersionResult.setSchemaVersionId(schemaVersionId); - getJsonSchemaVersionResult.setDataFormat("json"); - getJsonSchemaVersionResult.setSchemaDefinition("{\"topicName\": \"testtable\", \"\"message\": {\"dataFormat\": \"json\", \"fields\": [{\"name\": \"intcol\", \"mapping\": \"intcol\", \"type\": \"INTEGER\"}]}\"}"); - return getJsonSchemaVersionResult; + GetSchemaVersionResponse getJsonSchemaVersionResponse = GetSchemaVersionResponse.builder() + .schemaArn(arn) + .schemaVersionId(schemaVersionId) + .dataFormat("json") + .schemaDefinition("{\"topicName\": \"testtable\", \"\"message\": {\"dataFormat\": \"json\", \"fields\": [{\"name\": \"intcol\", \"mapping\": \"intcol\", \"type\": \"INTEGER\"}]}\"}") + .build(); + return getJsonSchemaVersionResponse; } - private GetSchemaVersionResult getAvroSchemaVersionResult() { + private GetSchemaVersionResponse getAvroSchemaVersionResponse() { String arn = "defaultArn", schemaVersionId = "defaultVersionId"; - GetSchemaVersionResult getAvroSchemaVersionResult = new GetSchemaVersionResult(); - getAvroSchemaVersionResult.setSchemaArn(arn); - getAvroSchemaVersionResult.setSchemaVersionId(schemaVersionId); - getAvroSchemaVersionResult.setDataFormat("avro"); - getAvroSchemaVersionResult.setSchemaDefinition("{\"type\": \"record\",\"name\":\"greetings\",\"fields\": [{\"name\": \"id\", \"type\": \"int\"},{\"name\": \"name\", \"type\": \"string\"},{\"name\": \"greeting\",\"type\": \"string\"}]}"); - return getAvroSchemaVersionResult; + GetSchemaVersionResponse getAvroSchemaVersionResponse = GetSchemaVersionResponse.builder() + .schemaArn(arn) + .schemaVersionId(schemaVersionId) + .dataFormat("avro") + .schemaDefinition("{\"type\": \"record\",\"name\":\"greetings\",\"fields\": [{\"name\": \"id\", \"type\": \"int\"},{\"name\": \"name\", \"type\": \"string\"},{\"name\": \"greeting\",\"type\": \"string\"}]}") + .build(); + return getAvroSchemaVersionResponse; } - private GetSchemaVersionResult getProtobufSchemaVersionResult() { 
+ private GetSchemaVersionResponse getProtobufSchemaVersionResponse() { String arn = "defaultArn", schemaVersionId = "defaultVersionId"; - GetSchemaVersionResult getProtobufSchemaVersionResult = new GetSchemaVersionResult(); - getProtobufSchemaVersionResult.setSchemaArn(arn); - getProtobufSchemaVersionResult.setSchemaVersionId(schemaVersionId); - getProtobufSchemaVersionResult.setDataFormat("protobuf"); - getProtobufSchemaVersionResult.setSchemaDefinition("syntax = \"proto3\";\n" + - "message protobuftest {\n" + - "string name = 1;\n" + - "int32 calories = 2;\n" + - "string colour = 3; \n" + - "}"); - return getProtobufSchemaVersionResult; + GetSchemaVersionResponse getProtobufSchemaVersionResponse = GetSchemaVersionResponse.builder() + .schemaArn(arn) + .schemaVersionId(schemaVersionId) + .dataFormat("protobuf") + .schemaDefinition("syntax = \"proto3\";\n" + + "message protobuftest {\n" + + "string name = 1;\n" + + "int32 calories = 2;\n" + + "string colour = 3; \n" + + "}") + .build(); + return getProtobufSchemaVersionResponse; } } diff --git a/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaUtilsTest.java b/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaUtilsTest.java index ce8ee50fea..178a303023 100644 --- a/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaUtilsTest.java +++ b/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaUtilsTest.java @@ -24,10 +24,6 @@ import com.amazonaws.auth.AWSStaticCredentialsProvider; import com.amazonaws.auth.BasicAWSCredentials; import com.amazonaws.auth.DefaultAWSCredentialsProviderChain; -import com.amazonaws.services.glue.AWSGlue; -import com.amazonaws.services.glue.AWSGlueClientBuilder; -import com.amazonaws.services.glue.model.ListSchemasResult; -import com.amazonaws.services.glue.model.SchemaListItem; import com.amazonaws.services.s3.AmazonS3Client; import com.amazonaws.services.s3.AmazonS3ClientBuilder; import 
com.amazonaws.services.s3.model.ListObjectsRequest; diff --git a/athena-msk/pom.xml b/athena-msk/pom.xml index ebe656a897..df19f9f60d 100644 --- a/athena-msk/pom.xml +++ b/athena-msk/pom.xml @@ -119,11 +119,6 @@ ${testng.version} test - - com.amazonaws - aws-java-sdk-glue - ${aws-sdk.version} - org.junit.support testng-engine diff --git a/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/AmazonMskMetadataHandler.java b/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/AmazonMskMetadataHandler.java index d29ffd510c..8a2359ad30 100644 --- a/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/AmazonMskMetadataHandler.java +++ b/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/AmazonMskMetadataHandler.java @@ -40,15 +40,6 @@ import com.amazonaws.athena.connectors.msk.dto.SplitParameters; import com.amazonaws.athena.connectors.msk.dto.TopicPartitionPiece; import com.amazonaws.athena.connectors.msk.dto.TopicSchema; -import com.amazonaws.services.glue.AWSGlue; -import com.amazonaws.services.glue.AWSGlueClientBuilder; -import com.amazonaws.services.glue.model.GetRegistryRequest; -import com.amazonaws.services.glue.model.GetRegistryResult; -import com.amazonaws.services.glue.model.ListRegistriesRequest; -import com.amazonaws.services.glue.model.ListRegistriesResult; -import com.amazonaws.services.glue.model.ListSchemasResult; -import com.amazonaws.services.glue.model.RegistryId; -import com.amazonaws.services.glue.model.RegistryListItem; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.types.pojo.FieldType; @@ -57,6 +48,13 @@ import org.apache.kafka.common.TopicPartition; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.glue.GlueClient; +import software.amazon.awssdk.services.glue.model.GetRegistryRequest; +import software.amazon.awssdk.services.glue.model.GetRegistryResponse; +import 
software.amazon.awssdk.services.glue.model.ListRegistriesRequest; +import software.amazon.awssdk.services.glue.model.ListRegistriesResponse; +import software.amazon.awssdk.services.glue.model.RegistryId; +import software.amazon.awssdk.services.glue.model.RegistryListItem; import java.util.ArrayList; import java.util.Collections; @@ -94,24 +92,29 @@ public AmazonMskMetadataHandler(Consumer kafkaConsumer, java.uti private Stream filteredRegistriesStream(Stream registries) { return registries - .filter(r -> r.getDescription() != null && r.getDescription().contains(REGISTRY_MARKER)) - .map(r -> r.getRegistryName()); + .filter(r -> r.description() != null && r.description().contains(REGISTRY_MARKER)) + .map(r -> r.registryName()); } - private ListRegistriesResult listRegistriesFromGlue(AWSGlue glue, String nextToken) + private ListRegistriesResponse listRegistriesFromGlue(GlueClient glue, String nextToken) { - ListRegistriesRequest listRequest = new ListRegistriesRequest().withMaxResults(maxGluePageSize); - listRequest = (nextToken == null) ? listRequest : listRequest.withNextToken(nextToken); - return glue.listRegistries(listRequest); + ListRegistriesRequest.Builder listRequestBuilder = ListRegistriesRequest.builder() + .maxResults(maxGluePageSize); + if (nextToken != null) { + listRequestBuilder.nextToken(nextToken); + } + return glue.listRegistries(listRequestBuilder.build()); } - private ListSchemasResult listSchemasFromGlue(AWSGlue glue, String glueRegistryName, int pageSize, String nextToken) + private software.amazon.awssdk.services.glue.model.ListSchemasResponse listSchemasFromGlue(GlueClient glue, String glueRegistryName, int pageSize, String nextToken) { - com.amazonaws.services.glue.model.ListSchemasRequest listRequest = new com.amazonaws.services.glue.model.ListSchemasRequest() - .withRegistryId(new RegistryId().withRegistryName(glueRegistryName)) - .withMaxResults(Math.min(pageSize, maxGluePageSize)); - listRequest = (nextToken == null) ? 
listRequest : listRequest.withNextToken(nextToken); - return glue.listSchemas(listRequest); + software.amazon.awssdk.services.glue.model.ListSchemasRequest.Builder listRequestBuilder = software.amazon.awssdk.services.glue.model.ListSchemasRequest.builder() + .registryId(RegistryId.builder().registryName(glueRegistryName).build()) + .maxResults(Math.min(pageSize, maxGluePageSize)); + if (nextToken != null) { + listRequestBuilder.nextToken(nextToken); + } + return glue.listSchemas(listRequestBuilder.build()); } /** @@ -125,10 +128,10 @@ private ListSchemasResult listSchemasFromGlue(AWSGlue glue, String glueRegistryN public ListSchemasResponse doListSchemaNames(BlockAllocator blockAllocator, ListSchemasRequest listSchemasRequest) { LOGGER.info("doListSchemaNames called with Catalog: {}", listSchemasRequest.getCatalogName()); - AWSGlue glue = AWSGlueClientBuilder.defaultClient(); + GlueClient glue = GlueClient.create(); - Stream allFilteredRegistries = PaginatedRequestIterator.stream((pageToken) -> listRegistriesFromGlue(glue, pageToken), ListRegistriesResult::getNextToken) - .flatMap(result -> filteredRegistriesStream(result.getRegistries().stream())); + Stream allFilteredRegistries = PaginatedRequestIterator.stream((pageToken) -> listRegistriesFromGlue(glue, pageToken), ListRegistriesResponse::nextToken) + .flatMap(result -> filteredRegistriesStream(result.registries().stream())); ListSchemasResponse result = new ListSchemasResponse(listSchemasRequest.getCatalogName(), allFilteredRegistries.collect(Collectors.toList())); LOGGER.debug("doListSchemaNames result: {}", result); return result; @@ -137,12 +140,16 @@ public ListSchemasResponse doListSchemaNames(BlockAllocator blockAllocator, List private String resolveGlueRegistryName(String glueRegistryName) { try { - AWSGlue glue = AWSGlueClientBuilder.defaultClient(); - GetRegistryResult getRegistryResult = glue.getRegistry(new GetRegistryRequest().withRegistryId(new RegistryId().withRegistryName(glueRegistryName))); - 
if (!(getRegistryResult.getDescription() != null && getRegistryResult.getDescription().contains(REGISTRY_MARKER))) { + GlueClient glue = GlueClient.create(); + GetRegistryResponse getRegistryResponse = glue.getRegistry(GetRegistryRequest.builder() + .registryId(RegistryId.builder() + .registryName(glueRegistryName) + .build()) + .build()); + if (!(getRegistryResponse.description() != null && getRegistryResponse.description().contains(REGISTRY_MARKER))) { throw new Exception(String.format("Found a registry with a matching name [%s] but not marked for AthenaFederationMSK", glueRegistryName)); } - return getRegistryResult.getRegistryName(); + return getRegistryResponse.registryName(); } catch (Exception ex) { LOGGER.info("resolveGlueRegistryName falling back to case insensitive search for: {}. Exception: {}", glueRegistryName, ex); @@ -163,7 +170,7 @@ public ListTablesResponse doListTables(BlockAllocator blockAllocator, ListTables LOGGER.info("doListTables: {}", federationListTablesRequest); String glueRegistryNameResolved = resolveGlueRegistryName(federationListTablesRequest.getSchemaName()); LOGGER.info("Resolved Glue registry name to: {}", glueRegistryNameResolved); - AWSGlue glue = AWSGlueClientBuilder.defaultClient(); + GlueClient glue = GlueClient.create(); // In this situation we want to loop through all the pages to return up to the MAX_RESULTS size // And only do this if we don't have a token passed in, otherwise if we have a token that takes precedence // over the fact that the page size was set to unlimited. 
@@ -171,10 +178,10 @@ public ListTablesResponse doListTables(BlockAllocator blockAllocator, ListTables federationListTablesRequest.getNextToken() == null) { LOGGER.info("Request page size is UNLIMITED_PAGE_SIZE_VALUE"); - List allTableNames = PaginatedRequestIterator.stream((pageToken) -> listSchemasFromGlue(glue, glueRegistryNameResolved, maxGluePageSize, pageToken), ListSchemasResult::getNextToken) + List allTableNames = PaginatedRequestIterator.stream((pageToken) -> listSchemasFromGlue(glue, glueRegistryNameResolved, maxGluePageSize, pageToken), software.amazon.awssdk.services.glue.model.ListSchemasResponse::nextToken) .flatMap(currentResult -> - currentResult.getSchemas().stream() - .map(schemaListItem -> schemaListItem.getSchemaName()) + currentResult.schemas().stream() + .map(schemaListItem -> schemaListItem.schemaName()) .map(glueSchemaName -> new TableName(glueRegistryNameResolved, glueSchemaName)) ) .limit(MAX_RESULTS + 1) @@ -190,22 +197,22 @@ public ListTablesResponse doListTables(BlockAllocator blockAllocator, ListTables } // Otherwise don't retrieve all pages, just pass through the page token. 
- ListSchemasResult listSchemasResultFromGlue = listSchemasFromGlue( + software.amazon.awssdk.services.glue.model.ListSchemasResponse listSchemasResultFromGlue = listSchemasFromGlue( glue, glueRegistryNameResolved, federationListTablesRequest.getPageSize(), federationListTablesRequest.getNextToken()); // Convert the glue response into our own federation response - List tableNames = listSchemasResultFromGlue.getSchemas() + List tableNames = listSchemasResultFromGlue.schemas() .stream() - .map(schemaListItem -> schemaListItem.getSchemaName()) + .map(schemaListItem -> schemaListItem.schemaName()) .map(glueSchemaName -> new TableName(glueRegistryNameResolved, glueSchemaName)) .collect(Collectors.toList()); // Pass through whatever token we got from Glue to the user ListTablesResponse result = new ListTablesResponse( federationListTablesRequest.getCatalogName(), tableNames, - listSchemasResultFromGlue.getNextToken()); + listSchemasResultFromGlue.nextToken()); LOGGER.debug("doListTables [paginated] result: {}", result); return result; } @@ -213,11 +220,11 @@ public ListTablesResponse doListTables(BlockAllocator blockAllocator, ListTables private String findGlueRegistryNameIgnoringCasing(String glueRegistryNameIn) { LOGGER.debug("findGlueRegistryNameIgnoringCasing {}", glueRegistryNameIn); - AWSGlue glue = AWSGlueClientBuilder.defaultClient(); + GlueClient glue = GlueClient.create(); // Try to find the registry ignoring the case - String result = PaginatedRequestIterator.stream((pageToken) -> listRegistriesFromGlue(glue, pageToken), ListRegistriesResult::getNextToken) - .flatMap(currentResult -> filteredRegistriesStream(currentResult.getRegistries().stream())) + String result = PaginatedRequestIterator.stream((pageToken) -> listRegistriesFromGlue(glue, pageToken), ListRegistriesResponse::nextToken) + .flatMap(currentResult -> filteredRegistriesStream(currentResult.registries().stream())) .filter(r -> r.equalsIgnoreCase(glueRegistryNameIn)) .findAny() .orElseThrow(() -> 
new RuntimeException(String.format("Could not find Glue Registry: %s", glueRegistryNameIn))); @@ -229,12 +236,12 @@ private String findGlueRegistryNameIgnoringCasing(String glueRegistryNameIn) private String findGlueSchemaNameIgnoringCasing(String glueRegistryNameIn, String glueSchemaNameIn) { LOGGER.debug("findGlueSchemaNameIgnoringCasing {} {}", glueRegistryNameIn, glueSchemaNameIn); - AWSGlue glue = AWSGlueClientBuilder.defaultClient(); + GlueClient glue = GlueClient.create(); // List all schemas under the input registry // Find the schema name ignoring the case in this page - String result = PaginatedRequestIterator.stream((pageToken) -> listSchemasFromGlue(glue, glueRegistryNameIn, maxGluePageSize, pageToken), ListSchemasResult::getNextToken) - .flatMap(currentResult -> currentResult.getSchemas().stream()) - .map(schemaListItem -> schemaListItem.getSchemaName()) + String result = PaginatedRequestIterator.stream((pageToken) -> listSchemasFromGlue(glue, glueRegistryNameIn, maxGluePageSize, pageToken), software.amazon.awssdk.services.glue.model.ListSchemasResponse::nextToken) + .flatMap(currentResult -> currentResult.schemas().stream()) + .map(schemaListItem -> schemaListItem.schemaName()) .filter(glueSchemaName -> glueSchemaName.equalsIgnoreCase(glueSchemaNameIn)) .findAny() .orElseThrow(() -> new RuntimeException(String.format("Could not find Glue Schema: %s", glueSchemaNameIn))); diff --git a/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/GlueRegistryReader.java b/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/GlueRegistryReader.java index 981520107a..3ee6df8f8d 100644 --- a/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/GlueRegistryReader.java +++ b/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/GlueRegistryReader.java @@ -19,16 +19,15 @@ */ package com.amazonaws.athena.connectors.msk; -import com.amazonaws.services.glue.AWSGlue; -import com.amazonaws.services.glue.AWSGlueClientBuilder; -import 
com.amazonaws.services.glue.model.GetSchemaRequest; -import com.amazonaws.services.glue.model.GetSchemaResult; -import com.amazonaws.services.glue.model.GetSchemaVersionRequest; -import com.amazonaws.services.glue.model.GetSchemaVersionResult; -import com.amazonaws.services.glue.model.SchemaId; -import com.amazonaws.services.glue.model.SchemaVersionNumber; import com.fasterxml.jackson.databind.DeserializationFeature; import com.fasterxml.jackson.databind.ObjectMapper; +import software.amazon.awssdk.services.glue.GlueClient; +import software.amazon.awssdk.services.glue.model.GetSchemaRequest; +import software.amazon.awssdk.services.glue.model.GetSchemaResponse; +import software.amazon.awssdk.services.glue.model.GetSchemaVersionRequest; +import software.amazon.awssdk.services.glue.model.GetSchemaVersionResponse; +import software.amazon.awssdk.services.glue.model.SchemaId; +import software.amazon.awssdk.services.glue.model.SchemaVersionNumber; public class GlueRegistryReader { @@ -46,15 +45,21 @@ public class GlueRegistryReader * @param glueSchemaName * @return */ - public GetSchemaVersionResult getSchemaVersionResult(String glueRegistryName, String glueSchemaName) + public GetSchemaVersionResponse getSchemaVersionResult(String glueRegistryName, String glueSchemaName) { - AWSGlue glue = AWSGlueClientBuilder.defaultClient(); - SchemaId sid = new SchemaId().withRegistryName(glueRegistryName).withSchemaName(glueSchemaName); - GetSchemaResult schemaResult = glue.getSchema(new GetSchemaRequest().withSchemaId(sid)); - SchemaVersionNumber svn = new SchemaVersionNumber().withVersionNumber(schemaResult.getLatestSchemaVersion()); - return glue.getSchemaVersion(new GetSchemaVersionRequest() - .withSchemaId(sid) - .withSchemaVersionNumber(svn) + GlueClient glue = GlueClient.create(); + SchemaId sid = SchemaId.builder() + .registryName(glueRegistryName) + .schemaName(glueSchemaName) + .build(); + GetSchemaResponse schemaResult = 
glue.getSchema(GetSchemaRequest.builder().schemaId(sid).build()); + SchemaVersionNumber svn = SchemaVersionNumber.builder() + .versionNumber(schemaResult.latestSchemaVersion()) + .build(); + return glue.getSchemaVersion(GetSchemaVersionRequest.builder() + .schemaId(sid) + .schemaVersionNumber(svn) + .build() ); } /** @@ -69,7 +74,7 @@ public GetSchemaVersionResult getSchemaVersionResult(String glueRegistryName, St */ public T getGlueSchema(String glueRegistryName, String glueSchemaName, Class clazz) throws Exception { - GetSchemaVersionResult result = getSchemaVersionResult(glueRegistryName, glueSchemaName); - return objectMapper.readValue(result.getSchemaDefinition(), clazz); + GetSchemaVersionResponse result = getSchemaVersionResult(glueRegistryName, glueSchemaName); + return objectMapper.readValue(result.schemaDefinition(), clazz); } } diff --git a/athena-msk/src/test/java/com/amazonaws/athena/connectors/msk/AmazonMskMetadataHandlerTest.java b/athena-msk/src/test/java/com/amazonaws/athena/connectors/msk/AmazonMskMetadataHandlerTest.java index 06a2fae970..19a5278eed 100644 --- a/athena-msk/src/test/java/com/amazonaws/athena/connectors/msk/AmazonMskMetadataHandlerTest.java +++ b/athena-msk/src/test/java/com/amazonaws/athena/connectors/msk/AmazonMskMetadataHandlerTest.java @@ -27,12 +27,6 @@ import com.amazonaws.athena.connector.lambda.domain.predicate.Constraints; import com.amazonaws.athena.connector.lambda.metadata.*; import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; -import com.amazonaws.services.glue.AWSGlue; -import com.amazonaws.services.glue.AWSGlueClientBuilder; -import com.amazonaws.services.glue.model.GetSchemaResult; -import com.amazonaws.services.glue.model.GetSchemaVersionResult; -import com.amazonaws.services.glue.model.ListRegistriesResult; -import com.amazonaws.services.glue.model.RegistryListItem; import org.apache.kafka.clients.consumer.MockConsumer; import org.apache.kafka.clients.consumer.OffsetResetStrategy; import 
org.apache.kafka.common.PartitionInfo; @@ -46,6 +40,14 @@ import org.mockito.Mockito; import org.mockito.MockitoAnnotations; import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.services.glue.GlueClient; +import software.amazon.awssdk.services.glue.model.GetSchemaRequest; +import software.amazon.awssdk.services.glue.model.GetSchemaResponse; +import software.amazon.awssdk.services.glue.model.GetSchemaVersionRequest; +import software.amazon.awssdk.services.glue.model.GetSchemaVersionResponse; +import software.amazon.awssdk.services.glue.model.ListRegistriesRequest; +import software.amazon.awssdk.services.glue.model.ListRegistriesResponse; +import software.amazon.awssdk.services.glue.model.RegistryListItem; import java.util.ArrayList; import java.util.Collections; @@ -69,10 +71,10 @@ public class AmazonMskMetadataHandlerTest { private Block partitions; private List partitionCols; private Constraints constraints; - private MockedStatic awsGlueClientBuilder; + private MockedStatic awsGlueClientBuilder; @Mock - AWSGlue awsGlue; + GlueClient awsGlue; MockConsumer consumer; @@ -110,8 +112,8 @@ public void setUp() { consumer.updateBeginningOffsets(partitionsStart); consumer.updateEndOffsets(partitionsEnd); consumer.updatePartitions("testTopic", partitionInfoList); - awsGlueClientBuilder = Mockito.mockStatic(AWSGlueClientBuilder.class); - awsGlueClientBuilder.when(()-> AWSGlueClientBuilder.defaultClient()).thenReturn(awsGlue); + awsGlueClientBuilder = Mockito.mockStatic(GlueClient.class); + awsGlueClientBuilder.when(()-> GlueClient.create()).thenReturn(awsGlue); amazonMskMetadataHandler = new AmazonMskMetadataHandler(consumer, configOptions); } @@ -123,14 +125,18 @@ public void tearDown() { @Test public void testDoListSchemaNames() { - Mockito.when(awsGlue.listRegistries(any())).thenAnswer(x -> (new ListRegistriesResult()).withRegistries( - (new RegistryListItem()).withRegistryName("Asdf").withDescription("something something {AthenaFederationMSK} 
something")) - ); + String registryName = "Asdf"; + Mockito.when(awsGlue.listRegistries(any(ListRegistriesRequest.class))).thenAnswer(x -> ListRegistriesResponse.builder() + .registries(RegistryListItem.builder() + .registryName(registryName) + .description("something something {AthenaFederationMSK} something") + .build()) + .build()); ListSchemasRequest listSchemasRequest = new ListSchemasRequest(federatedIdentity, QUERY_ID, "default"); ListSchemasResponse listSchemasResponse = amazonMskMetadataHandler.doListSchemaNames(blockAllocator, listSchemasRequest); - assertEquals(new ArrayList(com.google.common.collect.ImmutableList.of("Asdf")), new ArrayList(listSchemasResponse.getSchemas())); + assertEquals(new ArrayList(com.google.common.collect.ImmutableList.of(registryName)), new ArrayList(listSchemasResponse.getSchemas())); } @Test(expected = RuntimeException.class) @@ -146,26 +152,28 @@ public void testDoListSchemaNamesThrowsException() { public void testDoGetTable() throws Exception { String arn = "defaultarn", schemaName = "defaultschemaname", schemaVersionId = "defaultversionid"; Long latestSchemaVersion = 123L; - GetSchemaResult getSchemaResult = new GetSchemaResult(); - GetSchemaVersionResult getSchemaVersionResult = new GetSchemaVersionResult(); - getSchemaResult.setSchemaArn(arn); - getSchemaResult.setSchemaName(schemaName); - getSchemaResult.setLatestSchemaVersion(latestSchemaVersion); - getSchemaVersionResult.setSchemaArn(arn); - getSchemaVersionResult.setSchemaVersionId(schemaVersionId); - getSchemaVersionResult.setSchemaDefinition("{\n" + - "\t\"topicName\": \"testtable\",\n" + - "\t\"message\": {\n" + - "\t\t\"dataFormat\": \"json\",\n" + - "\t\t\"fields\": [{\n" + - "\t\t\t\"name\": \"intcol\",\n" + - "\t\t\t\"mapping\": \"intcol\",\n" + - "\t\t\t\"type\": \"INTEGER\"\n" + - "\t\t}]\n" + - "\t}\n" + - "}"); - Mockito.when(awsGlue.getSchema(any())).thenReturn(getSchemaResult); - 
Mockito.when(awsGlue.getSchemaVersion(any())).thenReturn(getSchemaVersionResult); + GetSchemaResponse getSchemaResponse = GetSchemaResponse.builder() + .schemaArn(arn) + .schemaName(schemaName) + .latestSchemaVersion(latestSchemaVersion) + .build(); + GetSchemaVersionResponse getSchemaVersionResponse = GetSchemaVersionResponse.builder() + .schemaArn(arn) + .schemaVersionId(schemaVersionId) + .schemaDefinition("{\n" + + "\t\"topicName\": \"testtable\",\n" + + "\t\"message\": {\n" + + "\t\t\"dataFormat\": \"json\",\n" + + "\t\t\"fields\": [{\n" + + "\t\t\t\"name\": \"intcol\",\n" + + "\t\t\t\"mapping\": \"intcol\",\n" + + "\t\t\t\"type\": \"INTEGER\"\n" + + "\t\t}]\n" + + "\t}\n" + + "}") + .build(); + Mockito.when(awsGlue.getSchema(any(GetSchemaRequest.class))).thenReturn(getSchemaResponse); + Mockito.when(awsGlue.getSchemaVersion(any(GetSchemaVersionRequest.class))).thenReturn(getSchemaVersionResponse); GetTableRequest getTableRequest = new GetTableRequest(federatedIdentity, QUERY_ID, "kafka", new TableName("default", "testtable"), Collections.emptyMap()); GetTableResponse getTableResponse = amazonMskMetadataHandler.doGetTable(blockAllocator, getTableRequest); assertEquals(1, getTableResponse.getSchema().getFields().size()); @@ -176,26 +184,28 @@ public void testDoGetSplits() throws Exception { String arn = "defaultarn", schemaName = "defaultschemaname", schemaVersionId = "defaultversionid"; Long latestSchemaVersion = 123L; - GetSchemaResult getSchemaResult = new GetSchemaResult(); - GetSchemaVersionResult getSchemaVersionResult = new GetSchemaVersionResult(); - getSchemaResult.setSchemaArn(arn); - getSchemaResult.setSchemaName(schemaName); - getSchemaResult.setLatestSchemaVersion(latestSchemaVersion); - getSchemaVersionResult.setSchemaArn(arn); - getSchemaVersionResult.setSchemaVersionId(schemaVersionId); - getSchemaVersionResult.setSchemaDefinition("{\n" + - "\t\"topicName\": \"testTopic\",\n" + - "\t\"message\": {\n" + - "\t\t\"dataFormat\": \"json\",\n" + - 
"\t\t\"fields\": [{\n" + - "\t\t\t\"name\": \"intcol\",\n" + - "\t\t\t\"mapping\": \"intcol\",\n" + - "\t\t\t\"type\": \"INTEGER\"\n" + - "\t\t}]\n" + - "\t}\n" + - "}"); - Mockito.when(awsGlue.getSchema(any())).thenReturn(getSchemaResult); - Mockito.when(awsGlue.getSchemaVersion(any())).thenReturn(getSchemaVersionResult); + GetSchemaResponse getSchemaResponse = GetSchemaResponse.builder() + .schemaArn(arn) + .schemaName(schemaName) + .latestSchemaVersion(latestSchemaVersion) + .build(); + GetSchemaVersionResponse getSchemaVersionResponse = GetSchemaVersionResponse.builder() + .schemaArn(arn) + .schemaVersionId(schemaVersionId) + .schemaDefinition("{\n" + + "\t\"topicName\": \"testTopic\",\n" + + "\t\"message\": {\n" + + "\t\t\"dataFormat\": \"json\",\n" + + "\t\t\"fields\": [{\n" + + "\t\t\t\"name\": \"intcol\",\n" + + "\t\t\t\"mapping\": \"intcol\",\n" + + "\t\t\t\"type\": \"INTEGER\"\n" + + "\t\t}]\n" + + "\t}\n" + + "}") + .build(); + Mockito.when(awsGlue.getSchema(any(GetSchemaRequest.class))).thenReturn(getSchemaResponse); + Mockito.when(awsGlue.getSchemaVersion(any(GetSchemaVersionRequest.class))).thenReturn(getSchemaVersionResponse); GetSplitsRequest request = new GetSplitsRequest( federatedIdentity, diff --git a/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/NeptuneMetadataHandler.java b/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/NeptuneMetadataHandler.java index 3f26ef07a8..ce62cb3833 100644 --- a/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/NeptuneMetadataHandler.java +++ b/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/NeptuneMetadataHandler.java @@ -45,10 +45,6 @@ import com.amazonaws.athena.connectors.neptune.qpt.NeptuneQueryPassthrough; import com.amazonaws.athena.connectors.neptune.rdf.NeptuneSparqlConnection; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.glue.AWSGlue; -import 
com.amazonaws.services.glue.model.GetTablesRequest; -import com.amazonaws.services.glue.model.GetTablesResult; -import com.amazonaws.services.glue.model.Table; import com.google.common.collect.ImmutableMap; import org.apache.arrow.util.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Field; @@ -58,6 +54,10 @@ import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.glue.GlueClient; +import software.amazon.awssdk.services.glue.model.GetTablesRequest; +import software.amazon.awssdk.services.glue.model.GetTablesResponse; +import software.amazon.awssdk.services.glue.model.Table; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.ArrayList; @@ -90,7 +90,7 @@ public class NeptuneMetadataHandler extends GlueMetadataHandler private final Logger logger = LoggerFactory.getLogger(NeptuneMetadataHandler.class); private static final String SOURCE_TYPE = "neptune"; // Used to denote the 'type' of this connector for diagnostic // purposes. 
- private final AWSGlue glue; + private final GlueClient glue; private final String glueDBName; private NeptuneConnection neptuneConnection = null; @@ -109,7 +109,7 @@ public NeptuneMetadataHandler(java.util.Map configOptions) @VisibleForTesting protected NeptuneMetadataHandler( - AWSGlue glue, + GlueClient glue, NeptuneConnection neptuneConnection, EncryptionKeyFactory keyFactory, SecretsManagerClient awsSecretsManager, @@ -174,14 +174,15 @@ public ListTablesResponse doListTables(BlockAllocator allocator, ListTablesReque logger.info("doListTables: enter - " + request); List tables = new ArrayList<>(); - GetTablesRequest getTablesRequest = new GetTablesRequest(); - getTablesRequest.setDatabaseName(request.getSchemaName()); + GetTablesRequest getTablesRequest = GetTablesRequest.builder() + .databaseName(request.getSchemaName()) + .build(); - GetTablesResult getTablesResult = glue.getTables(getTablesRequest); - List
glueTableList = getTablesResult.getTableList(); + GetTablesResponse getTablesResponse = glue.getTables(getTablesRequest); + List
glueTableList = getTablesResponse.tableList(); String schemaName = request.getSchemaName(); glueTableList.forEach(e -> { - tables.add(new TableName(schemaName, e.getName())); + tables.add(new TableName(schemaName, e.name())); }); return new ListTablesResponse(request.getCatalogName(), tables, null); diff --git a/athena-neptune/src/test/java/com/amazonaws/athena/connectors/neptune/NeptuneMetadataHandlerTest.java b/athena-neptune/src/test/java/com/amazonaws/athena/connectors/neptune/NeptuneMetadataHandlerTest.java index 07457b89b9..19987a844c 100644 --- a/athena-neptune/src/test/java/com/amazonaws/athena/connectors/neptune/NeptuneMetadataHandlerTest.java +++ b/athena-neptune/src/test/java/com/amazonaws/athena/connectors/neptune/NeptuneMetadataHandlerTest.java @@ -29,13 +29,6 @@ import com.amazonaws.athena.connector.lambda.metadata.ListTablesResponse; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.glue.AWSGlue; -import com.amazonaws.services.glue.model.Column; -import com.amazonaws.services.glue.model.GetTableResult; -import com.amazonaws.services.glue.model.GetTablesRequest; -import com.amazonaws.services.glue.model.GetTablesResult; -import com.amazonaws.services.glue.model.StorageDescriptor; -import com.amazonaws.services.glue.model.Table; import org.junit.After; import org.junit.Before; @@ -45,6 +38,12 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.glue.GlueClient; +import software.amazon.awssdk.services.glue.model.Column; +import software.amazon.awssdk.services.glue.model.GetTablesRequest; +import software.amazon.awssdk.services.glue.model.GetTablesResponse; +import software.amazon.awssdk.services.glue.model.StorageDescriptor; +import software.amazon.awssdk.services.glue.model.Table; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.ArrayList; @@ -67,10 +66,7 @@ 
public class NeptuneMetadataHandlerTest extends TestBase { private static final Logger logger = LoggerFactory.getLogger(NeptuneMetadataHandlerTest.class); @Mock - private AWSGlue glue; - - @Mock - private GetTablesRequest glueReq = null; + private GlueClient glue; private NeptuneMetadataHandler handler = null; @@ -114,23 +110,19 @@ public void doListTables() { logger.info("doListTables - enter"); List
tables = new ArrayList
(); - Table table1 = new Table(); - table1.setName("table1"); - Table table2 = new Table(); - table2.setName("table2"); - Table table3 = new Table(); - table3.setName("table3"); + Table table1 = Table.builder().name("table1").build(); + Table table2 = Table.builder().name("table2").build(); + Table table3 = Table.builder().name("table3").build(); tables.add(table1); tables.add(table2); tables.add(table3); - GetTablesResult tableResult = new GetTablesResult(); - tableResult.setTableList(tables); + GetTablesResponse tableResponse = GetTablesResponse.builder().tableList(tables).build(); ListTablesRequest req = new ListTablesRequest(IDENTITY, "queryId", "default", "default", null, UNLIMITED_PAGE_SIZE_VALUE); - when(glue.getTables(nullable(GetTablesRequest.class))).thenReturn(tableResult); + when(glue.getTables(nullable(GetTablesRequest.class))).thenReturn(tableResponse); ListTablesResponse res = handler.doListTables(allocator, req); @@ -144,35 +136,33 @@ public void doGetTable() throws Exception { logger.info("doGetTable - enter"); - Table table = new Table(); - table.setName("table1"); - Map expectedParams = new HashMap<>(); - expectedParams.put("sourceTable", table.getName()); - expectedParams.put("columnMapping", "col2=Col2,col3=Col3, col4=Col4"); - expectedParams.put("datetimeFormatMapping", "col2=someformat2, col1=someformat1 "); - - table.setParameters(expectedParams); List columns = new ArrayList<>(); - columns.add(new Column().withName("col1").withType("int").withComment("comment")); - columns.add(new Column().withName("col2").withType("bigint").withComment("comment")); - columns.add(new Column().withName("col3").withType("string").withComment("comment")); - columns.add(new Column().withName("col4").withType("timestamp").withComment("comment")); - columns.add(new Column().withName("col5").withType("date").withComment("comment")); - columns.add(new Column().withName("col6").withType("timestamptz").withComment("comment")); - columns.add(new 
Column().withName("col7").withType("timestamptz").withComment("comment")); - - StorageDescriptor storageDescriptor = new StorageDescriptor(); - storageDescriptor.setColumns(columns); - table.setStorageDescriptor(storageDescriptor); + Map<String, String> expectedParams = new HashMap<>(); + expectedParams.put("sourceTable", "table1"); + expectedParams.put("columnMapping", "col2=Col2,col3=Col3, col4=Col4"); + expectedParams.put("datetimeFormatMapping", "col2=someformat2, col1=someformat1 "); + columns.add(Column.builder().name("col1").type("int").comment("comment").build()); + columns.add(Column.builder().name("col2").type("bigint").comment("comment").build()); + columns.add(Column.builder().name("col3").type("string").comment("comment").build()); + columns.add(Column.builder().name("col4").type("timestamp").comment("comment").build()); + columns.add(Column.builder().name("col5").type("date").comment("comment").build()); + columns.add(Column.builder().name("col6").type("timestamptz").comment("comment").build()); + columns.add(Column.builder().name("col7").type("timestamptz").comment("comment").build()); + + StorageDescriptor storageDescriptor = StorageDescriptor.builder().columns(columns).build(); + Table table = Table.builder() + .name("table1") + .parameters(expectedParams) + .storageDescriptor(storageDescriptor) + .build(); GetTableRequest req = new GetTableRequest(IDENTITY, "queryId", "default", new TableName("schema1", "table1"), Collections.emptyMap()); - GetTableResult getTableResult = new GetTableResult(); - getTableResult.setTable(table); + software.amazon.awssdk.services.glue.model.GetTableResponse getTableResponse = software.amazon.awssdk.services.glue.model.GetTableResponse.builder().table(table).build(); - when(glue.getTable(nullable(com.amazonaws.services.glue.model.GetTableRequest.class))).thenReturn(getTableResult); + when(glue.getTable(nullable(software.amazon.awssdk.services.glue.model.GetTableRequest.class))).thenReturn(getTableResponse); GetTableResponse res = handler.doGetTable(allocator,
req); diff --git a/athena-redis/src/main/java/com/amazonaws/athena/connectors/redis/RedisMetadataHandler.java b/athena-redis/src/main/java/com/amazonaws/athena/connectors/redis/RedisMetadataHandler.java index 3204ec501e..565f592fc8 100644 --- a/athena-redis/src/main/java/com/amazonaws/athena/connectors/redis/RedisMetadataHandler.java +++ b/athena-redis/src/main/java/com/amazonaws/athena/connectors/redis/RedisMetadataHandler.java @@ -48,9 +48,6 @@ import com.amazonaws.athena.connectors.redis.lettuce.RedisConnectionWrapper; import com.amazonaws.athena.connectors.redis.qpt.RedisQueryPassthrough; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.glue.AWSGlue; -import com.amazonaws.services.glue.model.Database; -import com.amazonaws.services.glue.model.Table; import com.google.common.collect.ImmutableMap; import io.lettuce.core.KeyScanCursor; import io.lettuce.core.Range; @@ -63,6 +60,9 @@ import org.apache.arrow.vector.util.Text; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.glue.GlueClient; +import software.amazon.awssdk.services.glue.model.Database; +import software.amazon.awssdk.services.glue.model.Table; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Arrays; @@ -132,11 +132,11 @@ public class RedisMetadataHandler public static final String DEFAULT_REDIS_DB_NUMBER = "0"; //Used to filter out Glue tables which lack a redis endpoint. - private static final TableFilter TABLE_FILTER = (Table table) -> table.getParameters().containsKey(REDIS_ENDPOINT_PROP); + private static final TableFilter TABLE_FILTER = (Table table) -> table.parameters().containsKey(REDIS_ENDPOINT_PROP); //Used to filter out Glue databases which lack the REDIS_DB_FLAG in the URI. 
- private static final DatabaseFilter DB_FILTER = (Database database) -> (database.getLocationUri() != null && database.getLocationUri().contains(REDIS_DB_FLAG)); + private static final DatabaseFilter DB_FILTER = (Database database) -> (database.locationUri() != null && database.locationUri().contains(REDIS_DB_FLAG)); - private final AWSGlue awsGlue; + private final GlueClient awsGlue; private final RedisConnectionFactory redisConnectionFactory; private final RedisQueryPassthrough queryPassthrough = new RedisQueryPassthrough(); @@ -151,7 +151,7 @@ public RedisMetadataHandler(java.util.Map configOptions) @VisibleForTesting protected RedisMetadataHandler( - AWSGlue awsGlue, + GlueClient awsGlue, EncryptionKeyFactory keyFactory, SecretsManagerClient secretsManager, AmazonAthena athena, diff --git a/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/RedisMetadataHandlerTest.java b/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/RedisMetadataHandlerTest.java index f7fca053a2..ad3a780b82 100644 --- a/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/RedisMetadataHandlerTest.java +++ b/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/RedisMetadataHandlerTest.java @@ -37,7 +37,6 @@ import com.amazonaws.athena.connectors.redis.lettuce.RedisConnectionWrapper; import com.amazonaws.athena.connectors.redis.util.MockKeyScanCursor; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.glue.AWSGlue; import io.lettuce.core.Range; import io.lettuce.core.ScanArgs; import io.lettuce.core.ScanCursor; @@ -54,6 +53,7 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.glue.GlueClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; import 
software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; @@ -103,7 +103,7 @@ public class RedisMetadataHandlerTest private RedisCommandsWrapper mockSyncCommands; @Mock - private AWSGlue mockGlue; + private GlueClient mockGlue; @Mock private SecretsManagerClient mockSecretsManager; diff --git a/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/integ/RedisIntegTest.java b/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/integ/RedisIntegTest.java index 64776c3708..a73b6d1ba8 100644 --- a/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/integ/RedisIntegTest.java +++ b/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/integ/RedisIntegTest.java @@ -36,13 +36,6 @@ import com.amazonaws.services.elasticache.model.DescribeReplicationGroupsRequest; import com.amazonaws.services.elasticache.model.DescribeReplicationGroupsResult; import com.amazonaws.services.elasticache.model.Endpoint; -import com.amazonaws.services.glue.AWSGlue; -import com.amazonaws.services.glue.AWSGlueClientBuilder; -import com.amazonaws.services.glue.model.EntityNotFoundException; -import com.amazonaws.services.glue.model.GetTableRequest; -import com.amazonaws.services.glue.model.GetTableResult; -import com.amazonaws.services.glue.model.TableInput; -import com.amazonaws.services.glue.model.UpdateTableRequest; import com.amazonaws.services.lambda.AWSLambda; import com.amazonaws.services.lambda.AWSLambdaClientBuilder; import com.amazonaws.services.lambda.model.InvocationType; @@ -66,7 +59,13 @@ import software.amazon.awscdk.services.iam.PolicyDocument; import software.amazon.awscdk.services.s3.Bucket; import software.amazon.awscdk.services.s3.IBucket; +import software.amazon.awssdk.http.apache.ApacheHttpClient; +import software.amazon.awssdk.services.glue.GlueClient; +import software.amazon.awssdk.services.glue.model.EntityNotFoundException; +import software.amazon.awssdk.services.glue.model.TableInput; +import 
software.amazon.awssdk.services.glue.model.UpdateTableRequest; +import java.time.Duration; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -98,7 +97,7 @@ public class RedisIntegTest extends IntegrationTestBase private final String redisDbName; private final String redisTableNamePrefix; private final String lambdaFunctionName; - private final AWSGlue glue; + private final GlueClient glue; private final String redisStackName; private final Map environmentVars; @@ -120,8 +119,9 @@ public RedisIntegTest() redisDbName = (String) userSettings.get("redis_db_name"); redisTableNamePrefix = (String) userSettings.get("redis_table_name_prefix"); lambdaFunctionName = getLambdaFunctionName(); - glue = AWSGlueClientBuilder.standard() - .withClientConfiguration(new ClientConfiguration().withConnectionTimeout(GLUE_TIMEOUT)) + glue = GlueClient.builder() + .httpClientBuilder(ApacheHttpClient.builder() + .connectionTimeout(Duration.ofMillis(GLUE_TIMEOUT))) .build(); redisStackName = "integ-redis-instance-" + UUID.randomUUID(); environmentVars = new HashMap<>(); @@ -177,7 +177,7 @@ protected void cleanUp() // Delete the CloudFormation stack for Redis. 
cloudFormationClient.deleteStack(); // close glue client - glue.shutdown(); + glue.close(); } /** @@ -371,15 +371,16 @@ private Endpoint getRedisInstanceData(String redisName, boolean isCluster) * @param tableName * @return Table */ - private com.amazonaws.services.glue.model.Table getGlueTable(String databaseName, String tableName) + private software.amazon.awssdk.services.glue.model.Table getGlueTable(String databaseName, String tableName) { - com.amazonaws.services.glue.model.Table table; - GetTableRequest getTableRequest = new GetTableRequest(); - getTableRequest.setDatabaseName(databaseName); - getTableRequest.setName(tableName); + software.amazon.awssdk.services.glue.model.Table table; + software.amazon.awssdk.services.glue.model.GetTableRequest getTableRequest = software.amazon.awssdk.services.glue.model.GetTableRequest.builder() + .databaseName(databaseName) + .name(tableName) + .build(); try { - GetTableResult tableResult = glue.getTable(getTableRequest); - table = tableResult.getTable(); + software.amazon.awssdk.services.glue.model.GetTableResponse tableResponse = glue.getTable(getTableRequest); + table = tableResponse.table(); } catch (EntityNotFoundException e) { throw e; } @@ -392,23 +393,23 @@ private com.amazonaws.services.glue.model.Table getGlueTable(String databaseName * @param table * @return TableInput */ - private TableInput createTableInput(com.amazonaws.services.glue.model.Table table) { - TableInput tableInput = new TableInput(); - tableInput.setDescription(table.getDescription()); - tableInput.setLastAccessTime(table.getLastAccessTime()); - tableInput.setOwner(table.getOwner()); - tableInput.setName(table.getName()); - if (Optional.ofNullable(table.getStorageDescriptor()).isPresent()) { - tableInput.setStorageDescriptor(table.getStorageDescriptor()); - if (Optional.ofNullable(table.getStorageDescriptor().getParameters()).isPresent()) - tableInput.setParameters(table.getStorageDescriptor().getParameters()); + private TableInput 
createTableInput(software.amazon.awssdk.services.glue.model.Table table) { + TableInput.Builder tableInput = TableInput.builder() + .description(table.description()) + .lastAccessTime(table.lastAccessTime()) + .owner(table.owner()) + .name(table.name()); + if (Optional.ofNullable(table.storageDescriptor()).isPresent()) { + tableInput.storageDescriptor(table.storageDescriptor()); + if (Optional.ofNullable(table.storageDescriptor().parameters()).isPresent()) + tableInput.parameters(table.storageDescriptor().parameters()); } - tableInput.setPartitionKeys(table.getPartitionKeys()); - tableInput.setTableType(table.getTableType()); - tableInput.setViewExpandedText(table.getViewExpandedText()); - tableInput.setViewOriginalText(table.getViewOriginalText()); - tableInput.setParameters(table.getParameters()); - return tableInput; + tableInput.partitionKeys(table.partitionKeys()); + tableInput.tableType(table.tableType()); + tableInput.viewExpandedText(table.viewExpandedText()); + tableInput.viewOriginalText(table.viewOriginalText()); + tableInput.parameters(table.parameters()); + return tableInput.build(); } private void selectHashValue() @@ -541,8 +542,8 @@ public void standaloneSelectPrefixWithHashValue() tableParams.put("redis-cluster-flag", "false"); tableParams.put("redis-ssl-flag", "false"); tableParams.put("redis-db-number", STANDALONE_REDIS_DB_NUMBER); - TableInput tableInput = createTableInput(getGlueTable(redisDbName, redisTableNamePrefix + "_1")).withParameters(tableParams); - glue.updateTable(new UpdateTableRequest().withDatabaseName(redisDbName).withTableInput(tableInput)); + TableInput tableInput = createTableInput(getGlueTable(redisDbName, redisTableNamePrefix + "_1")).toBuilder().parameters(tableParams).build(); + glue.updateTable(UpdateTableRequest.builder().databaseName(redisDbName).tableInput(tableInput).build()); selectHashValue(); } @@ -562,8 +563,8 @@ public void standaloneSelectZsetWithHashValue() tableParams.put("redis-cluster-flag", "false"); 
tableParams.put("redis-ssl-flag", "false"); tableParams.put("redis-db-number", STANDALONE_REDIS_DB_NUMBER); - TableInput tableInput = createTableInput(getGlueTable(redisDbName, redisTableNamePrefix + "_1")).withParameters(tableParams); - glue.updateTable(new UpdateTableRequest().withDatabaseName(redisDbName).withTableInput(tableInput)); + TableInput tableInput = createTableInput(getGlueTable(redisDbName, redisTableNamePrefix + "_1")).toBuilder().parameters(tableParams).build(); + glue.updateTable(UpdateTableRequest.builder().databaseName(redisDbName).tableInput(tableInput).build()); selectHashValue(); } @@ -582,8 +583,8 @@ public void clusterSelectPrefixWithHashValue() tableParams.put("redis-value-type", "hash"); // hash tableParams.put("redis-cluster-flag", "true"); tableParams.put("redis-ssl-flag", "true"); - TableInput tableInput = createTableInput(getGlueTable(redisDbName, redisTableNamePrefix + "_1")).withParameters(tableParams); - glue.updateTable(new UpdateTableRequest().withDatabaseName(redisDbName).withTableInput(tableInput)); + TableInput tableInput = createTableInput(getGlueTable(redisDbName, redisTableNamePrefix + "_1")).toBuilder().parameters(tableParams).build(); + glue.updateTable(UpdateTableRequest.builder().databaseName(redisDbName).tableInput(tableInput).build()); selectHashValue(); } @@ -602,8 +603,8 @@ public void clusterSelectZsetWithHashValue() tableParams.put("redis-value-type", "hash"); // hash tableParams.put("redis-cluster-flag", "true"); tableParams.put("redis-ssl-flag", "true"); - TableInput tableInput = createTableInput(getGlueTable(redisDbName, redisTableNamePrefix + "_1")).withParameters(tableParams); - glue.updateTable(new UpdateTableRequest().withDatabaseName(redisDbName).withTableInput(tableInput)); + TableInput tableInput = createTableInput(getGlueTable(redisDbName, redisTableNamePrefix + "_1")).toBuilder().parameters(tableParams).build(); + 
glue.updateTable(UpdateTableRequest.builder().databaseName(redisDbName).tableInput(tableInput).build()); selectHashValue(); } @@ -623,8 +624,8 @@ public void standaloneSelectPrefixWithZsetValue() tableParams.put("redis-cluster-flag", "false"); tableParams.put("redis-ssl-flag", "false"); tableParams.put("redis-db-number", STANDALONE_REDIS_DB_NUMBER); - TableInput tableInput = createTableInput(getGlueTable(redisDbName, redisTableNamePrefix + "_2")).withParameters(tableParams); - glue.updateTable(new UpdateTableRequest().withDatabaseName(redisDbName).withTableInput(tableInput)); + TableInput tableInput = createTableInput(getGlueTable(redisDbName, redisTableNamePrefix + "_2")).toBuilder().parameters(tableParams).build(); + glue.updateTable(UpdateTableRequest.builder().databaseName(redisDbName).tableInput(tableInput).build()); selectZsetValue(); } @@ -644,8 +645,8 @@ public void standaloneSelectZsetWithZsetValue() tableParams.put("redis-cluster-flag", "false"); tableParams.put("redis-ssl-flag", "false"); tableParams.put("redis-db-number", STANDALONE_REDIS_DB_NUMBER); - TableInput tableInput = createTableInput(getGlueTable(redisDbName, redisTableNamePrefix + "_2")).withParameters(tableParams); - glue.updateTable(new UpdateTableRequest().withDatabaseName(redisDbName).withTableInput(tableInput)); + TableInput tableInput = createTableInput(getGlueTable(redisDbName, redisTableNamePrefix + "_2")).toBuilder().parameters(tableParams).build(); + glue.updateTable(UpdateTableRequest.builder().databaseName(redisDbName).tableInput(tableInput).build()); selectZsetValue(); } @@ -664,8 +665,8 @@ public void clusterSelectPrefixWithZsetValue() tableParams.put("redis-value-type", "zset"); // zset tableParams.put("redis-cluster-flag", "true"); tableParams.put("redis-ssl-flag", "true"); - TableInput tableInput = createTableInput(getGlueTable(redisDbName, redisTableNamePrefix + "_2")).withParameters(tableParams); - glue.updateTable(new 
UpdateTableRequest().withDatabaseName(redisDbName).withTableInput(tableInput)); + TableInput tableInput = createTableInput(getGlueTable(redisDbName, redisTableNamePrefix + "_2")).toBuilder().parameters(tableParams).build(); + glue.updateTable(UpdateTableRequest.builder().databaseName(redisDbName).tableInput(tableInput).build()); selectZsetValue(); } @@ -684,8 +685,8 @@ public void clusterSelectZsetWithZsetValue() tableParams.put("redis-value-type", "zset"); // zset tableParams.put("redis-cluster-flag", "true"); tableParams.put("redis-ssl-flag", "true"); - TableInput tableInput = createTableInput(getGlueTable(redisDbName, redisTableNamePrefix + "_2")).withParameters(tableParams); - glue.updateTable(new UpdateTableRequest().withDatabaseName(redisDbName).withTableInput(tableInput)); + TableInput tableInput = createTableInput(getGlueTable(redisDbName, redisTableNamePrefix + "_2")).toBuilder().parameters(tableParams).build(); + glue.updateTable(UpdateTableRequest.builder().databaseName(redisDbName).tableInput(tableInput).build()); selectZsetValue(); } @@ -705,8 +706,8 @@ public void standaloneSelectPrefixWithLiteralValue() tableParams.put("redis-cluster-flag", "false"); tableParams.put("redis-ssl-flag", "false"); tableParams.put("redis-db-number", STANDALONE_REDIS_DB_NUMBER); - TableInput tableInput = createTableInput(getGlueTable(redisDbName, redisTableNamePrefix + "_2")).withParameters(tableParams); - glue.updateTable(new UpdateTableRequest().withDatabaseName(redisDbName).withTableInput(tableInput)); + TableInput tableInput = createTableInput(getGlueTable(redisDbName, redisTableNamePrefix + "_2")).toBuilder().parameters(tableParams).build(); + glue.updateTable(UpdateTableRequest.builder().databaseName(redisDbName).tableInput(tableInput).build()); selectLiteralValue(); } @@ -726,8 +727,8 @@ public void standaloneSelectZsetWithLiteralValue() tableParams.put("redis-cluster-flag", "false"); tableParams.put("redis-ssl-flag", "false"); tableParams.put("redis-db-number", 
STANDALONE_REDIS_DB_NUMBER); - TableInput tableInput = createTableInput(getGlueTable(redisDbName, redisTableNamePrefix + "_2")).withParameters(tableParams); - glue.updateTable(new UpdateTableRequest().withDatabaseName(redisDbName).withTableInput(tableInput)); + TableInput tableInput = createTableInput(getGlueTable(redisDbName, redisTableNamePrefix + "_2")).toBuilder().parameters(tableParams).build(); + glue.updateTable(UpdateTableRequest.builder().databaseName(redisDbName).tableInput(tableInput).build()); selectLiteralValue(); } @@ -746,8 +747,8 @@ public void clusterSelectPrefixWithLiteralValue() tableParams.put("redis-value-type", "literal"); // literal tableParams.put("redis-cluster-flag", "true"); tableParams.put("redis-ssl-flag", "true"); - TableInput tableInput = createTableInput(getGlueTable(redisDbName, redisTableNamePrefix + "_2")).withParameters(tableParams); - glue.updateTable(new UpdateTableRequest().withDatabaseName(redisDbName).withTableInput(tableInput)); + TableInput tableInput = createTableInput(getGlueTable(redisDbName, redisTableNamePrefix + "_2")).toBuilder().parameters(tableParams).build(); + glue.updateTable(UpdateTableRequest.builder().databaseName(redisDbName).tableInput(tableInput).build()); selectLiteralValue(); } @@ -766,8 +767,8 @@ public void clusterSelectZsetWithLiteralValue() tableParams.put("redis-value-type", "literal"); // literal tableParams.put("redis-cluster-flag", "true"); tableParams.put("redis-ssl-flag", "true"); - TableInput tableInput = createTableInput(getGlueTable(redisDbName, redisTableNamePrefix + "_2")).withParameters(tableParams); - glue.updateTable(new UpdateTableRequest().withDatabaseName(redisDbName).withTableInput(tableInput)); + TableInput tableInput = createTableInput(getGlueTable(redisDbName, redisTableNamePrefix + "_2")).toBuilder().parameters(tableParams).build(); + glue.updateTable(UpdateTableRequest.builder().databaseName(redisDbName).tableInput(tableInput).build()); selectLiteralValue(); } diff --git 
a/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandler.java b/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandler.java index 5d2200c64e..61a89f90d0 100644 --- a/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandler.java +++ b/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandler.java @@ -43,8 +43,6 @@ import com.amazonaws.athena.connectors.timestream.qpt.TimestreamQueryPassthrough; import com.amazonaws.athena.connectors.timestream.query.QueryFactory; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.glue.AWSGlue; -import com.amazonaws.services.glue.model.Table; import com.amazonaws.services.timestreamquery.AmazonTimestreamQuery; import com.amazonaws.services.timestreamquery.model.ColumnInfo; import com.amazonaws.services.timestreamquery.model.Datum; @@ -61,6 +59,8 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.glue.GlueClient; +import software.amazon.awssdk.services.glue.model.Table; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Collections; @@ -82,14 +82,14 @@ public class TimestreamMetadataHandler //is indeed enabled for use by this connector. private static final String METADATA_FLAG = "timestream-metadata-flag"; //Used to filter out Glue tables which lack a timestream metadata flag. - private static final TableFilter TABLE_FILTER = (Table table) -> table.getParameters().containsKey(METADATA_FLAG); + private static final TableFilter TABLE_FILTER = (Table table) -> table.parameters().containsKey(METADATA_FLAG); private static final long MAX_RESULTS = 100_000; //Used to generate TimeStream queries using templates query patterns. 
private final QueryFactory queryFactory = new QueryFactory(); - private final AWSGlue glue; + private final GlueClient glue; private final AmazonTimestreamQuery tsQuery; private final AmazonTimestreamWrite tsMeta; @@ -108,7 +108,7 @@ public TimestreamMetadataHandler(java.util.Map configOptions) protected TimestreamMetadataHandler( AmazonTimestreamQuery tsQuery, AmazonTimestreamWrite tsMeta, - AWSGlue glue, + GlueClient glue, EncryptionKeyFactory keyFactory, SecretsManagerClient secretsManager, AmazonAthena athena, diff --git a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandlerTest.java b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandlerTest.java index 71b998d042..7c0746ef94 100644 --- a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandlerTest.java +++ b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandlerTest.java @@ -41,10 +41,6 @@ import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.glue.AWSGlue; -import com.amazonaws.services.glue.model.Column; -import com.amazonaws.services.glue.model.GetTableResult; -import com.amazonaws.services.glue.model.StorageDescriptor; import com.amazonaws.services.timestreamquery.AmazonTimestreamQuery; import com.amazonaws.services.timestreamquery.model.Datum; import com.amazonaws.services.timestreamquery.model.QueryRequest; @@ -68,6 +64,9 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.glue.GlueClient; +import software.amazon.awssdk.services.glue.model.Column; +import software.amazon.awssdk.services.glue.model.StorageDescriptor; import 
software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.ArrayList; @@ -80,7 +79,6 @@ import static com.amazonaws.athena.connector.lambda.metadata.ListTablesRequest.UNLIMITED_PAGE_SIZE_VALUE; import static org.junit.Assert.*; import static org.mockito.ArgumentMatchers.nullable; -import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -105,7 +103,7 @@ public class TimestreamMetadataHandlerTest @Mock protected AmazonTimestreamWrite mockTsMeta; @Mock - protected AWSGlue mockGlue; + protected GlueClient mockGlue; @Before public void setUp() @@ -238,8 +236,8 @@ public void doGetTable() { logger.info("doGetTable - enter"); - when(mockGlue.getTable(nullable(com.amazonaws.services.glue.model.GetTableRequest.class))) - .thenReturn(mock(GetTableResult.class)); + when(mockGlue.getTable(nullable(software.amazon.awssdk.services.glue.model.GetTableRequest.class))) + .thenReturn(software.amazon.awssdk.services.glue.model.GetTableResponse.builder().build()); when(mockTsQuery.query(nullable(QueryRequest.class))).thenAnswer((InvocationOnMock invocation) -> { QueryRequest request = invocation.getArgument(0, QueryRequest.class); @@ -294,23 +292,25 @@ public void doGetTableGlue() { logger.info("doGetTable - enter"); - when(mockGlue.getTable(nullable(com.amazonaws.services.glue.model.GetTableRequest.class))).thenAnswer((InvocationOnMock invocation) -> { - com.amazonaws.services.glue.model.GetTableRequest request = invocation.getArgument(0, - com.amazonaws.services.glue.model.GetTableRequest.class); + when(mockGlue.getTable(nullable(software.amazon.awssdk.services.glue.model.GetTableRequest.class))).thenAnswer((InvocationOnMock invocation) -> { + software.amazon.awssdk.services.glue.model.GetTableRequest request = invocation.getArgument(0, + software.amazon.awssdk.services.glue.model.GetTableRequest.class); List columns = new ArrayList<>(); - 
columns.add(new Column().withName("col1").withType("varchar")); - columns.add(new Column().withName("col2").withType("double")); - com.amazonaws.services.glue.model.Table table = new com.amazonaws.services.glue.model.Table(); - table.setName(request.getName()); - table.setDatabaseName(request.getDatabaseName()); - StorageDescriptor storageDescriptor = new StorageDescriptor(); - storageDescriptor.setColumns(columns); - table.setStorageDescriptor(storageDescriptor); - table.setViewOriginalText("view text"); - table.setParameters(Collections.singletonMap("timestream-metadata-flag", "timestream-metadata-flag")); - - return new GetTableResult().withTable(table); + columns.add(Column.builder().name("col1").type("varchar").build()); + columns.add(Column.builder().name("col2").type("double").build()); + StorageDescriptor storageDescriptor = StorageDescriptor.builder() + .columns(columns) + .build(); + software.amazon.awssdk.services.glue.model.Table table = software.amazon.awssdk.services.glue.model.Table.builder() + .name(request.name()) + .databaseName(request.databaseName()) + .storageDescriptor(storageDescriptor) + .viewOriginalText("view text") + .parameters(Collections.singletonMap("timestream-metadata-flag", "timestream-metadata-flag")) + .build(); + + return software.amazon.awssdk.services.glue.model.GetTableResponse.builder().table(table).build(); }); GetTableRequest req = new GetTableRequest(identity, @@ -340,25 +340,25 @@ public void doGetTimeSeriesTableGlue() { logger.info("doGetTimeSeriesTableGlue - enter"); - when(mockGlue.getTable(nullable(com.amazonaws.services.glue.model.GetTableRequest.class))).thenAnswer((InvocationOnMock invocation) -> { - com.amazonaws.services.glue.model.GetTableRequest request = invocation.getArgument(0, - com.amazonaws.services.glue.model.GetTableRequest.class); + when(mockGlue.getTable(nullable(software.amazon.awssdk.services.glue.model.GetTableRequest.class))).thenAnswer((InvocationOnMock invocation) -> { + 
software.amazon.awssdk.services.glue.model.GetTableRequest request = invocation.getArgument(0, + software.amazon.awssdk.services.glue.model.GetTableRequest.class); List columns = new ArrayList<>(); - columns.add(new Column().withName("az").withType("varchar")); - columns.add(new Column().withName("hostname").withType("varchar")); - columns.add(new Column().withName("region").withType("varchar")); - columns.add(new Column().withName("cpu_utilization").withType("ARRAY>")); - com.amazonaws.services.glue.model.Table table = new com.amazonaws.services.glue.model.Table(); - table.setName(request.getName()); - table.setDatabaseName(request.getDatabaseName()); - StorageDescriptor storageDescriptor = new StorageDescriptor(); - storageDescriptor.setColumns(columns); - table.setStorageDescriptor(storageDescriptor); - table.setViewOriginalText("SELECT az, hostname, region, cpu_utilization FROM TIMESERIES(metrics_table,'cpu_utilization')"); - table.setParameters(Collections.singletonMap("timestream-metadata-flag", "timestream-metadata-flag")); - - return new GetTableResult().withTable(table); + columns.add(Column.builder().name("az").type("varchar").build()); + columns.add(Column.builder().name("hostname").type("varchar").build()); + columns.add(Column.builder().name("region").type("varchar").build()); + columns.add(Column.builder().name("cpu_utilization").type("ARRAY>").build()); + StorageDescriptor storageDescriptor = StorageDescriptor.builder().columns(columns).build(); + software.amazon.awssdk.services.glue.model.Table table = software.amazon.awssdk.services.glue.model.Table.builder() + .name(request.name()) + .databaseName(request.databaseName()) + .storageDescriptor(storageDescriptor) + .viewOriginalText("SELECT az, hostname, region, cpu_utilization FROM TIMESERIES(metrics_table,'cpu_utilization')") + .parameters(Collections.singletonMap("timestream-metadata-flag", "timestream-metadata-flag")) + .build(); + + return 
software.amazon.awssdk.services.glue.model.GetTableResponse.builder().table(table).build(); }); GetTableRequest req = new GetTableRequest(identity, From bc12e0ba50d12e495794faf2dc102c8c7a83340b Mon Sep 17 00:00:00 2001 From: Aimery Methena <159072740+aimethed@users.noreply.github.com> Date: Mon, 22 Jul 2024 16:35:34 -0400 Subject: [PATCH 03/87] V2 migration jmes (#2053) --- athena-elasticsearch/pom.xml | 27 ------------------- athena-federation-integ-test/pom.xml | 27 ------------------- athena-federation-sdk/pom.xml | 27 ------------------- .../lambda/handlers/GlueMetadataHandler.java | 1 - athena-jdbc/pom.xml | 27 ------------------- athena-redis/pom.xml | 27 ------------------- 6 files changed, 136 deletions(-) diff --git a/athena-elasticsearch/pom.xml b/athena-elasticsearch/pom.xml index c4525f2353..7e9748b077 100644 --- a/athena-elasticsearch/pom.xml +++ b/athena-elasticsearch/pom.xml @@ -62,33 +62,6 @@ ${log4j2Version} runtime - - com.amazonaws - jmespath-java - ${aws-sdk.version} - - - com.fasterxml.jackson.datatype - jackson-datatype-jsr310 - - - com.fasterxml.jackson.dataformat - jackson-dataformat-cbor - - - com.fasterxml.jackson.core - jackson-core - - - com.fasterxml.jackson.core - jackson-databind - - - com.fasterxml.jackson.core - jackson-annotations - - - com.amazonaws aws-java-sdk-core diff --git a/athena-federation-integ-test/pom.xml b/athena-federation-integ-test/pom.xml index 3bf267087b..820ba09e41 100644 --- a/athena-federation-integ-test/pom.xml +++ b/athena-federation-integ-test/pom.xml @@ -11,33 +11,6 @@ jar Amazon Athena Query Federation Integ Test - - com.amazonaws - jmespath-java - ${aws-sdk.version} - - - com.fasterxml.jackson.datatype - jackson-datatype-jsr310 - - - com.fasterxml.jackson.dataformat - jackson-dataformat-cbor - - - com.fasterxml.jackson.core - jackson-core - - - com.fasterxml.jackson.core - jackson-databind - - - com.fasterxml.jackson.core - jackson-annotations - - - com.amazonaws aws-java-sdk-core diff --git 
a/athena-federation-sdk/pom.xml b/athena-federation-sdk/pom.xml index 07722a343d..ba3c01f0ab 100644 --- a/athena-federation-sdk/pom.xml +++ b/athena-federation-sdk/pom.xml @@ -26,33 +26,6 @@ - - com.amazonaws - jmespath-java - ${aws-sdk.version} - - - com.fasterxml.jackson.datatype - jackson-datatype-jsr310 - - - com.fasterxml.jackson.dataformat - jackson-dataformat-cbor - - - com.fasterxml.jackson.core - jackson-core - - - com.fasterxml.jackson.core - jackson-databind - - - com.fasterxml.jackson.core - jackson-annotations - - - com.amazonaws aws-java-sdk-core diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/GlueMetadataHandler.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/GlueMetadataHandler.java index c492cb208e..d1fa74f0ef 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/GlueMetadataHandler.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/GlueMetadataHandler.java @@ -263,7 +263,6 @@ protected ListSchemasResponse doListSchemaNames(BlockAllocator blockAllocator, L GetDatabasesRequest getDatabasesRequest = GetDatabasesRequest.builder() .catalogId(getCatalog(request)) .build(); - GetDatabasesIterable responses = awsGlue.getDatabasesPaginator(getDatabasesRequest); List schemas = responses.stream() .flatMap(response -> response.databaseList().stream()) diff --git a/athena-jdbc/pom.xml b/athena-jdbc/pom.xml index 46c354fd10..8ea9371ea6 100644 --- a/athena-jdbc/pom.xml +++ b/athena-jdbc/pom.xml @@ -9,33 +9,6 @@ athena-jdbc 2022.47.1 - - com.amazonaws - jmespath-java - ${aws-sdk.version} - - - com.fasterxml.jackson.datatype - jackson-datatype-jsr310 - - - com.fasterxml.jackson.dataformat - jackson-dataformat-cbor - - - com.fasterxml.jackson.core - jackson-core - - - com.fasterxml.jackson.core - jackson-databind - - - com.fasterxml.jackson.core - jackson-annotations - - - com.amazonaws 
aws-java-sdk-core diff --git a/athena-redis/pom.xml b/athena-redis/pom.xml index d1b63710d9..b4faced6d0 100644 --- a/athena-redis/pom.xml +++ b/athena-redis/pom.xml @@ -9,33 +9,6 @@ athena-redis 2022.47.1 - - com.amazonaws - jmespath-java - ${aws-sdk.version} - - - com.fasterxml.jackson.datatype - jackson-datatype-jsr310 - - - com.fasterxml.jackson.dataformat - jackson-dataformat-cbor - - - com.fasterxml.jackson.core - jackson-core - - - com.fasterxml.jackson.core - jackson-databind - - - com.fasterxml.jackson.core - jackson-annotations - - - com.amazonaws aws-java-sdk-core From 09ea549edc78395d2122622804f877df282b0880 Mon Sep 17 00:00:00 2001 From: Aimery Methena <159072740+aimethed@users.noreply.github.com> Date: Mon, 22 Jul 2024 16:52:14 -0400 Subject: [PATCH 04/87] v2 migration sts (#2054) --- athena-dynamodb/pom.xml | 6 ----- athena-federation-sdk/pom.xml | 6 ++--- .../CrossAccountCredentialsProvider.java | 24 +++++++++---------- athena-kafka/pom.xml | 5 ---- athena-msk/pom.xml | 5 ---- 5 files changed, 15 insertions(+), 31 deletions(-) diff --git a/athena-dynamodb/pom.xml b/athena-dynamodb/pom.xml index c3841e0e6f..effbdf0bb1 100644 --- a/athena-dynamodb/pom.xml +++ b/athena-dynamodb/pom.xml @@ -20,12 +20,6 @@ athena-federation-integ-test 2022.47.1 test - - - com.amazonaws - aws-java-sdk-sts - - software.amazon.awssdk diff --git a/athena-federation-sdk/pom.xml b/athena-federation-sdk/pom.xml index ba3c01f0ab..9c1867fb72 100644 --- a/athena-federation-sdk/pom.xml +++ b/athena-federation-sdk/pom.xml @@ -97,9 +97,9 @@ - com.amazonaws - aws-java-sdk-sts - ${aws-sdk.version} + software.amazon.awssdk + sts + ${aws-sdk-v2.version} com.fasterxml.jackson.datatype diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/credentials/CrossAccountCredentialsProvider.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/credentials/CrossAccountCredentialsProvider.java index 5ed2890fb3..d71d1b1657 100644 --- 
a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/credentials/CrossAccountCredentialsProvider.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/credentials/CrossAccountCredentialsProvider.java @@ -23,13 +23,12 @@ import com.amazonaws.auth.AWSStaticCredentialsProvider; import com.amazonaws.auth.BasicSessionCredentials; import com.amazonaws.auth.DefaultAWSCredentialsProviderChain; -import com.amazonaws.services.securitytoken.AWSSecurityTokenService; -import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceAsyncClientBuilder; -import com.amazonaws.services.securitytoken.model.AssumeRoleRequest; -import com.amazonaws.services.securitytoken.model.AssumeRoleResult; -import com.amazonaws.services.securitytoken.model.Credentials; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.sts.StsClient; +import software.amazon.awssdk.services.sts.model.AssumeRoleRequest; +import software.amazon.awssdk.services.sts.model.AssumeRoleResponse; +import software.amazon.awssdk.services.sts.model.Credentials; import java.util.Map; @@ -44,13 +43,14 @@ public static AWSCredentialsProvider getCrossAccountCredentialsIfPresent(Mapguava ${guava.version} - - com.amazonaws - aws-java-sdk-sts - ${aws-sdk.version} - + - com.amazonaws - aws-java-sdk-athena - ${aws-sdk.version} + software.amazon.awssdk + athena + ${aws-sdk-v2.version} diff --git a/athena-federation-integ-test/src/main/java/com/amazonaws/athena/connector/integ/IntegrationTestBase.java b/athena-federation-integ-test/src/main/java/com/amazonaws/athena/connector/integ/IntegrationTestBase.java index bb2b7c5822..880debe5bc 100644 --- a/athena-federation-integ-test/src/main/java/com/amazonaws/athena/connector/integ/IntegrationTestBase.java +++ b/athena-federation-integ-test/src/main/java/com/amazonaws/athena/connector/integ/IntegrationTestBase.java @@ -25,18 +25,6 @@ import com.amazonaws.athena.connector.integ.data.TestConfig; import 
com.amazonaws.athena.connector.integ.providers.ConnectorVpcAttributesProvider; import com.amazonaws.athena.connector.integ.providers.SecretsManagerCredentialsProvider; -import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.athena.AmazonAthenaClientBuilder; -import com.amazonaws.services.athena.model.Datum; -import com.amazonaws.services.athena.model.GetQueryExecutionRequest; -import com.amazonaws.services.athena.model.GetQueryExecutionResult; -import com.amazonaws.services.athena.model.GetQueryResultsRequest; -import com.amazonaws.services.athena.model.GetQueryResultsResult; -import com.amazonaws.services.athena.model.ListDatabasesRequest; -import com.amazonaws.services.athena.model.ListDatabasesResult; -import com.amazonaws.services.athena.model.ResultConfiguration; -import com.amazonaws.services.athena.model.Row; -import com.amazonaws.services.athena.model.StartQueryExecutionRequest; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.annotations.AfterClass; @@ -44,6 +32,17 @@ import org.testng.annotations.Test; import software.amazon.awscdk.core.Stack; import software.amazon.awscdk.services.iam.PolicyDocument; +import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.athena.model.Datum; +import software.amazon.awssdk.services.athena.model.GetQueryExecutionRequest; +import software.amazon.awssdk.services.athena.model.GetQueryExecutionResponse; +import software.amazon.awssdk.services.athena.model.GetQueryResultsRequest; +import software.amazon.awssdk.services.athena.model.GetQueryResultsResponse; +import software.amazon.awssdk.services.athena.model.ListDatabasesRequest; +import software.amazon.awssdk.services.athena.model.ListDatabasesResponse; +import software.amazon.awssdk.services.athena.model.ResultConfiguration; +import software.amazon.awssdk.services.athena.model.Row; +import software.amazon.awssdk.services.athena.model.StartQueryExecutionRequest; import 
java.time.LocalDate; import java.time.LocalDateTime; @@ -94,7 +93,7 @@ public abstract class IntegrationTestBase private final ConnectorStackProvider connectorStackProvider; private final String lambdaFunctionName; - private final AmazonAthena athenaClient; + private final AthenaClient athenaClient; private final TestConfig testConfig; private final Optional vpcAttributes; private final Optional secretCredentials; @@ -128,7 +127,7 @@ protected void setSpecificResource(final Stack stack) }; lambdaFunctionName = connectorStackProvider.getLambdaFunctionName(); - athenaClient = AmazonAthenaClientBuilder.defaultClient(); + athenaClient = AthenaClient.create(); athenaWorkgroup = getAthenaWorkgroup(); athenaResultLocation = getAthenaResultLocation(); } @@ -262,14 +261,15 @@ protected void cleanUp() public List listDatabases() { logger.info("listDatabases({})", lambdaFunctionName); - ListDatabasesRequest listDatabasesRequest = new ListDatabasesRequest() - .withCatalogName(lambdaFunctionName); + ListDatabasesRequest listDatabasesRequest = ListDatabasesRequest.builder() + .catalogName(lambdaFunctionName) + .build(); - ListDatabasesResult listDatabasesResult = athenaClient.listDatabases(listDatabasesRequest); - logger.info("Results: [{}]", listDatabasesResult); + ListDatabasesResponse listDatabasesResponse = athenaClient.listDatabases(listDatabasesRequest); + logger.info("Results: [{}]", listDatabasesResponse); List dbNames = new ArrayList<>(); - listDatabasesResult.getDatabaseList().forEach(db -> dbNames.add(db.getName())); + listDatabasesResponse.databaseList().forEach(db -> dbNames.add(db.name())); return dbNames; } @@ -285,8 +285,8 @@ public List listTables(String databaseName) { String query = String.format("show tables in `%s`.`%s`;", lambdaFunctionName, databaseName); List tableNames = new ArrayList<>(); - startQueryExecution(query).getResultSet().getRows() - .forEach(row -> tableNames.add(row.getData().get(0).getVarCharValue())); + 
startQueryExecution(query).resultSet().rows() + .forEach(row -> tableNames.add(row.data().get(0).varCharValue())); return tableNames; } @@ -303,9 +303,9 @@ public Map describeTable(String databaseName, String tableName) { String query = String.format("describe `%s`.`%s`.`%s`;", lambdaFunctionName, databaseName, tableName); Map schema = new HashMap<>(); - startQueryExecution(query).getResultSet().getRows() + startQueryExecution(query).resultSet().rows() .forEach(row -> { - String property = row.getData().get(0).getVarCharValue(); + String property = row.data().get(0).varCharValue(); String[] columnProperties = property.split("\t"); if (columnProperties.length == 2) { schema.put(columnProperties[0], columnProperties[1]); @@ -321,21 +321,22 @@ public Map describeTable(String databaseName, String tableName) * @return The query results object containing the metadata and row information. * @throws RuntimeException The Query is cancelled or has failed. */ - public GetQueryResultsResult startQueryExecution(String query) + public GetQueryResultsResponse startQueryExecution(String query) throws RuntimeException { - StartQueryExecutionRequest startQueryExecutionRequest = new StartQueryExecutionRequest() - .withWorkGroup(athenaWorkgroup) - .withQueryString(query) - .withResultConfiguration(new ResultConfiguration().withOutputLocation(athenaResultLocation)); + StartQueryExecutionRequest startQueryExecutionRequest = StartQueryExecutionRequest.builder() + .workGroup(athenaWorkgroup) + .queryString(query) + .resultConfiguration(ResultConfiguration.builder().outputLocation(athenaResultLocation).build()) + .build(); String queryExecutionId = sendAthenaQuery(startQueryExecutionRequest); logger.info("Query: [{}], Query Id: [{}]", query, queryExecutionId); waitForAthenaQueryResults(queryExecutionId); - GetQueryResultsResult getQueryResultsResult = getAthenaQueryResults(queryExecutionId); + GetQueryResultsResponse getQueryResultsResponse = getAthenaQueryResults(queryExecutionId); 
//logger.info("Results: [{}]", getQueryResultsResult.toString()); - return getQueryResultsResult; + return getQueryResultsResponse; } /** @@ -345,7 +346,7 @@ public GetQueryResultsResult startQueryExecution(String query) */ private String sendAthenaQuery(StartQueryExecutionRequest startQueryExecutionRequest) { - return athenaClient.startQueryExecution(startQueryExecutionRequest).getQueryExecutionId(); + return athenaClient.startQueryExecution(startQueryExecutionRequest).queryExecutionId(); } /** @@ -357,12 +358,13 @@ private void waitForAthenaQueryResults(String queryExecutionId) throws RuntimeException { // Poll the state of the query request while it is queued or running - GetQueryExecutionRequest getQueryExecutionRequest = new GetQueryExecutionRequest() - .withQueryExecutionId(queryExecutionId); - GetQueryExecutionResult getQueryExecutionResult; + GetQueryExecutionRequest getQueryExecutionRequest = GetQueryExecutionRequest.builder() + .queryExecutionId(queryExecutionId) + .build(); + GetQueryExecutionResponse getQueryExecutionResponse; while (true) { - getQueryExecutionResult = athenaClient.getQueryExecution(getQueryExecutionRequest); - String queryState = getQueryExecutionResult.getQueryExecution().getStatus().getState(); + getQueryExecutionResponse = athenaClient.getQueryExecution(getQueryExecutionRequest); + String queryState = getQueryExecutionResponse.queryExecution().status().state().toString(); logger.info("Query State: {}", queryState); if (queryState.equals(ATHENA_QUERY_QUEUED_STATE) || queryState.equals(ATHENA_QUERY_RUNNING_STATE)) { try { @@ -374,8 +376,8 @@ private void waitForAthenaQueryResults(String queryExecutionId) } } else if (queryState.equals(ATHENA_QUERY_FAILED_STATE) || queryState.equals(ATHENA_QUERY_CANCELLED_STATE)) { - throw new RuntimeException(getQueryExecutionResult - .getQueryExecution().getStatus().getStateChangeReason()); + throw new RuntimeException(getQueryExecutionResponse + .queryExecution().status().stateChangeReason()); } 
break; } @@ -386,11 +388,12 @@ else if (queryState.equals(ATHENA_QUERY_FAILED_STATE) || queryState.equals(ATHEN * @param queryExecutionId The query's Id. * @return The query results object containing the metadata and row information. */ - private GetQueryResultsResult getAthenaQueryResults(String queryExecutionId) + private GetQueryResultsResponse getAthenaQueryResults(String queryExecutionId) { // Get query results - GetQueryResultsRequest getQueryResultsRequest = new GetQueryResultsRequest() - .withQueryExecutionId(queryExecutionId); + GetQueryResultsRequest getQueryResultsRequest = GetQueryResultsRequest.builder() + .queryExecutionId(queryExecutionId) + .build(); return athenaClient.getQueryResults(getQueryResultsRequest); } @@ -472,8 +475,8 @@ public float calculateThroughput(String lambdaFnName, String schemaName, String public List processQuery(String query) { List firstColValues = new ArrayList<>(); - skipColumnHeaderRow(startQueryExecution(query).getResultSet().getRows()) - .forEach(row -> firstColValues.add(row.getData().get(0).getVarCharValue())); + skipColumnHeaderRow(startQueryExecution(query).resultSet().rows()) + .forEach(row -> firstColValues.add(row.data().get(0).varCharValue())); return firstColValues; } public List skipColumnHeaderRow(List rows) @@ -493,13 +496,13 @@ public void selectIntegerTypeTest() String query = String.format("select int_type from %s.%s.%s;", lambdaFunctionName, INTEG_TEST_DATABASE_NAME, TEST_DATATYPES_TABLE_NAME); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List values = new ArrayList<>(); - rows.forEach(row -> values.add(Integer.parseInt(row.getData().get(0).getVarCharValue().split("\\.")[0]))); + rows.forEach(row -> values.add(Integer.parseInt(row.data().get(0).varCharValue().split("\\.")[0]))); logger.info("Titles: {}", values); assertEquals("Wrong number of DB 
records found.", 1, values.size()); assertTrue("Integer not found: " + TEST_DATATYPES_INT_VALUE, values.contains(TEST_DATATYPES_INT_VALUE)); @@ -514,13 +517,13 @@ public void selectVarcharTypeTest() String query = String.format("select varchar_type from %s.%s.%s;", lambdaFunctionName, INTEG_TEST_DATABASE_NAME, TEST_DATATYPES_TABLE_NAME); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List values = new ArrayList<>(); - rows.forEach(row -> values.add(row.getData().get(0).getVarCharValue())); + rows.forEach(row -> values.add(row.data().get(0).varCharValue())); logger.info("Titles: {}", values); assertEquals("Wrong number of DB records found.", 1, values.size()); assertTrue("Varchar not found: " + TEST_DATATYPES_VARCHAR_VALUE, values.contains(TEST_DATATYPES_VARCHAR_VALUE)); @@ -535,13 +538,13 @@ public void selectBooleanTypeTest() String query = String.format("select boolean_type from %s.%s.%s;", lambdaFunctionName, INTEG_TEST_DATABASE_NAME, TEST_DATATYPES_TABLE_NAME); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List values = new ArrayList<>(); - rows.forEach(row -> values.add(Boolean.valueOf(row.getData().get(0).getVarCharValue()))); + rows.forEach(row -> values.add(Boolean.valueOf(row.data().get(0).varCharValue()))); logger.info("Titles: {}", values); assertEquals("Wrong number of DB records found.", 1, values.size()); assertTrue("Boolean not found: " + TEST_DATATYPES_BOOLEAN_VALUE, values.contains(TEST_DATATYPES_BOOLEAN_VALUE)); @@ -556,13 +559,13 @@ public void selectSmallintTypeTest() String query = String.format("select smallint_type from %s.%s.%s;", lambdaFunctionName, INTEG_TEST_DATABASE_NAME, TEST_DATATYPES_TABLE_NAME); - List rows = 
startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List values = new ArrayList<>(); - rows.forEach(row -> values.add(Short.valueOf(row.getData().get(0).getVarCharValue().split("\\.")[0]))); + rows.forEach(row -> values.add(Short.valueOf(row.data().get(0).varCharValue().split("\\.")[0]))); logger.info("Titles: {}", values); assertEquals("Wrong number of DB records found.", 1, values.size()); assertTrue("Smallint not found: " + TEST_DATATYPES_SHORT_VALUE, values.contains(TEST_DATATYPES_SHORT_VALUE)); @@ -577,13 +580,13 @@ public void selectBigintTypeTest() String query = String.format("select bigint_type from %s.%s.%s;", lambdaFunctionName, INTEG_TEST_DATABASE_NAME, TEST_DATATYPES_TABLE_NAME); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List values = new ArrayList<>(); - rows.forEach(row -> values.add(Long.valueOf(row.getData().get(0).getVarCharValue().split("\\.")[0]))); + rows.forEach(row -> values.add(Long.valueOf(row.data().get(0).varCharValue().split("\\.")[0]))); assertEquals("Wrong number of DB records found.", 1, values.size()); assertTrue("Bigint not found: " + TEST_DATATYPES_LONG_VALUE, values.contains(TEST_DATATYPES_LONG_VALUE)); } @@ -597,13 +600,13 @@ public void selectFloat4TypeTest() String query = String.format("select float4_type from %s.%s.%s;", lambdaFunctionName, INTEG_TEST_DATABASE_NAME, TEST_DATATYPES_TABLE_NAME); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List values = new ArrayList<>(); - rows.forEach(row -> values.add(Float.valueOf(row.getData().get(0).getVarCharValue()))); + rows.forEach(row -> 
values.add(Float.valueOf(row.data().get(0).varCharValue()))); assertEquals("Wrong number of DB records found.", 1, values.size()); assertTrue("Float4 not found: " + TEST_DATATYPES_SINGLE_PRECISION_VALUE, values.contains(TEST_DATATYPES_SINGLE_PRECISION_VALUE)); } @@ -617,13 +620,13 @@ public void selectFloat8TypeTest() String query = String.format("select float8_type from %s.%s.%s;", lambdaFunctionName, INTEG_TEST_DATABASE_NAME, TEST_DATATYPES_TABLE_NAME); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List values = new ArrayList<>(); - rows.forEach(row -> values.add(Double.valueOf(row.getData().get(0).getVarCharValue()))); + rows.forEach(row -> values.add(Double.valueOf(row.data().get(0).varCharValue()))); assertEquals("Wrong number of DB records found.", 1, values.size()); assertTrue("Float8 not found: " + TEST_DATATYPES_DOUBLE_PRECISION_VALUE, values.contains(TEST_DATATYPES_DOUBLE_PRECISION_VALUE)); } @@ -637,13 +640,13 @@ public void selectDateTypeTest() String query = String.format("select date_type from %s.%s.%s;", lambdaFunctionName, INTEG_TEST_DATABASE_NAME, TEST_DATATYPES_TABLE_NAME); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List values = new ArrayList<>(); - rows.forEach(row -> values.add(LocalDate.parse(row.getData().get(0).getVarCharValue()))); + rows.forEach(row -> values.add(LocalDate.parse(row.data().get(0).varCharValue()))); assertEquals("Wrong number of DB records found.", 1, values.size()); assertTrue("Date not found: " + TEST_DATATYPES_DATE_VALUE, values.contains(LocalDate.parse(TEST_DATATYPES_DATE_VALUE))); } @@ -657,15 +660,15 @@ public void selectTimestampTypeTest() String query = String.format("select timestamp_type from %s.%s.%s;", 
lambdaFunctionName, INTEG_TEST_DATABASE_NAME, TEST_DATATYPES_TABLE_NAME); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List values = new ArrayList<>(); // for some reason, timestamps lose their 'T'. - rows.forEach(row -> values.add(LocalDateTime.parse(row.getData().get(0).getVarCharValue().replace(' ', 'T')))); - logger.info(rows.get(0).getData().get(0).getVarCharValue()); + rows.forEach(row -> values.add(LocalDateTime.parse(row.data().get(0).varCharValue().replace(' ', 'T')))); + logger.info(rows.get(0).data().get(0).varCharValue()); assertEquals("Wrong number of DB records found.", 1, values.size()); assertTrue("Date not found: " + TEST_DATATYPES_TIMESTAMP_VALUE, values.contains(LocalDateTime.parse(TEST_DATATYPES_TIMESTAMP_VALUE))); } @@ -679,20 +682,19 @@ public void selectByteArrayTypeTest() String query = String.format("select byte_type from %s.%s.%s;", lambdaFunctionName, INTEG_TEST_DATABASE_NAME, TEST_DATATYPES_TABLE_NAME); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List values = new ArrayList<>(); - rows.forEach(row -> values.add(row.getData().get(0).getVarCharValue())); - Datum actual = rows.get(0).getData().get(0); - Datum expected = new Datum(); - expected.setVarCharValue("deadbeef"); - logger.info(rows.get(0).getData().get(0).getVarCharValue()); + rows.forEach(row -> values.add(row.data().get(0).varCharValue())); + Datum actual = rows.get(0).data().get(0); + Datum expected = Datum.builder().varCharValue("deadbeef").build(); + logger.info(rows.get(0).data().get(0).varCharValue()); assertEquals("Wrong number of DB records found.", 1, values.size()); - String bytestring = actual.getVarCharValue().replace(" ", ""); - assertEquals("Byte[] not found: 
" + Arrays.toString(TEST_DATATYPES_BYTE_ARRAY_VALUE), expected.getVarCharValue(), bytestring); + String bytestring = actual.varCharValue().replace(" ", ""); + assertEquals("Byte[] not found: " + Arrays.toString(TEST_DATATYPES_BYTE_ARRAY_VALUE), expected.varCharValue(), bytestring); } @Test @@ -704,17 +706,16 @@ public void selectVarcharListTypeTest() String query = String.format("select textarray_type from %s.%s.%s;", lambdaFunctionName, INTEG_TEST_DATABASE_NAME, TEST_DATATYPES_TABLE_NAME); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List values = new ArrayList<>(); - rows.forEach(row -> values.add(row.getData().get(0).getVarCharValue())); - Datum actual = rows.get(0).getData().get(0); - Datum expected = new Datum(); - expected.setVarCharValue(TEST_DATATYPES_VARCHAR_ARRAY_VALUE); - logger.info(rows.get(0).getData().get(0).getVarCharValue()); + rows.forEach(row -> values.add(row.data().get(0).varCharValue())); + Datum actual = rows.get(0).data().get(0); + Datum expected = Datum.builder().varCharValue(TEST_DATATYPES_VARCHAR_ARRAY_VALUE).build(); + logger.info(rows.get(0).data().get(0).varCharValue()); assertEquals("Wrong number of DB records found.", 1, values.size()); assertEquals("List not found: " + TEST_DATATYPES_VARCHAR_ARRAY_VALUE, expected, actual); } @@ -728,13 +729,13 @@ public void selectNullValueTest() String query = String.format("select int_type from %s.%s.%s;", lambdaFunctionName, INTEG_TEST_DATABASE_NAME, TEST_NULL_TABLE_NAME); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } - Datum actual = rows.get(0).getData().get(0); - assertNull("Value not 'null'. 
Received: " + actual.getVarCharValue(), actual.getVarCharValue()); + Datum actual = rows.get(0).data().get(0); + assertNull("Value not 'null'. Received: " + actual.varCharValue(), actual.varCharValue()); } @Test @@ -746,7 +747,7 @@ public void selectEmptyTableTest() String query = String.format("select int_type from %s.%s.%s;", lambdaFunctionName, INTEG_TEST_DATABASE_NAME, TEST_EMPTY_TABLE_NAME); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); diff --git a/athena-federation-sdk/pom.xml b/athena-federation-sdk/pom.xml index 9c1867fb72..b19652c867 100644 --- a/athena-federation-sdk/pom.xml +++ b/athena-federation-sdk/pom.xml @@ -140,9 +140,15 @@ ${aws-sdk-v2.version} - com.amazonaws - aws-java-sdk-athena - ${aws-sdk.version} + software.amazon.awssdk + athena + ${aws-sdk-v2.version} + + + software.amazon.awssdk + netty-nio-client + + org.apache.arrow diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/QueryStatusChecker.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/QueryStatusChecker.java index 399424d441..491c46191f 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/QueryStatusChecker.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/QueryStatusChecker.java @@ -19,13 +19,13 @@ */ package com.amazonaws.athena.connector.lambda; -import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.athena.model.GetQueryExecutionRequest; -import com.amazonaws.services.athena.model.GetQueryExecutionResult; -import com.amazonaws.services.athena.model.InvalidRequestException; import com.google.common.collect.ImmutableSet; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; +import 
software.amazon.awssdk.services.athena.model.GetQueryExecutionRequest; +import software.amazon.awssdk.services.athena.model.GetQueryExecutionResponse; +import software.amazon.awssdk.services.athena.model.InvalidRequestException; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; @@ -50,12 +50,12 @@ public class QueryStatusChecker private boolean wasStarted = false; private final AtomicBoolean isRunning = new AtomicBoolean(true); - private final AmazonAthena athena; + private final AthenaClient athena; private final ThrottlingInvoker athenaInvoker; private final String queryId; private final Thread checkerThread; - public QueryStatusChecker(AmazonAthena athena, ThrottlingInvoker athenaInvoker, String queryId) + public QueryStatusChecker(AthenaClient athena, ThrottlingInvoker athenaInvoker, String queryId) { this.athena = athena; this.athenaInvoker = athenaInvoker; @@ -114,8 +114,8 @@ private void checkStatus(String queryId, int attempt) { logger.debug(format("Background thread checking status of Athena query %s, attempt %d", queryId, attempt)); try { - GetQueryExecutionResult queryExecution = athenaInvoker.invoke(() -> athena.getQueryExecution(new GetQueryExecutionRequest().withQueryExecutionId(queryId))); - String state = queryExecution.getQueryExecution().getStatus().getState(); + GetQueryExecutionResponse queryExecution = athenaInvoker.invoke(() -> athena.getQueryExecution(GetQueryExecutionRequest.builder().queryExecutionId(queryId).build())); + String state = queryExecution.queryExecution().status().state().toString(); if (TERMINAL_STATES.contains(state)) { logger.debug("Query {} has terminated with state {}", queryId, state); isRunning.set(false); diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/AthenaExceptionFilter.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/AthenaExceptionFilter.java index 9a9d7e2c46..6109038723 100644 --- 
a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/AthenaExceptionFilter.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/AthenaExceptionFilter.java @@ -20,7 +20,7 @@ package com.amazonaws.athena.connector.lambda.handlers; import com.amazonaws.athena.connector.lambda.ThrottlingInvoker; -import com.amazonaws.services.athena.model.TooManyRequestsException; +import software.amazon.awssdk.services.athena.model.TooManyRequestsException; public class AthenaExceptionFilter implements ThrottlingInvoker.ExceptionFilter diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/GlueMetadataHandler.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/GlueMetadataHandler.java index d1fa74f0ef..fcd80f1773 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/GlueMetadataHandler.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/GlueMetadataHandler.java @@ -32,7 +32,6 @@ import com.amazonaws.athena.connector.lambda.metadata.MetadataRequest; import com.amazonaws.athena.connector.lambda.metadata.glue.GlueFieldLexer; import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory; -import com.amazonaws.services.athena.AmazonAthena; import com.google.common.base.Splitter; import com.google.common.base.Strings; import com.google.common.collect.ImmutableMap; @@ -43,6 +42,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.http.apache.ApacheHttpClient; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.glue.GlueClient; import software.amazon.awssdk.services.glue.model.Column; import software.amazon.awssdk.services.glue.model.Database; @@ -191,7 +191,7 @@ protected GlueMetadataHandler( GlueClient awsGlue, EncryptionKeyFactory encryptionKeyFactory, 
SecretsManagerClient secretsManager, - AmazonAthena athena, + AthenaClient athena, String sourceType, String spillBucket, String spillPrefix, @@ -263,13 +263,17 @@ protected ListSchemasResponse doListSchemaNames(BlockAllocator blockAllocator, L GetDatabasesRequest getDatabasesRequest = GetDatabasesRequest.builder() .catalogId(getCatalog(request)) .build(); + + List schemas = new ArrayList<>(); GetDatabasesIterable responses = awsGlue.getDatabasesPaginator(getDatabasesRequest); - List schemas = responses.stream() - .flatMap(response -> response.databaseList().stream()) - .filter(database -> filter == null || filter.filter(database)) - .map(Database::name) - .collect(Collectors.toList()); - + + responses.stream().forEach(response -> response.databaseList() + .forEach(database -> { + if (filter == null || filter.filter(database)) { + schemas.add(database.name()); + } + })); + return new ListSchemasResponse(request.getCatalogName(), schemas); } @@ -317,11 +321,12 @@ protected ListTablesResponse doListTables(BlockAllocator blockAllocator, ListTab pageSize -= maxResults; } GetTablesResponse response = awsGlue.getTables(getTablesRequest.build()); - tables.addAll(response.tableList() - .stream() - .filter(table -> filter == null || filter.filter(table)) - .map(table -> new TableName(request.getSchemaName(), table.name())) - .collect(Collectors.toSet())); + + for (Table next : response.tableList()) { + if (filter == null || filter.filter(next)) { + tables.add(new TableName(request.getSchemaName(), next.name())); + } + } nextToken = response.nextToken(); } diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/MetadataHandler.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/MetadataHandler.java index cda07a9c68..ef64f9057d 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/MetadataHandler.java +++ 
b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/MetadataHandler.java @@ -58,8 +58,6 @@ import com.amazonaws.athena.connector.lambda.security.KmsKeyFactory; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; import com.amazonaws.athena.connector.lambda.serde.VersionedObjectMapperFactory; -import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.kms.AWSKMSClientBuilder; import com.amazonaws.services.lambda.runtime.Context; import com.amazonaws.services.lambda.runtime.RequestStreamHandler; @@ -70,6 +68,7 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.IOException; @@ -118,7 +117,7 @@ public abstract class MetadataHandler protected static final String KMS_KEY_ID_ENV = "kms_key_id"; protected static final String DISABLE_SPILL_ENCRYPTION = "disable_spill_encryption"; private final CachableSecretsManager secretsManager; - private final AmazonAthena athena; + private final AthenaClient athena; private final ThrottlingInvoker athenaInvoker; private final EncryptionKeyFactory encryptionKeyFactory; private final String spillBucket; @@ -151,7 +150,7 @@ public MetadataHandler(String sourceType, java.util.Map configOp } this.secretsManager = new CachableSecretsManager(SecretsManagerClient.create()); - this.athena = AmazonAthenaClientBuilder.defaultClient(); + this.athena = AthenaClient.create(); this.verifier = new SpillLocationVerifier(AmazonS3ClientBuilder.standard().build()); this.athenaInvoker = ThrottlingInvoker.newDefaultBuilder(ATHENA_EXCEPTION_FILTER, configOptions).build(); } @@ -162,7 +161,7 @@ public MetadataHandler(String sourceType, java.util.Map configOp public MetadataHandler( EncryptionKeyFactory 
encryptionKeyFactory, SecretsManagerClient secretsManager, - AmazonAthena athena, + AthenaClient athena, String sourceType, String spillBucket, String spillPrefix, diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/RecordHandler.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/RecordHandler.java index 8644615f18..1ac7a85645 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/RecordHandler.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/RecordHandler.java @@ -40,8 +40,6 @@ import com.amazonaws.athena.connector.lambda.request.PingResponse; import com.amazonaws.athena.connector.lambda.security.CachableSecretsManager; import com.amazonaws.athena.connector.lambda.serde.VersionedObjectMapperFactory; -import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.lambda.runtime.Context; import com.amazonaws.services.lambda.runtime.RequestStreamHandler; import com.amazonaws.services.s3.AmazonS3; @@ -49,6 +47,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.IOException; @@ -73,7 +72,7 @@ public abstract class RecordHandler private final AmazonS3 amazonS3; private final String sourceType; private final CachableSecretsManager secretsManager; - private final AmazonAthena athena; + private final AthenaClient athena; private final ThrottlingInvoker athenaInvoker; /** @@ -84,7 +83,7 @@ public RecordHandler(String sourceType, java.util.Map configOpti this.sourceType = sourceType; this.amazonS3 = AmazonS3ClientBuilder.defaultClient(); this.secretsManager = new CachableSecretsManager(SecretsManagerClient.create()); 
- this.athena = AmazonAthenaClientBuilder.defaultClient(); + this.athena = AthenaClient.create(); this.configOptions = configOptions; this.athenaInvoker = ThrottlingInvoker.newDefaultBuilder(ATHENA_EXCEPTION_FILTER, configOptions).build(); } @@ -92,7 +91,7 @@ public RecordHandler(String sourceType, java.util.Map configOpti /** * @param sourceType Used to aid in logging diagnostic info when raising a support case. */ - public RecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, String sourceType, java.util.Map configOptions) + public RecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, String sourceType, java.util.Map configOptions) { this.sourceType = sourceType; this.amazonS3 = amazonS3; diff --git a/athena-federation-sdk/src/test/java/com/amazonaws/athena/connector/lambda/QueryStatusCheckerTest.java b/athena-federation-sdk/src/test/java/com/amazonaws/athena/connector/lambda/QueryStatusCheckerTest.java index ba0a641783..98305a5596 100644 --- a/athena-federation-sdk/src/test/java/com/amazonaws/athena/connector/lambda/QueryStatusCheckerTest.java +++ b/athena-federation-sdk/src/test/java/com/amazonaws/athena/connector/lambda/QueryStatusCheckerTest.java @@ -20,13 +20,15 @@ package com.amazonaws.athena.connector.lambda; import com.amazonaws.AmazonServiceException; -import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.athena.model.GetQueryExecutionRequest; -import com.amazonaws.services.athena.model.GetQueryExecutionResult; -import com.amazonaws.services.athena.model.InvalidRequestException; -import com.amazonaws.services.athena.model.QueryExecution; -import com.amazonaws.services.athena.model.QueryExecutionStatus; import com.google.common.collect.ImmutableList; + +import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.athena.model.GetQueryExecutionRequest; +import 
software.amazon.awssdk.services.athena.model.GetQueryExecutionResponse; +import software.amazon.awssdk.services.athena.model.InvalidRequestException; +import software.amazon.awssdk.services.athena.model.QueryExecution; +import software.amazon.awssdk.services.athena.model.QueryExecutionStatus; + import org.junit.BeforeClass; import org.junit.Test; import org.junit.runner.RunWith; @@ -52,20 +54,21 @@ public class QueryStatusCheckerTest private final ThrottlingInvoker athenaInvoker = ThrottlingInvoker.newDefaultBuilder(ATHENA_EXCEPTION_FILTER, com.google.common.collect.ImmutableMap.of()).build(); @Mock - private AmazonAthena athena; + private AthenaClient athena; @Test public void testFastTermination() throws InterruptedException { String queryId = "query0"; - GetQueryExecutionRequest request = new GetQueryExecutionRequest().withQueryExecutionId(queryId); - when(athena.getQueryExecution(request)).thenReturn(new GetQueryExecutionResult().withQueryExecution(new QueryExecution().withStatus(new QueryExecutionStatus().withState("FAILED")))); + GetQueryExecutionRequest request = GetQueryExecutionRequest.builder().queryExecutionId(queryId).build(); + when(athena.getQueryExecution(request)).thenReturn(GetQueryExecutionResponse.builder().queryExecution(QueryExecution.builder().status(QueryExecutionStatus.builder().state("FAILED").build()).build()).build()); QueryStatusChecker queryStatusChecker = new QueryStatusChecker(athena, athenaInvoker, queryId); assertTrue(queryStatusChecker.isQueryRunning()); Thread.sleep(2000); assertFalse(queryStatusChecker.isQueryRunning()); - verify(athena, times(1)).getQueryExecution(any()); + verify(athena, times(1)).getQueryExecution(any(GetQueryExecutionRequest.class)); + queryStatusChecker.close(); } @Test @@ -73,9 +76,9 @@ public void testSlowTermination() throws InterruptedException { String queryId = "query1"; - GetQueryExecutionRequest request = new GetQueryExecutionRequest().withQueryExecutionId(queryId); - GetQueryExecutionResult 
result1and2 = new GetQueryExecutionResult().withQueryExecution(new QueryExecution().withStatus(new QueryExecutionStatus().withState("RUNNING"))); - GetQueryExecutionResult result3 = new GetQueryExecutionResult().withQueryExecution(new QueryExecution().withStatus(new QueryExecutionStatus().withState("SUCCEEDED"))); + GetQueryExecutionRequest request = GetQueryExecutionRequest.builder().queryExecutionId(queryId).build(); + GetQueryExecutionResponse result1and2 = GetQueryExecutionResponse.builder().queryExecution(QueryExecution.builder().status(QueryExecutionStatus.builder().state("RUNNING").build()).build()).build(); + GetQueryExecutionResponse result3 = GetQueryExecutionResponse.builder().queryExecution(QueryExecution.builder().status(QueryExecutionStatus.builder().state("SUCCEEDED").build()).build()).build(); when(athena.getQueryExecution(request)).thenReturn(result1and2).thenReturn(result1and2).thenReturn(result3); try (QueryStatusChecker queryStatusChecker = new QueryStatusChecker(athena, athenaInvoker, queryId)) { assertTrue(queryStatusChecker.isQueryRunning()); @@ -83,7 +86,7 @@ public void testSlowTermination() assertTrue(queryStatusChecker.isQueryRunning()); Thread.sleep(3000); assertFalse(queryStatusChecker.isQueryRunning()); - verify(athena, times(3)).getQueryExecution(any()); + verify(athena, times(3)).getQueryExecution(any(GetQueryExecutionRequest.class)); } } @@ -92,13 +95,13 @@ public void testNotFound() throws InterruptedException { String queryId = "query2"; - GetQueryExecutionRequest request = new GetQueryExecutionRequest().withQueryExecutionId(queryId); - when(athena.getQueryExecution(request)).thenThrow(new InvalidRequestException("")); + GetQueryExecutionRequest request = GetQueryExecutionRequest.builder().queryExecutionId(queryId).build(); + when(athena.getQueryExecution(request)).thenThrow(InvalidRequestException.builder().message("").build()); try (QueryStatusChecker queryStatusChecker = new QueryStatusChecker(athena, athenaInvoker, queryId)) { 
assertTrue(queryStatusChecker.isQueryRunning()); Thread.sleep(2000); assertTrue(queryStatusChecker.isQueryRunning()); - verify(athena, times(1)).getQueryExecution(any()); + verify(athena, times(1)).getQueryExecution(any(GetQueryExecutionRequest.class)); } } @@ -107,13 +110,13 @@ public void testOtherError() throws InterruptedException { String queryId = "query3"; - GetQueryExecutionRequest request = new GetQueryExecutionRequest().withQueryExecutionId(queryId); + GetQueryExecutionRequest request = GetQueryExecutionRequest.builder().queryExecutionId(queryId).build(); when(athena.getQueryExecution(request)).thenThrow(new AmazonServiceException("")); try (QueryStatusChecker queryStatusChecker = new QueryStatusChecker(athena, athenaInvoker, queryId)) { assertTrue(queryStatusChecker.isQueryRunning()); Thread.sleep(3000); assertTrue(queryStatusChecker.isQueryRunning()); - verify(athena, times(2)).getQueryExecution(any()); + verify(athena, times(2)).getQueryExecution(any(GetQueryExecutionRequest.class)); } } } diff --git a/athena-federation-sdk/src/test/java/com/amazonaws/athena/connector/lambda/handlers/GlueMetadataHandlerTest.java b/athena-federation-sdk/src/test/java/com/amazonaws/athena/connector/lambda/handlers/GlueMetadataHandlerTest.java index 816456ef73..010cd57745 100644 --- a/athena-federation-sdk/src/test/java/com/amazonaws/athena/connector/lambda/handlers/GlueMetadataHandlerTest.java +++ b/athena-federation-sdk/src/test/java/com/amazonaws/athena/connector/lambda/handlers/GlueMetadataHandlerTest.java @@ -41,10 +41,9 @@ import com.amazonaws.athena.connector.lambda.metadata.MetadataRequest; import com.amazonaws.athena.connector.lambda.security.IdentityUtil; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.lambda.runtime.Context; import com.google.common.collect.ImmutableList; - +import software.amazon.awssdk.services.athena.AthenaClient; import 
software.amazon.awssdk.services.glue.GlueClient; import software.amazon.awssdk.services.glue.model.Column; import software.amazon.awssdk.services.glue.model.Database; @@ -153,7 +152,7 @@ public void setUp() handler = new GlueMetadataHandler(mockGlue, new LocalKeyFactory(), mock(SecretsManagerClient.class), - mock(AmazonAthena.class), + mock(AthenaClient.class), "glue-test", "spill-bucket", "spill-prefix", diff --git a/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsMetadataHandler.java b/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsMetadataHandler.java index 466a9708c5..950c2c5745 100644 --- a/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsMetadataHandler.java +++ b/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsMetadataHandler.java @@ -39,7 +39,6 @@ import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory; import com.amazonaws.athena.connectors.gcs.common.PartitionUtil; import com.amazonaws.athena.connectors.gcs.storage.StorageMetadata; -import com.amazonaws.services.athena.AmazonAthena; import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.arrow.memory.BufferAllocator; import org.apache.arrow.util.VisibleForTesting; @@ -47,6 +46,7 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.glue.GlueClient; import software.amazon.awssdk.services.glue.model.Column; import software.amazon.awssdk.services.glue.model.Database; @@ -101,7 +101,7 @@ public GcsMetadataHandler(BufferAllocator allocator, java.util.Map { this(AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), - AmazonAthenaClientBuilder.defaultClient(), configOptions); + AthenaClient.create(), configOptions); this.allocator = allocator; } @@ -90,7 +89,7 @@ public GcsRecordHandler(BufferAllocator allocator, java.util.Map * @param amazonAthena 
An instance of AmazonAthena */ @VisibleForTesting - protected GcsRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena amazonAthena, java.util.Map configOptions) + protected GcsRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient amazonAthena, java.util.Map configOptions) { super(amazonS3, secretsManager, amazonAthena, SOURCE_TYPE, configOptions); this.invoker = ThrottlingInvoker.newDefaultBuilder(EXCEPTION_FILTER, configOptions).build(); diff --git a/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsThrottlingExceptionFilter.java b/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsThrottlingExceptionFilter.java index cc71cc64f2..b26c36677d 100644 --- a/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsThrottlingExceptionFilter.java +++ b/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsThrottlingExceptionFilter.java @@ -20,7 +20,7 @@ package com.amazonaws.athena.connectors.gcs; import com.amazonaws.athena.connector.lambda.ThrottlingInvoker; -import com.amazonaws.services.athena.model.AmazonAthenaException; +import software.amazon.awssdk.services.athena.model.AthenaException; public class GcsThrottlingExceptionFilter implements ThrottlingInvoker.ExceptionFilter { @@ -29,7 +29,7 @@ public class GcsThrottlingExceptionFilter implements ThrottlingInvoker.Exception @Override public boolean isMatch(Exception ex) { - return (ex instanceof AmazonAthenaException && ex.getMessage().contains("Rate exceeded")) + return (ex instanceof AthenaException && ex.getMessage().contains("Rate exceeded")) || ex.getMessage().contains("Too Many Requests"); } } diff --git a/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsExceptionFilterTest.java b/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsExceptionFilterTest.java index 9bbb349420..2dbde1f7c8 100644 --- 
a/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsExceptionFilterTest.java +++ b/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsExceptionFilterTest.java @@ -19,11 +19,10 @@ */ package com.amazonaws.athena.connectors.gcs; -import com.amazonaws.services.athena.model.AmazonAthenaException; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.junit.MockitoJUnitRunner; - +import software.amazon.awssdk.services.athena.model.AthenaException; import static com.amazonaws.athena.connectors.gcs.GcsThrottlingExceptionFilter.EXCEPTION_FILTER; import static org.junit.Assert.assertEquals; @@ -36,11 +35,11 @@ public class GcsExceptionFilterTest @Test public void testIsMatch() { - boolean match = EXCEPTION_FILTER.isMatch(new AmazonAthenaException("Rate exceeded")); + boolean match = EXCEPTION_FILTER.isMatch(AthenaException.builder().message("Rate exceeded").build()); assertTrue(match); - boolean match1 = EXCEPTION_FILTER.isMatch(new AmazonAthenaException("Too Many Requests")); + boolean match1 = EXCEPTION_FILTER.isMatch(AthenaException.builder().message("Too Many Requests").build()); assertTrue(match1); - boolean match3 = EXCEPTION_FILTER.isMatch(new AmazonAthenaException("other")); + boolean match3 = EXCEPTION_FILTER.isMatch(AthenaException.builder().message("other").build()); assertFalse(match3); } } diff --git a/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsMetadataHandlerTest.java b/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsMetadataHandlerTest.java index 4e51b66f99..1db09d5f77 100644 --- a/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsMetadataHandlerTest.java +++ b/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsMetadataHandlerTest.java @@ -40,7 +40,6 @@ import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; import 
com.amazonaws.athena.connectors.gcs.storage.StorageMetadata; -import com.amazonaws.services.athena.AmazonAthena; import com.google.api.gax.paging.Page; import com.google.auth.oauth2.GoogleCredentials; import com.google.auth.oauth2.ServiceAccountCredentials; @@ -65,6 +64,7 @@ import org.mockito.Mockito; import org.mockito.MockitoAnnotations; import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.glue.GlueClient; import software.amazon.awssdk.services.glue.model.Column; import software.amazon.awssdk.services.glue.model.Database; @@ -135,7 +135,7 @@ public class GcsMetadataHandlerTest @Mock private ServiceAccountCredentials serviceAccountCredentials; @Mock - private AmazonAthena athena; + private AthenaClient athena; private MockedStatic mockedStorageOptions; private MockedStatic mockedServiceAccountCredentials; diff --git a/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsRecordHandlerTest.java b/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsRecordHandlerTest.java index ee011e00ef..6f88aa4e3c 100644 --- a/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsRecordHandlerTest.java +++ b/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsRecordHandlerTest.java @@ -34,8 +34,6 @@ import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory; import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; -import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.google.auth.oauth2.GoogleCredentials; @@ -50,6 +48,7 @@ import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import 
software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.File; @@ -75,7 +74,7 @@ public class GcsRecordHandlerTest extends GenericGcsTest private SecretsManagerClient secretsManager; @Mock - private AmazonAthena athena; + private AthenaClient athena; @Mock GoogleCredentials credentials; @@ -128,7 +127,7 @@ public void initCommonMockedStatic() // To mock SecretsManagerClient via SecretsManagerClient mockedSecretManagerBuilder.when(SecretsManagerClient::create).thenReturn(secretsManager); // To mock AmazonAthena via AmazonAthenaClientBuilder - mockedAthenaClientBuilder.when(AmazonAthenaClientBuilder::defaultClient).thenReturn(athena); + mockedAthenaClientBuilder.when(AthenaClient::create).thenReturn(athena); mockedGoogleCredentials.when(() -> GoogleCredentials.fromStream(any())).thenReturn(credentials); Schema schemaForRead = new Schema(GcsTestUtils.getTestSchemaFieldsArrow()); spillWriter = new S3BlockSpiller(amazonS3, spillConfig, allocator, schemaForRead, ConstraintEvaluator.emptyEvaluator(), com.google.common.collect.ImmutableMap.of()); diff --git a/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GenericGcsTest.java b/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GenericGcsTest.java index 35826badda..885e5ca4d8 100644 --- a/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GenericGcsTest.java +++ b/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GenericGcsTest.java @@ -19,12 +19,12 @@ */ package com.amazonaws.athena.connectors.gcs; -import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.google.auth.oauth2.GoogleCredentials; import com.google.auth.oauth2.ServiceAccountCredentials; import org.mockito.MockedStatic; import org.mockito.Mockito; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.lang.reflect.Field; @@ 
-33,7 +33,7 @@ public class GenericGcsTest { protected MockedStatic mockedS3Builder; protected MockedStatic mockedSecretManagerBuilder; - protected MockedStatic mockedAthenaClientBuilder; + protected MockedStatic mockedAthenaClientBuilder; protected MockedStatic mockedGoogleCredentials; protected MockedStatic mockedGcsUtil; @@ -43,7 +43,7 @@ protected void initCommonMockedStatic() { mockedS3Builder = Mockito.mockStatic(AmazonS3ClientBuilder.class); mockedSecretManagerBuilder = Mockito.mockStatic(SecretsManagerClient.class); - mockedAthenaClientBuilder = Mockito.mockStatic(AmazonAthenaClientBuilder.class); + mockedAthenaClientBuilder = Mockito.mockStatic(AthenaClient.class); mockedGoogleCredentials = Mockito.mockStatic(GoogleCredentials.class); mockedGcsUtil = Mockito.mockStatic(GcsUtil.class); mockedServiceAccountCredentials = Mockito.mockStatic(ServiceAccountCredentials.class); diff --git a/athena-google-bigquery/src/main/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryExceptionFilter.java b/athena-google-bigquery/src/main/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryExceptionFilter.java index 866c7593ca..315e96dfc8 100644 --- a/athena-google-bigquery/src/main/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryExceptionFilter.java +++ b/athena-google-bigquery/src/main/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryExceptionFilter.java @@ -21,8 +21,8 @@ package com.amazonaws.athena.connectors.google.bigquery; import com.amazonaws.athena.connector.lambda.ThrottlingInvoker; -import com.amazonaws.services.athena.model.AmazonAthenaException; import com.google.cloud.bigquery.BigQueryException; +import software.amazon.awssdk.services.athena.model.AthenaException; public class BigQueryExceptionFilter implements ThrottlingInvoker.ExceptionFilter { public static final ThrottlingInvoker.ExceptionFilter EXCEPTION_FILTER = new BigQueryExceptionFilter(); @@ -30,7 +30,7 @@ public class BigQueryExceptionFilter implements 
ThrottlingInvoker.ExceptionFilte @Override public boolean isMatch(Exception ex) { - if (ex instanceof AmazonAthenaException && ex.getMessage().contains("Rate exceeded")) { + if (ex instanceof AthenaException && ex.getMessage().contains("Rate exceeded")) { return true; } if (ex instanceof BigQueryException && ex.getMessage().contains("Exceeded rate limits")) { diff --git a/athena-google-bigquery/src/main/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryRecordHandler.java b/athena-google-bigquery/src/main/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryRecordHandler.java index 7db7915cd3..a0d82f7284 100644 --- a/athena-google-bigquery/src/main/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryRecordHandler.java +++ b/athena-google-bigquery/src/main/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryRecordHandler.java @@ -29,8 +29,6 @@ import com.amazonaws.athena.connector.lambda.handlers.RecordHandler; import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest; import com.amazonaws.athena.connectors.google.bigquery.qpt.BigQueryQueryPassthrough; -import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.google.api.gax.rpc.ServerStream; @@ -60,6 +58,7 @@ import org.apache.arrow.vector.types.pojo.Field; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.IOException; @@ -94,11 +93,11 @@ public class BigQueryRecordHandler { this(AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), - AmazonAthenaClientBuilder.defaultClient(), configOptions, allocator); + AthenaClient.create(), configOptions, allocator); } @VisibleForTesting - public BigQueryRecordHandler(AmazonS3 amazonS3, 
SecretsManagerClient secretsManager, AmazonAthena athena, java.util.Map configOptions, BufferAllocator allocator) + public BigQueryRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, java.util.Map configOptions, BufferAllocator allocator) { super(amazonS3, secretsManager, athena, BigQueryConstants.SOURCE_TYPE, configOptions); this.invoker = ThrottlingInvoker.newDefaultBuilder(EXCEPTION_FILTER, configOptions).build(); diff --git a/athena-google-bigquery/src/test/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryRecordHandlerTest.java b/athena-google-bigquery/src/test/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryRecordHandlerTest.java index 8294adc6a1..948ee7d677 100644 --- a/athena-google-bigquery/src/test/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryRecordHandlerTest.java +++ b/athena-google-bigquery/src/test/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryRecordHandlerTest.java @@ -35,7 +35,6 @@ import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory; import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import com.google.api.gax.rpc.ServerStream; import com.google.api.gax.rpc.ServerStreamingCallable; @@ -78,6 +77,7 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.nio.charset.StandardCharsets; @@ -112,7 +112,7 @@ public class BigQueryRecordHandlerTest private String prefix = "prefix"; @Mock - private AmazonAthena athena; + private AthenaClient athena; @Mock private BigQueryReadClient bigQueryReadClient; @Mock diff --git 
a/athena-google-bigquery/src/test/java/com/amazonaws/athena/connectors/google/bigquery/integ/BigQueryIntegTest.java b/athena-google-bigquery/src/test/java/com/amazonaws/athena/connectors/google/bigquery/integ/BigQueryIntegTest.java index 51cb38fe07..ca41c3751c 100644 --- a/athena-google-bigquery/src/test/java/com/amazonaws/athena/connectors/google/bigquery/integ/BigQueryIntegTest.java +++ b/athena-google-bigquery/src/test/java/com/amazonaws/athena/connectors/google/bigquery/integ/BigQueryIntegTest.java @@ -21,7 +21,6 @@ import com.amazonaws.athena.connector.integ.IntegrationTestBase; import com.amazonaws.athena.connector.integ.data.TestConfig; -import com.amazonaws.services.athena.model.Row; import com.google.common.collect.ImmutableList; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; diff --git a/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseMetadataHandler.java b/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseMetadataHandler.java index ab90b920c0..9898a93cec 100644 --- a/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseMetadataHandler.java +++ b/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseMetadataHandler.java @@ -44,7 +44,6 @@ import com.amazonaws.athena.connectors.hbase.connection.HBaseConnection; import com.amazonaws.athena.connectors.hbase.connection.HbaseConnectionFactory; import com.amazonaws.athena.connectors.hbase.qpt.HbaseQueryPassthrough; -import com.amazonaws.services.athena.AmazonAthena; import com.google.common.collect.ImmutableMap; import org.apache.arrow.util.VisibleForTesting; import org.apache.arrow.vector.types.Types; @@ -55,6 +54,7 @@ import org.apache.hadoop.hbase.TableName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.glue.GlueClient; import software.amazon.awssdk.services.glue.model.Table; import 
software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; @@ -123,7 +123,7 @@ protected HbaseMetadataHandler( GlueClient awsGlue, EncryptionKeyFactory keyFactory, SecretsManagerClient secretsManager, - AmazonAthena athena, + AthenaClient athena, HbaseConnectionFactory connectionFactory, String spillBucket, String spillPrefix, diff --git a/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseRecordHandler.java b/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseRecordHandler.java index 7da3700652..8af36b5e54 100644 --- a/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseRecordHandler.java +++ b/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseRecordHandler.java @@ -31,8 +31,6 @@ import com.amazonaws.athena.connectors.hbase.connection.HBaseConnection; import com.amazonaws.athena.connectors.hbase.connection.HbaseConnectionFactory; import com.amazonaws.athena.connectors.hbase.qpt.HbaseQueryPassthrough; -import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; import org.apache.arrow.util.VisibleForTesting; @@ -53,6 +51,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.IOException; @@ -92,13 +91,13 @@ public HbaseRecordHandler(java.util.Map configOptions) this( AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), - AmazonAthenaClientBuilder.defaultClient(), + AthenaClient.create(), new HbaseConnectionFactory(), configOptions); } @VisibleForTesting - protected HbaseRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, HbaseConnectionFactory connectionFactory, java.util.Map 
configOptions) + protected HbaseRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, HbaseConnectionFactory connectionFactory, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, SOURCE_TYPE, configOptions); this.amazonS3 = amazonS3; diff --git a/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/HbaseMetadataHandlerTest.java b/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/HbaseMetadataHandlerTest.java index 9b9fcf39ec..5447a8eec1 100644 --- a/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/HbaseMetadataHandlerTest.java +++ b/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/HbaseMetadataHandlerTest.java @@ -42,7 +42,6 @@ import com.amazonaws.athena.connectors.hbase.connection.HBaseConnection; import com.amazonaws.athena.connectors.hbase.connection.HbaseConnectionFactory; import com.amazonaws.athena.connectors.hbase.connection.ResultProcessor; -import com.amazonaws.services.athena.AmazonAthena; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.hadoop.hbase.HRegionInfo; @@ -61,6 +60,7 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.glue.GlueClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; @@ -105,7 +105,7 @@ public class HbaseMetadataHandlerTest private SecretsManagerClient secretsManager; @Mock - private AmazonAthena athena; + private AthenaClient athena; @Before public void setUp() diff --git a/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/HbaseRecordHandlerTest.java b/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/HbaseRecordHandlerTest.java index 47aefec079..8d3ebd1b45 100644 --- 
a/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/HbaseRecordHandlerTest.java +++ b/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/HbaseRecordHandlerTest.java @@ -43,7 +43,6 @@ import com.amazonaws.athena.connectors.hbase.connection.HBaseConnection; import com.amazonaws.athena.connectors.hbase.connection.HbaseConnectionFactory; import com.amazonaws.athena.connectors.hbase.connection.ResultProcessor; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.model.PutObjectRequest; import com.amazonaws.services.s3.model.PutObjectResult; @@ -69,6 +68,7 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.ByteArrayInputStream; @@ -123,7 +123,7 @@ public class HbaseRecordHandlerTest private SecretsManagerClient mockSecretsManager; @Mock - private AmazonAthena mockAthena; + private AthenaClient mockAthena; @Before public void setUp() diff --git a/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/integ/HbaseIntegTest.java b/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/integ/HbaseIntegTest.java index ee7204f1d3..6edd01d72c 100644 --- a/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/integ/HbaseIntegTest.java +++ b/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/integ/HbaseIntegTest.java @@ -26,7 +26,6 @@ import com.amazonaws.athena.connector.integ.data.ConnectorStackAttributes; import com.amazonaws.athena.connector.integ.data.ConnectorVpcAttributes; import com.amazonaws.athena.connector.integ.providers.ConnectorPackagingAttributesProvider; -import com.amazonaws.services.athena.model.Row; import com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduce; import 
com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduceClientBuilder; import com.amazonaws.services.elasticmapreduce.model.Application; @@ -50,6 +49,7 @@ import software.amazon.awscdk.core.Stack; import software.amazon.awscdk.services.emr.CfnCluster; import software.amazon.awscdk.services.iam.PolicyDocument; +import software.amazon.awssdk.services.athena.model.Row; import java.time.LocalDate; import java.time.LocalDateTime; @@ -376,13 +376,13 @@ public void selectColumnWithPredicateIntegTest() String query = String .format("select \"info:lead_actor\" from %s.%s.%s where \"movie:title\" = 'Aliens';", lambdaFunctionName, hbaseDbName, hbaseTableName); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List actors = new ArrayList<>(); - rows.forEach(row -> actors.add(row.getData().get(0).getVarCharValue())); + rows.forEach(row -> actors.add(row.data().get(0).varCharValue())); logger.info("Actors: {}", actors); assertEquals("Wrong number of DB records found.", 1, actors.size()); assertTrue("Actor not found: Sigourney Weaver.", actors.contains("Sigourney Weaver")); @@ -397,13 +397,13 @@ public void selectIntegerTypeTest() String query = String.format("select \"datatype:int_type\" from %s.%s.%s;", lambdaFunctionName, INTEG_TEST_DATABASE_NAME, TEST_DATATYPES_TABLE_NAME); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List values = new ArrayList<>(); - rows.forEach(row -> values.add(Integer.parseInt(row.getData().get(0).getVarCharValue().split("\\.")[0]))); + rows.forEach(row -> values.add(Integer.parseInt(row.data().get(0).varCharValue().split("\\.")[0]))); logger.info("Titles: {}", values); assertEquals("Wrong number of DB records found.", 1, values.size()); 
assertTrue("Integer not found: " + TEST_DATATYPES_INT_VALUE, values.contains(TEST_DATATYPES_INT_VALUE)); @@ -418,13 +418,13 @@ public void selectVarcharTypeTest() String query = String.format("select \"datatype:varchar_type\" from %s.%s.%s;", lambdaFunctionName, INTEG_TEST_DATABASE_NAME, TEST_DATATYPES_TABLE_NAME); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List values = new ArrayList<>(); - rows.forEach(row -> values.add(row.getData().get(0).getVarCharValue())); + rows.forEach(row -> values.add(row.data().get(0).varCharValue())); logger.info("Titles: {}", values); assertEquals("Wrong number of DB records found.", 1, values.size()); assertTrue("Varchar not found: " + TEST_DATATYPES_VARCHAR_VALUE, values.contains(TEST_DATATYPES_VARCHAR_VALUE)); @@ -439,13 +439,13 @@ public void selectBooleanTypeTest() String query = String.format("select \"datatype:boolean_type\" from %s.%s.%s;", lambdaFunctionName, INTEG_TEST_DATABASE_NAME, TEST_DATATYPES_TABLE_NAME); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List values = new ArrayList<>(); - rows.forEach(row -> values.add(Boolean.valueOf(row.getData().get(0).getVarCharValue()))); + rows.forEach(row -> values.add(Boolean.valueOf(row.data().get(0).varCharValue()))); logger.info("Titles: {}", values); assertEquals("Wrong number of DB records found.", 1, values.size()); assertTrue("Boolean not found: " + TEST_DATATYPES_BOOLEAN_VALUE, values.contains(TEST_DATATYPES_BOOLEAN_VALUE)); @@ -460,13 +460,13 @@ public void selectSmallintTypeTest() String query = String.format("select \"datatype:smallint_type\" from %s.%s.%s;", lambdaFunctionName, INTEG_TEST_DATABASE_NAME, TEST_DATATYPES_TABLE_NAME); - List rows = 
startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List values = new ArrayList<>(); - rows.forEach(row -> values.add(Short.valueOf(row.getData().get(0).getVarCharValue().split("\\.")[0]))); + rows.forEach(row -> values.add(Short.valueOf(row.data().get(0).varCharValue().split("\\.")[0]))); logger.info("Titles: {}", values); assertEquals("Wrong number of DB records found.", 1, values.size()); assertTrue("Smallint not found: " + TEST_DATATYPES_SHORT_VALUE, values.contains(TEST_DATATYPES_SHORT_VALUE)); @@ -481,13 +481,13 @@ public void selectBigintTypeTest() String query = String.format("select \"datatype:bigint_type\" from %s.%s.%s;", lambdaFunctionName, INTEG_TEST_DATABASE_NAME, TEST_DATATYPES_TABLE_NAME); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List values = new ArrayList<>(); - rows.forEach(row -> values.add(Long.valueOf(row.getData().get(0).getVarCharValue().split("\\.")[0]))); + rows.forEach(row -> values.add(Long.valueOf(row.data().get(0).varCharValue().split("\\.")[0]))); assertEquals("Wrong number of DB records found.", 1, values.size()); assertTrue("Bigint not found: " + TEST_DATATYPES_LONG_VALUE, values.contains(TEST_DATATYPES_LONG_VALUE)); } @@ -501,13 +501,13 @@ public void selectFloat4TypeTest() String query = String.format("select \"datatype:float4_type\" from %s.%s.%s;", lambdaFunctionName, INTEG_TEST_DATABASE_NAME, TEST_DATATYPES_TABLE_NAME); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List values = new ArrayList<>(); - rows.forEach(row -> values.add(Float.valueOf(row.getData().get(0).getVarCharValue()))); 
+ rows.forEach(row -> values.add(Float.valueOf(row.data().get(0).varCharValue()))); assertEquals("Wrong number of DB records found.", 1, values.size()); assertTrue("Float4 not found: " + TEST_DATATYPES_SINGLE_PRECISION_VALUE, values.contains(TEST_DATATYPES_SINGLE_PRECISION_VALUE)); } @@ -521,13 +521,13 @@ public void selectFloat8TypeTest() String query = String.format("select \"datatype:float8_type\" from %s.%s.%s;", lambdaFunctionName, INTEG_TEST_DATABASE_NAME, TEST_DATATYPES_TABLE_NAME); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List values = new ArrayList<>(); - rows.forEach(row -> values.add(Double.valueOf(row.getData().get(0).getVarCharValue()))); + rows.forEach(row -> values.add(Double.valueOf(row.data().get(0).varCharValue()))); assertEquals("Wrong number of DB records found.", 1, values.size()); assertTrue("Float8 not found: " + TEST_DATATYPES_DOUBLE_PRECISION_VALUE, values.contains(TEST_DATATYPES_DOUBLE_PRECISION_VALUE)); } @@ -541,13 +541,13 @@ public void selectDateTypeTest() String query = String.format("select \"datatype:date_type\" from %s.%s.%s;", lambdaFunctionName, INTEG_TEST_DATABASE_NAME, TEST_DATATYPES_TABLE_NAME); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List values = new ArrayList<>(); - rows.forEach(row -> values.add(LocalDate.parse(row.getData().get(0).getVarCharValue()))); + rows.forEach(row -> values.add(LocalDate.parse(row.data().get(0).varCharValue()))); assertEquals("Wrong number of DB records found.", 1, values.size()); assertTrue("Date not found: " + TEST_DATATYPES_DATE_VALUE, values.contains(LocalDate.parse(TEST_DATATYPES_DATE_VALUE))); } @@ -561,15 +561,15 @@ public void selectTimestampTypeTest() String query = 
String.format("select \"datatype:timestamp_type\" from %s.%s.%s;", lambdaFunctionName, INTEG_TEST_DATABASE_NAME, TEST_DATATYPES_TABLE_NAME); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List values = new ArrayList<>(); // for some reason, timestamps lose their 'T'. - rows.forEach(row -> values.add(LocalDateTime.parse(row.getData().get(0).getVarCharValue().replace(' ', 'T')))); - logger.info(rows.get(0).getData().get(0).getVarCharValue()); + rows.forEach(row -> values.add(LocalDateTime.parse(row.data().get(0).varCharValue().replace(' ', 'T')))); + logger.info(rows.get(0).data().get(0).varCharValue()); assertEquals("Wrong number of DB records found.", 1, values.size()); assertTrue("Date not found: " + TEST_DATATYPES_TIMESTAMP_VALUE, values.contains(LocalDateTime.parse(TEST_DATATYPES_TIMESTAMP_VALUE))); } diff --git a/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveMetadataHandler.java b/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveMetadataHandler.java index 56016626e2..e3b49989ad 100644 --- a/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveMetadataHandler.java +++ b/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveMetadataHandler.java @@ -48,7 +48,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil; import com.amazonaws.athena.connectors.jdbc.manager.JdbcArrowTypeConverter; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; -import com.amazonaws.services.athena.AmazonAthena; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; @@ -59,6 +58,7 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import 
org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -97,7 +97,7 @@ public HiveMetadataHandler(DatabaseConnectionConfig databaseConnectionConfig, ja protected HiveMetadataHandler( DatabaseConnectionConfig databaseConnectionConfiguration, SecretsManagerClient secretManager, - AmazonAthena athena, + AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { diff --git a/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxMetadataHandler.java b/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxMetadataHandler.java index 13b93a3f80..81dfff872b 100644 --- a/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxMetadataHandler.java +++ b/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxMetadataHandler.java @@ -25,8 +25,8 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandlerFactory; -import com.amazonaws.services.athena.AmazonAthena; import org.apache.arrow.util.VisibleForTesting; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -56,7 +56,7 @@ public HiveMuxMetadataHandler(java.util.Map configOptions) } @VisibleForTesting - protected HiveMuxMetadataHandler(SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + protected HiveMuxMetadataHandler(SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, Map metadataHandlerMap, DatabaseConnectionConfig databaseConnectionConfig, 
java.util.Map configOptions) { super(secretsManager, athena, jdbcConnectionFactory, metadataHandlerMap, databaseConnectionConfig, configOptions); diff --git a/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxRecordHandler.java b/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxRecordHandler.java index a0aba7271b..9709e4adde 100644 --- a/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxRecordHandler.java +++ b/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxRecordHandler.java @@ -25,9 +25,9 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import com.google.common.annotations.VisibleForTesting; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -57,7 +57,7 @@ public HiveMuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - HiveMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + HiveMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveRecordHandler.java 
b/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveRecordHandler.java index ff1f8e3a65..5d5ab5035a 100644 --- a/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveRecordHandler.java +++ b/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveRecordHandler.java @@ -28,13 +28,12 @@ import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.commons.lang3.Validate; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -61,11 +60,11 @@ public HiveRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, java } public HiveRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { - this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), + this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AthenaClient.create(), jdbcConnectionFactory, new HiveQueryStringBuilder(HIVE_QUOTE_CHARACTER, new HiveFederationExpressionParser(HIVE_QUOTE_CHARACTER)), configOptions); } @VisibleForTesting - HiveRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory 
jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) + HiveRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); this.jdbcSplitQueryBuilder = Validate.notNull(jdbcSplitQueryBuilder, "query builder must not be null"); diff --git a/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveMetadataHandlerTest.java b/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveMetadataHandlerTest.java index 988ffbfe1d..c7acb9f360 100644 --- a/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveMetadataHandlerTest.java +++ b/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveMetadataHandlerTest.java @@ -28,7 +28,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; -import com.amazonaws.services.athena.AmazonAthena; import org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Assert; @@ -36,6 +35,7 @@ import org.junit.BeforeClass; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; @@ -58,7 +58,7 @@ public class HiveMetadataHandlerTest private Connection 
connection; private FederatedIdentity federatedIdentity; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; private BlockAllocator blockAllocator; @BeforeClass @@ -75,7 +75,7 @@ public void setup() this.connection = Mockito.mock(Connection.class, Mockito.RETURNS_DEEP_STUBS); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}").build()); this.hiveMetadataHandler = new HiveMetadataHandler(databaseConnectionConfig, this.secretsManager, this.athena, this.jdbcConnectionFactory, com.google.common.collect.ImmutableMap.of()); this.federatedIdentity = Mockito.mock(FederatedIdentity.class); diff --git a/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxMetadataHandlerTest.java b/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxMetadataHandlerTest.java index 51aec4c5ab..eb04665d33 100644 --- a/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxMetadataHandlerTest.java +++ b/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxMetadataHandlerTest.java @@ -43,7 +43,7 @@ import com.amazonaws.athena.connector.lambda.metadata.GetTableRequest; import com.amazonaws.athena.connector.lambda.metadata.ListSchemasRequest; import com.amazonaws.athena.connector.lambda.metadata.ListTablesRequest; -import com.amazonaws.services.athena.AmazonAthena; +import software.amazon.awssdk.services.athena.AthenaClient; import 
software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import static org.mockito.ArgumentMatchers.nullable; @@ -55,7 +55,7 @@ public class HiveMuxMetadataHandlerTest private JdbcMetadataHandler jdbcMetadataHandler; private BlockAllocator allocator; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @BeforeClass @@ -69,7 +69,7 @@ public void setup() this.hiveMetadataHandler = Mockito.mock(HiveMetadataHandler.class); this.metadataHandlerMap = Collections.singletonMap("metaHive", this.hiveMetadataHandler); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog", HiveConstants.HIVE_NAME, diff --git a/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxRecordHandlerTest.java b/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxRecordHandlerTest.java index 60b5c89e48..39074a4f36 100644 --- a/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxRecordHandlerTest.java +++ b/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxRecordHandlerTest.java @@ -29,7 +29,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import 
org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; @@ -37,6 +36,7 @@ import org.junit.Test; import org.mockito.Mockito; import org.testng.Assert; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -52,7 +52,7 @@ public class HiveMuxRecordHandlerTest private JdbcRecordHandler jdbcRecordHandler; private AmazonS3 amazonS3; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @BeforeClass @@ -66,7 +66,7 @@ public void setup() this.recordHandlerMap = Collections.singletonMap("recordHive", this.hiveRecordHandler); this.amazonS3 = Mockito.mock(AmazonS3.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog", HiveConstants.HIVE_NAME, diff --git a/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveRecordHandlerTest.java b/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveRecordHandlerTest.java index 254c4e14bb..5fc3438e2f 100644 --- a/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveRecordHandlerTest.java +++ b/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveRecordHandlerTest.java @@ -46,10 +46,10 @@ import com.amazonaws.athena.connector.lambda.domain.predicate.Range; import com.amazonaws.athena.connector.lambda.domain.predicate.SortedRangeSet; import 
com.amazonaws.athena.connector.lambda.domain.predicate.ValueSet; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; @@ -66,7 +66,7 @@ public class HiveRecordHandlerTest private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; private AmazonS3 amazonS3; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; @Before public void setup() @@ -74,7 +74,7 @@ public void setup() { this.amazonS3 = Mockito.mock(AmazonS3.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}").build()); this.connection = Mockito.mock(Connection.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); diff --git a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcMetadataHandler.java b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcMetadataHandler.java index 71d5873cca..992207a345 100644 --- a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcMetadataHandler.java +++ b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcMetadataHandler.java @@ -39,9 +39,9 @@ import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil; 
import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandlerFactory; -import com.amazonaws.services.athena.AmazonAthena; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.commons.lang3.Validate; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -65,7 +65,7 @@ public class MultiplexingJdbcMetadataHandler */ protected MultiplexingJdbcMetadataHandler( SecretsManagerClient secretsManager, - AmazonAthena athena, + AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, Map metadataHandlerMap, DatabaseConnectionConfig databaseConnectionConfig, diff --git a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcRecordHandler.java b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcRecordHandler.java index 86c8e4db7e..2b791d3454 100644 --- a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcRecordHandler.java +++ b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcRecordHandler.java @@ -30,11 +30,11 @@ import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.commons.lang3.Validate; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -63,7 +63,7 @@ public MultiplexingJdbcRecordHandler(JdbcRecordHandlerFactory jdbcRecordHandlerF protected 
MultiplexingJdbcRecordHandler( AmazonS3 amazonS3, SecretsManagerClient secretsManager, - AmazonAthena athena, + AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, diff --git a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcMetadataHandler.java b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcMetadataHandler.java index e882fac762..f901f8bbfa 100644 --- a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcMetadataHandler.java +++ b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcMetadataHandler.java @@ -45,7 +45,6 @@ import com.amazonaws.athena.connectors.jdbc.qpt.JdbcQueryPassthrough; import com.amazonaws.athena.connectors.jdbc.splits.Splitter; import com.amazonaws.athena.connectors.jdbc.splits.SplitterFactory; -import com.amazonaws.services.athena.AmazonAthena; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; @@ -57,6 +56,7 @@ import org.apache.commons.lang3.Validate; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -116,7 +116,7 @@ protected JdbcMetadataHandler( protected JdbcMetadataHandler( DatabaseConnectionConfig databaseConnectionConfig, SecretsManagerClient secretsManager, - AmazonAthena athena, + AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { diff --git a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcRecordHandler.java b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcRecordHandler.java index 974551550b..044488e256 100644 --- 
a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcRecordHandler.java +++ b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcRecordHandler.java @@ -54,7 +54,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.athena.connectors.jdbc.connection.RdsSecretsCredentialProvider; import com.amazonaws.athena.connectors.jdbc.qpt.JdbcQueryPassthrough; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import org.apache.arrow.util.VisibleForTesting; import org.apache.arrow.vector.FieldVector; @@ -75,6 +74,7 @@ import org.apache.commons.lang3.Validate; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Array; @@ -113,7 +113,7 @@ protected JdbcRecordHandler(String sourceType, java.util.Map con protected JdbcRecordHandler( AmazonS3 amazonS3, SecretsManagerClient secretsManager, - AmazonAthena athena, + AthenaClient athena, DatabaseConnectionConfig databaseConnectionConfig, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) diff --git a/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcMetadataHandlerTest.java b/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcMetadataHandlerTest.java index 1e952eff7b..484ffa93b4 100644 --- a/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcMetadataHandlerTest.java +++ b/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcMetadataHandlerTest.java @@ -32,10 +32,10 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; -import 
com.amazonaws.services.athena.AmazonAthena; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Collections; @@ -50,7 +50,7 @@ public class MultiplexingJdbcMetadataHandlerTest private JdbcMetadataHandler jdbcMetadataHandler; private BlockAllocator allocator; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -63,7 +63,7 @@ public void setup() this.fakeDatabaseHandler = Mockito.mock(JdbcMetadataHandler.class); this.metadataHandlerMap = Collections.singletonMap("fakedatabase", this.fakeDatabaseHandler); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog", "fakedatabase", diff --git a/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcRecordHandlerTest.java b/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcRecordHandlerTest.java index 2d42e4c01d..1177f10375 100644 --- a/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcRecordHandlerTest.java +++ b/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcRecordHandlerTest.java @@ -28,12 +28,12 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; -import 
com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -48,7 +48,7 @@ public class MultiplexingJdbcRecordHandlerTest private JdbcRecordHandler jdbcRecordHandler; private AmazonS3 amazonS3; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -59,7 +59,7 @@ public void setup() this.recordHandlerMap = Collections.singletonMap("fakedatabase", this.fakeJdbcRecordHandler); this.amazonS3 = Mockito.mock(AmazonS3.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog", "fakedatabase", diff --git a/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcMetadataHandlerTest.java b/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcMetadataHandlerTest.java index e1d470af21..5d2d5f1291 100644 --- a/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcMetadataHandlerTest.java +++ b/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcMetadataHandlerTest.java @@ -39,12 +39,12 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import 
com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; -import com.amazonaws.services.athena.AmazonAthena; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; @@ -71,7 +71,7 @@ public class JdbcMetadataHandlerTest private Connection connection; private BlockAllocator blockAllocator; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; private ResultSet resultSetName; @Before @@ -83,7 +83,7 @@ public void setup() Mockito.when(connection.getCatalog()).thenReturn("testCatalog"); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}").build()); DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog", "fakedatabase", "fakedatabase://jdbc:fakedatabase://hostname/${testSecret}", "testSecret"); diff --git a/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcRecordHandlerTest.java b/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcRecordHandlerTest.java index cb297e192a..17a0cb55e3 100644 --- 
a/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcRecordHandlerTest.java +++ b/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcRecordHandlerTest.java @@ -39,7 +39,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.model.PutObjectRequest; import com.amazonaws.services.s3.model.PutObjectResult; @@ -50,6 +49,7 @@ import org.junit.Test; import org.mockito.Mockito; import org.mockito.stubbing.Answer; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; @@ -77,7 +77,7 @@ public class JdbcRecordHandlerTest private JdbcConnectionFactory jdbcConnectionFactory; private AmazonS3 amazonS3; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; private QueryStatusChecker queryStatusChecker; private FederatedIdentity federatedIdentity; private PreparedStatement preparedStatement; @@ -91,7 +91,7 @@ public void setup() Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); this.amazonS3 = Mockito.mock(AmazonS3.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); 
Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}").build()); this.preparedStatement = Mockito.mock(PreparedStatement.class); diff --git a/athena-kafka/src/main/java/com/amazonaws/athena/connectors/kafka/KafkaRecordHandler.java b/athena-kafka/src/main/java/com/amazonaws/athena/connectors/kafka/KafkaRecordHandler.java index 16d45fc6c5..74d8679563 100644 --- a/athena-kafka/src/main/java/com/amazonaws/athena/connectors/kafka/KafkaRecordHandler.java +++ b/athena-kafka/src/main/java/com/amazonaws/athena/connectors/kafka/KafkaRecordHandler.java @@ -27,8 +27,6 @@ import com.amazonaws.athena.connectors.kafka.dto.KafkaField; import com.amazonaws.athena.connectors.kafka.dto.SplitParameters; import com.amazonaws.athena.connectors.kafka.dto.TopicResultSet; -import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.google.common.annotations.VisibleForTesting; @@ -43,6 +41,7 @@ import org.apache.kafka.common.TopicPartition; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.time.Duration; @@ -63,12 +62,12 @@ public class KafkaRecordHandler this( AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), - AmazonAthenaClientBuilder.defaultClient(), + AthenaClient.create(), configOptions); } @VisibleForTesting - public KafkaRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, java.util.Map configOptions) + public KafkaRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, java.util.Map 
configOptions) { super(amazonS3, secretsManager, athena, KafkaConstants.KAFKA_SOURCE, configOptions); } diff --git a/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaRecordHandlerTest.java b/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaRecordHandlerTest.java index fb1ee2176c..929f8ff14b 100644 --- a/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaRecordHandlerTest.java +++ b/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaRecordHandlerTest.java @@ -32,7 +32,6 @@ import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; import com.amazonaws.athena.connectors.kafka.dto.*; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; @@ -57,6 +56,7 @@ import org.mockito.junit.MockitoJUnitRunner; import org.mockito.MockedStatic; import org.mockito.Mockito; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.glue.GlueClient; import software.amazon.awssdk.services.glue.model.GetSchemaRequest; import software.amazon.awssdk.services.glue.model.GetSchemaResponse; @@ -92,7 +92,7 @@ public class KafkaRecordHandlerTest { SecretsManagerClient awsSecretsManager; @Mock - private AmazonAthena athena; + private AthenaClient athena; @Mock FederatedIdentity federatedIdentity; diff --git a/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/AmazonMskRecordHandler.java b/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/AmazonMskRecordHandler.java index 89c4df6e52..080d72ea44 100644 --- a/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/AmazonMskRecordHandler.java +++ b/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/AmazonMskRecordHandler.java @@ -27,8 +27,6 @@ import 
com.amazonaws.athena.connectors.msk.dto.MSKField; import com.amazonaws.athena.connectors.msk.dto.SplitParameters; import com.amazonaws.athena.connectors.msk.dto.TopicResultSet; -import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.google.common.annotations.VisibleForTesting; @@ -39,6 +37,7 @@ import org.apache.kafka.common.TopicPartition; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.time.Duration; @@ -56,12 +55,12 @@ public class AmazonMskRecordHandler this( AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), - AmazonAthenaClientBuilder.defaultClient(), + AthenaClient.create(), configOptions); } @VisibleForTesting - public AmazonMskRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, java.util.Map configOptions) + public AmazonMskRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, AmazonMskConstants.MSK_SOURCE, configOptions); } diff --git a/athena-msk/src/test/java/com/amazonaws/athena/connectors/msk/AmazonMskRecordHandlerTest.java b/athena-msk/src/test/java/com/amazonaws/athena/connectors/msk/AmazonMskRecordHandlerTest.java index 742af33983..7fcfc6a607 100644 --- a/athena-msk/src/test/java/com/amazonaws/athena/connectors/msk/AmazonMskRecordHandlerTest.java +++ b/athena-msk/src/test/java/com/amazonaws/athena/connectors/msk/AmazonMskRecordHandlerTest.java @@ -32,7 +32,6 @@ import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; import com.amazonaws.athena.connectors.msk.dto.*; -import 
com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; @@ -52,6 +51,7 @@ import org.mockito.Mockito; import org.mockito.MockitoAnnotations; import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Collections; @@ -74,7 +74,7 @@ public class AmazonMskRecordHandlerTest { SecretsManagerClient awsSecretsManager; @Mock - private AmazonAthena athena; + private AthenaClient athena; @Mock FederatedIdentity federatedIdentity; diff --git a/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlMetadataHandler.java b/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlMetadataHandler.java index c2e668ef53..169fd1be81 100644 --- a/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlMetadataHandler.java +++ b/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlMetadataHandler.java @@ -46,7 +46,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.PreparedStatementBuilder; -import com.amazonaws.services.athena.AmazonAthena; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.complex.reader.FieldReader; @@ -54,6 +53,7 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -115,7 +115,7 @@ public MySqlMetadataHandler(DatabaseConnectionConfig databaseConnectionConfig, J protected 
MySqlMetadataHandler( DatabaseConnectionConfig databaseConnectionConfig, SecretsManagerClient secretsManager, - AmazonAthena athena, + AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { diff --git a/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlMuxMetadataHandler.java b/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlMuxMetadataHandler.java index 0747a1390c..c31ce81f43 100644 --- a/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlMuxMetadataHandler.java +++ b/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlMuxMetadataHandler.java @@ -25,8 +25,8 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandlerFactory; -import com.amazonaws.services.athena.AmazonAthena; import org.apache.arrow.util.VisibleForTesting; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -58,7 +58,7 @@ public MySqlMuxMetadataHandler(java.util.Map configOptions) } @VisibleForTesting - protected MySqlMuxMetadataHandler(SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + protected MySqlMuxMetadataHandler(SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, Map metadataHandlerMap, DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { super(secretsManager, athena, jdbcConnectionFactory, metadataHandlerMap, databaseConnectionConfig, configOptions); diff --git a/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlMuxRecordHandler.java b/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlMuxRecordHandler.java index 
5382fbba2b..659262750e 100644 --- a/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlMuxRecordHandler.java +++ b/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlMuxRecordHandler.java @@ -25,9 +25,9 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import com.google.common.annotations.VisibleForTesting; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -57,7 +57,7 @@ public MySqlMuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - MySqlMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + MySqlMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlRecordHandler.java b/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlRecordHandler.java index 1eb7b38d1b..a7cb397c97 100644 --- a/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlRecordHandler.java +++ b/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlRecordHandler.java @@ -29,8 +29,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import 
com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.google.common.annotations.VisibleForTesting; @@ -38,6 +36,7 @@ import org.apache.commons.lang3.Validate; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -78,13 +77,13 @@ public MySqlRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, jav public MySqlRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { - this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), + this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AthenaClient.create(), jdbcConnectionFactory, new MySqlQueryStringBuilder(MYSQL_QUOTE_CHARACTER, new MySqlFederationExpressionParser(MYSQL_QUOTE_CHARACTER)), configOptions); } @VisibleForTesting MySqlRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, final AmazonS3 amazonS3, final SecretsManagerClient secretsManager, - final AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) + final AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); this.jdbcSplitQueryBuilder = Validate.notNull(jdbcSplitQueryBuilder, "query builder must not be null"); diff --git 
a/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlMetadataHandlerTest.java b/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlMetadataHandlerTest.java index bd59bf4c18..129067e4b8 100644 --- a/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlMetadataHandlerTest.java +++ b/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlMetadataHandlerTest.java @@ -40,13 +40,13 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; -import com.amazonaws.services.athena.AmazonAthena; import org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; @@ -79,7 +79,7 @@ public class MySqlMetadataHandlerTest private Connection connection; private FederatedIdentity federatedIdentity; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; private BlockAllocator blockAllocator; @Before @@ -90,7 +90,7 @@ public void setup() this.connection = Mockito.mock(Connection.class, Mockito.RETURNS_DEEP_STUBS); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); 
Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}").build()); this.mySqlMetadataHandler = new MySqlMetadataHandler(databaseConnectionConfig, this.secretsManager, this.athena, this.jdbcConnectionFactory, com.google.common.collect.ImmutableMap.of()); this.federatedIdentity = Mockito.mock(FederatedIdentity.class); diff --git a/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlMuxJdbcMetadataHandlerTest.java b/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlMuxJdbcMetadataHandlerTest.java index 4afa68c8c1..d74d150cb4 100644 --- a/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlMuxJdbcMetadataHandlerTest.java +++ b/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlMuxJdbcMetadataHandlerTest.java @@ -32,10 +32,10 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; -import com.amazonaws.services.athena.AmazonAthena; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Collections; @@ -50,7 +50,7 @@ public class MySqlMuxJdbcMetadataHandlerTest private JdbcMetadataHandler jdbcMetadataHandler; private BlockAllocator allocator; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -63,7 +63,7 @@ public void setup() this.mySqlMetadataHandler = Mockito.mock(MySqlMetadataHandler.class); 
this.metadataHandlerMap = Collections.singletonMap("fakedatabase", this.mySqlMetadataHandler); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog", "fakedatabase", diff --git a/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlMuxJdbcRecordHandlerTest.java b/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlMuxJdbcRecordHandlerTest.java index c1352221ad..fdb6f56ec7 100644 --- a/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlMuxJdbcRecordHandlerTest.java +++ b/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlMuxJdbcRecordHandlerTest.java @@ -28,12 +28,12 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -48,7 +48,7 @@ public class MySqlMuxJdbcRecordHandlerTest private JdbcRecordHandler jdbcRecordHandler; private AmazonS3 amazonS3; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -59,7 +59,7 @@ public void setup() 
this.recordHandlerMap = Collections.singletonMap("mysql", this.mySqlRecordHandler); this.amazonS3 = Mockito.mock(AmazonS3.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog", "mysql", diff --git a/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlRecordHandlerTest.java b/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlRecordHandlerTest.java index 53e6bfb9b6..e4a40ff0b5 100644 --- a/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlRecordHandlerTest.java +++ b/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlRecordHandlerTest.java @@ -36,7 +36,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; @@ -47,6 +46,7 @@ import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -68,7 +68,7 @@ public class MySqlRecordHandlerTest private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; private AmazonS3 amazonS3; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; @Before public void setup() @@ -76,7 +76,7 @@ public void setup() { this.amazonS3 = 
Mockito.mock(AmazonS3.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); this.connection = Mockito.mock(Connection.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); diff --git a/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/integ/MySqlIntegTest.java b/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/integ/MySqlIntegTest.java index 7e2dc082e0..9de09c16a4 100644 --- a/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/integ/MySqlIntegTest.java +++ b/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/integ/MySqlIntegTest.java @@ -26,7 +26,6 @@ import com.amazonaws.athena.connector.lambda.domain.TableName; import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionInfo; import com.amazonaws.athena.connectors.jdbc.integ.JdbcTableUtils; -import com.amazonaws.services.athena.model.Row; import com.amazonaws.services.rds.AmazonRDS; import com.amazonaws.services.rds.AmazonRDSClientBuilder; import com.amazonaws.services.rds.model.DescribeDBInstancesRequest; @@ -53,6 +52,7 @@ import software.amazon.awscdk.services.rds.MysqlEngineVersion; import software.amazon.awscdk.services.rds.StorageType; import software.amazon.awscdk.services.secretsmanager.Secret; +import software.amazon.awssdk.services.athena.model.Row; import java.util.ArrayList; import java.util.Collections; @@ -436,13 +436,13 @@ public void selectColumnWithPredicateIntegTest() String query = String.format("select title from %s.%s.%s where year > 2010;", lambdaFunctionName, mysqlDbName, mysqlTableMovies); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the 
column-header row rows.remove(0); } List titles = new ArrayList<>(); - rows.forEach(row -> titles.add(row.getData().get(0).getVarCharValue())); + rows.forEach(row -> titles.add(row.data().get(0).varCharValue())); logger.info("Titles: {}", titles); assertEquals("Wrong number of DB records found.", 1, titles.size()); assertTrue("Movie title not found: Interstellar.", titles.contains("Interstellar")); @@ -459,13 +459,13 @@ public void selectColumnBetweenDatesIntegTest() String query = String.format( "select first_name from %s.%s.%s where birthday between date('2005-10-01') and date('2005-10-31');", lambdaFunctionName, mysqlDbName, mysqlTableBday); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List names = new ArrayList<>(); - rows.forEach(row -> names.add(row.getData().get(0).getVarCharValue())); + rows.forEach(row -> names.add(row.data().get(0).varCharValue())); logger.info("Names: {}", names); assertEquals("Wrong number of DB records found.", 1, names.size()); assertTrue("Name not found: Jane.", names.contains("Jane")); diff --git a/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/NeptuneMetadataHandler.java b/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/NeptuneMetadataHandler.java index ce62cb3833..37c0c89c2e 100644 --- a/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/NeptuneMetadataHandler.java +++ b/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/NeptuneMetadataHandler.java @@ -44,7 +44,6 @@ import com.amazonaws.athena.connectors.neptune.propertygraph.PropertyGraphHandler; import com.amazonaws.athena.connectors.neptune.qpt.NeptuneQueryPassthrough; import com.amazonaws.athena.connectors.neptune.rdf.NeptuneSparqlConnection; -import com.amazonaws.services.athena.AmazonAthena; import com.google.common.collect.ImmutableMap; import 
org.apache.arrow.util.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Field; @@ -54,6 +53,7 @@ import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.glue.GlueClient; import software.amazon.awssdk.services.glue.model.GetTablesRequest; import software.amazon.awssdk.services.glue.model.GetTablesResponse; @@ -113,7 +113,7 @@ protected NeptuneMetadataHandler( NeptuneConnection neptuneConnection, EncryptionKeyFactory keyFactory, SecretsManagerClient awsSecretsManager, - AmazonAthena athena, + AthenaClient athena, String spillBucket, String spillPrefix, java.util.Map configOptions) diff --git a/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/NeptuneRecordHandler.java b/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/NeptuneRecordHandler.java index f347449eb1..a61278292a 100644 --- a/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/NeptuneRecordHandler.java +++ b/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/NeptuneRecordHandler.java @@ -26,13 +26,12 @@ import com.amazonaws.athena.connectors.neptune.Enums.GraphType; import com.amazonaws.athena.connectors.neptune.propertygraph.PropertyGraphHandler; import com.amazonaws.athena.connectors.neptune.rdf.RDFHandler; -import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; import org.apache.arrow.util.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; /** @@ -66,7 +65,7 @@ public NeptuneRecordHandler(java.util.Map configOptions) this( 
AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), - AmazonAthenaClientBuilder.defaultClient(), + AthenaClient.create(), NeptuneConnection.createConnection(configOptions), configOptions); } @@ -75,7 +74,7 @@ public NeptuneRecordHandler(java.util.Map configOptions) protected NeptuneRecordHandler( AmazonS3 amazonS3, SecretsManagerClient secretsManager, - AmazonAthena amazonAthena, + AthenaClient amazonAthena, NeptuneConnection neptuneConnection, java.util.Map configOptions) { diff --git a/athena-neptune/src/test/java/com/amazonaws/athena/connectors/neptune/NeptuneMetadataHandlerTest.java b/athena-neptune/src/test/java/com/amazonaws/athena/connectors/neptune/NeptuneMetadataHandlerTest.java index 19987a844c..f125037311 100644 --- a/athena-neptune/src/test/java/com/amazonaws/athena/connectors/neptune/NeptuneMetadataHandlerTest.java +++ b/athena-neptune/src/test/java/com/amazonaws/athena/connectors/neptune/NeptuneMetadataHandlerTest.java @@ -28,7 +28,6 @@ import com.amazonaws.athena.connector.lambda.metadata.ListTablesRequest; import com.amazonaws.athena.connector.lambda.metadata.ListTablesResponse; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; -import com.amazonaws.services.athena.AmazonAthena; import org.junit.After; import org.junit.Before; @@ -38,6 +37,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.glue.GlueClient; import software.amazon.awssdk.services.glue.model.Column; import software.amazon.awssdk.services.glue.model.GetTablesRequest; @@ -83,7 +83,7 @@ public void setUp() throws Exception { logger.info("setUpBefore - enter"); allocator = new BlockAllocatorImpl(); handler = new NeptuneMetadataHandler(glue,neptuneConnection, - new LocalKeyFactory(), mock(SecretsManagerClient.class), mock(AmazonAthena.class), "spill-bucket", + new LocalKeyFactory(), mock(SecretsManagerClient.class), mock(AthenaClient.class), 
"spill-bucket", "spill-prefix", com.google.common.collect.ImmutableMap.of()); logger.info("setUpBefore - exit"); } diff --git a/athena-neptune/src/test/java/com/amazonaws/athena/connectors/neptune/NeptuneRecordHandlerTest.java b/athena-neptune/src/test/java/com/amazonaws/athena/connectors/neptune/NeptuneRecordHandlerTest.java index eacce75fbb..13ff521aaf 100644 --- a/athena-neptune/src/test/java/com/amazonaws/athena/connectors/neptune/NeptuneRecordHandlerTest.java +++ b/athena-neptune/src/test/java/com/amazonaws/athena/connectors/neptune/NeptuneRecordHandlerTest.java @@ -46,7 +46,6 @@ import com.amazonaws.athena.connector.lambda.records.RemoteReadRecordsResponse; import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.model.PutObjectRequest; import com.amazonaws.services.s3.model.PutObjectResult; @@ -76,6 +75,7 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.ByteArrayInputStream; @@ -99,7 +99,7 @@ public class NeptuneRecordHandlerTest extends TestBase { private Schema schemaPGQueryForRead; private AmazonS3 amazonS3; private SecretsManagerClient awsSecretsManager; - private AmazonAthena athena; + private AthenaClient athena; private S3BlockSpillReader spillReader; private EncryptionKeyFactory keyFactory = new LocalKeyFactory(); private List mockS3Storage = new ArrayList<>(); @@ -166,7 +166,7 @@ public void setUp() { allocator = new BlockAllocatorImpl(); amazonS3 = mock(AmazonS3.class); awsSecretsManager = mock(SecretsManagerClient.class); - athena = mock(AmazonAthena.class); + athena = mock(AthenaClient.class); when(amazonS3.putObject(any())) 
.thenAnswer((InvocationOnMock invocationOnMock) -> { diff --git a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandler.java b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandler.java index e3d96f2af0..3932645fe7 100644 --- a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandler.java +++ b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandler.java @@ -52,7 +52,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcArrowTypeConverter; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.PreparedStatementBuilder; -import com.amazonaws.services.athena.AmazonAthena; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; @@ -64,6 +63,7 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -126,7 +126,7 @@ public OracleMetadataHandler(DatabaseConnectionConfig databaseConnectionConfig, protected OracleMetadataHandler( DatabaseConnectionConfig databaseConnectionConfig, SecretsManagerClient secretsManager, - AmazonAthena athena, + AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { diff --git a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMuxMetadataHandler.java b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMuxMetadataHandler.java index df3399281b..6b854ccc64 100644 --- a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMuxMetadataHandler.java +++ 
b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMuxMetadataHandler.java @@ -25,8 +25,8 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandlerFactory; -import com.amazonaws.services.athena.AmazonAthena; import org.apache.arrow.util.VisibleForTesting; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -56,7 +56,7 @@ public OracleMuxMetadataHandler(java.util.Map configOptions) } @VisibleForTesting - protected OracleMuxMetadataHandler(SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + protected OracleMuxMetadataHandler(SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, Map metadataHandlerMap, DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { super(secretsManager, athena, jdbcConnectionFactory, metadataHandlerMap, databaseConnectionConfig, configOptions); diff --git a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMuxRecordHandler.java b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMuxRecordHandler.java index 820fe3162c..0b6d2f00b2 100644 --- a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMuxRecordHandler.java +++ b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMuxRecordHandler.java @@ -25,9 +25,9 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import 
com.google.common.annotations.VisibleForTesting; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -57,7 +57,7 @@ public OracleMuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - OracleMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + OracleMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleRecordHandler.java b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleRecordHandler.java index d2efc886a5..9d4f94bc30 100644 --- a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleRecordHandler.java +++ b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleRecordHandler.java @@ -28,8 +28,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.google.common.annotations.VisibleForTesting; @@ -37,6 +35,7 @@ import org.apache.commons.lang3.Validate; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import 
software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -76,13 +75,13 @@ public OracleRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, ja public OracleRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { - this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), + this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AthenaClient.create(), jdbcConnectionFactory, new OracleQueryStringBuilder(ORACLE_QUOTE_CHARACTER, new OracleFederationExpressionParser(ORACLE_QUOTE_CHARACTER)), configOptions); } @VisibleForTesting OracleRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, final AmazonS3 amazonS3, final SecretsManagerClient secretsManager, - final AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) + final AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); this.jdbcSplitQueryBuilder = Validate.notNull(jdbcSplitQueryBuilder, "query builder must not be null"); diff --git a/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandlerTest.java b/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandlerTest.java index f3f8d58484..c84dc15538 100644 --- a/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandlerTest.java +++ b/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandlerTest.java @@ -33,13 +33,13 @@ import 
com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; -import com.amazonaws.services.athena.AmazonAthena; import org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; @@ -73,7 +73,7 @@ public class OracleMetadataHandlerTest private Connection connection; private FederatedIdentity federatedIdentity; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; @Before public void setup() @@ -83,7 +83,7 @@ public void setup() this.connection = Mockito.mock(Connection.class, Mockito.RETURNS_DEEP_STUBS); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}").build()); this.oracleMetadataHandler = new OracleMetadataHandler(databaseConnectionConfig, this.secretsManager, this.athena, this.jdbcConnectionFactory, com.google.common.collect.ImmutableMap.of()); this.federatedIdentity = Mockito.mock(FederatedIdentity.class); diff --git 
a/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMuxJdbcMetadataHandlerTest.java b/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMuxJdbcMetadataHandlerTest.java index f2a80bd428..67907d9b31 100644 --- a/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMuxJdbcMetadataHandlerTest.java +++ b/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMuxJdbcMetadataHandlerTest.java @@ -34,10 +34,10 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.oracle.OracleMetadataHandler; import com.amazonaws.athena.connectors.oracle.OracleMuxMetadataHandler; -import com.amazonaws.services.athena.AmazonAthena; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Collections; @@ -52,7 +52,7 @@ public class OracleMuxJdbcMetadataHandlerTest private JdbcMetadataHandler jdbcMetadataHandler; private BlockAllocator allocator; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -65,7 +65,7 @@ public void setup() this.oracleMetadataHandler = Mockito.mock(OracleMetadataHandler.class); this.metadataHandlerMap = Collections.singletonMap("fakedatabase", this.oracleMetadataHandler); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog", "fakedatabase", diff --git 
a/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMuxJdbcRecordHandlerTest.java b/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMuxJdbcRecordHandlerTest.java index 4284c3559b..485c1f0ba6 100644 --- a/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMuxJdbcRecordHandlerTest.java +++ b/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMuxJdbcRecordHandlerTest.java @@ -30,12 +30,12 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.oracle.OracleMuxRecordHandler; import com.amazonaws.athena.connectors.oracle.OracleRecordHandler; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -50,7 +50,7 @@ public class OracleMuxJdbcRecordHandlerTest private JdbcRecordHandler jdbcRecordHandler; private AmazonS3 amazonS3; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -61,7 +61,7 @@ public void setup() this.recordHandlerMap = Collections.singletonMap("oracle", this.oracleRecordHandler); this.amazonS3 = Mockito.mock(AmazonS3.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog", "oracle", 
diff --git a/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleRecordHandlerTest.java b/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleRecordHandlerTest.java index 783c4df86b..4d0a887602 100644 --- a/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleRecordHandlerTest.java +++ b/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleRecordHandlerTest.java @@ -32,7 +32,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; @@ -42,6 +41,7 @@ import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -60,7 +60,7 @@ public class OracleRecordHandlerTest private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; private AmazonS3 amazonS3; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; private static final String ORACLE_QUOTE_CHARACTER = "\""; @@ -71,7 +71,7 @@ public void setup() { this.amazonS3 = Mockito.mock(AmazonS3.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); this.connection = Mockito.mock(Connection.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); diff --git 
a/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/integ/OracleIntegTest.java b/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/integ/OracleIntegTest.java index a197daa374..26905abfc9 100644 --- a/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/integ/OracleIntegTest.java +++ b/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/integ/OracleIntegTest.java @@ -20,7 +20,6 @@ package com.amazonaws.athena.connectors.oracle.integ; import com.amazonaws.athena.connector.integ.data.TestConfig; -import com.amazonaws.services.athena.model.Row; import com.google.common.collect.ImmutableList; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -35,6 +34,7 @@ import software.amazon.awscdk.services.iam.Effect; import software.amazon.awscdk.services.iam.PolicyDocument; import software.amazon.awscdk.services.iam.PolicyStatement; +import software.amazon.awssdk.services.athena.model.Row; import static org.junit.Assert.assertEquals; @@ -223,13 +223,13 @@ public void fetchRangePartitionDataTest() logger.info("--------------------------------------------------"); String query = String.format("select * from \"lambda:%s\".\"%s\".\"%s\";", lambdaFunctionName, oracleDBName ,rangePartitionTable); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List rangePartitonData = new ArrayList<>(); - rows.forEach(row -> rangePartitonData.add(row.getData().get(0).getVarCharValue())); + rows.forEach(row -> rangePartitonData.add(row.data().get(0).varCharValue())); logger.info("rangePartitonData: {}", rangePartitonData); } @@ -241,13 +241,13 @@ public void fetchAllDataTypeDataTest() logger.info("--------------------------------------------------"); String query = String.format("select * from \"lambda:%s\".\"%s\".\"%s\";", lambdaFunctionName, oracleDBName ,allDataTypeTable); - 
List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List allDataTypeData = new ArrayList<>(); - rows.forEach(row -> allDataTypeData.add(row.getData().get(0).getVarCharValue())); + rows.forEach(row -> allDataTypeData.add(row.data().get(0).varCharValue())); logger.info("allDataTypeData: {}", allDataTypeData); } @@ -259,13 +259,13 @@ public void fetchListPartitionDataTest() logger.info("--------------------------------------------------"); String query = String.format("select * from \"lambda:%s\".\"%s\".\"%s\";", lambdaFunctionName, oracleDBName ,listPartitionTable); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List listPartitonData = new ArrayList<>(); - rows.forEach(row -> listPartitonData.add(row.getData().get(0).getVarCharValue())); + rows.forEach(row -> listPartitonData.add(row.data().get(0).varCharValue())); logger.info("listPartitonData: {}", listPartitonData); } @@ -277,13 +277,13 @@ public void numberDataTypeWhereClauseTest() logger.info("--------------------------------------------------"); String query = String.format("select * from \"lambda:%s\".\"%s\".\"%s\" where NUMBER_TYPE=320;", lambdaFunctionName, oracleDBName ,allDataTypeTable); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List numberDataTypeData = new ArrayList<>(); - rows.forEach(row -> numberDataTypeData.add(row.getData().get(0).getVarCharValue())); + rows.forEach(row -> numberDataTypeData.add(row.data().get(0).varCharValue())); logger.info("numberDataTypeData: {}", numberDataTypeData); } @@ -295,13 +295,13 @@ public void 
charDataTypeWhereClauseTest() logger.info("--------------------------------------------------"); String query = String.format("select * from \"lambda:%s\".\"%s\".\"%s\" where CHAR_TYPE='A';", lambdaFunctionName, oracleDBName ,allDataTypeTable); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List charDataTypeData = new ArrayList<>(); - rows.forEach(row -> charDataTypeData.add(row.getData().get(0).getVarCharValue())); + rows.forEach(row -> charDataTypeData.add(row.data().get(0).varCharValue())); logger.info("charDataTypeData: {}", charDataTypeData); } @@ -313,13 +313,13 @@ public void dateDataTypeWhereClauseTest() logger.info("--------------------------------------------------"); String query = String.format("select * from \"lambda:%s\".\"%s\".\"%s\" where DATE_COL = date('2021-03-18');", lambdaFunctionName, oracleDBName ,allDataTypeTable); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List dateDataTypeData = new ArrayList<>(); - rows.forEach(row -> dateDataTypeData.add(row.getData().get(0).getVarCharValue())); + rows.forEach(row -> dateDataTypeData.add(row.data().get(0).varCharValue())); logger.info("dateDataTypeData: {}", dateDataTypeData); } @@ -331,13 +331,13 @@ public void timestampDataTypeWhereClauseTest() logger.info("--------------------------------------------------"); String query = String.format("select * from \"lambda:%s\".\"%s\".\"%s\" where TIMESTAMP_WITH_3_FRAC_SEC_COL >= CAST('2021-03-18 09:00:00.123' AS TIMESTAMP);", lambdaFunctionName, oracleDBName ,allDataTypeTable); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header 
row rows.remove(0); } List timestampDataTypeData = new ArrayList<>(); - rows.forEach(row -> timestampDataTypeData.add(row.getData().get(0).getVarCharValue())); + rows.forEach(row -> timestampDataTypeData.add(row.data().get(0).varCharValue())); logger.info("timestampDataTypeData: {}", timestampDataTypeData); } @@ -349,13 +349,13 @@ public void varcharDataTypeWhereClauseTest() logger.info("--------------------------------------------------"); String query = String.format("select * from \"lambda:%s\".\"%s\".\"%s\" where VARCHAR_10_COL ='ORACLEXPR';", lambdaFunctionName, oracleDBName ,allDataTypeTable); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List varcharDataTypeData = new ArrayList<>(); - rows.forEach(row -> varcharDataTypeData.add(row.getData().get(0).getVarCharValue())); + rows.forEach(row -> varcharDataTypeData.add(row.data().get(0).varCharValue())); logger.info("varcharDataTypeData: {}", varcharDataTypeData); } @@ -367,13 +367,13 @@ public void decimalDataTypeWhereClauseTest() logger.info("--------------------------------------------------"); String query = String.format("select * from \"lambda:%s\".\"%s\".\"%s\" where NUMBER_3_SF_2_DP = 5.82;", lambdaFunctionName, oracleDBName ,allDataTypeTable); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List decimalDataTypeData = new ArrayList<>(); - rows.forEach(row -> decimalDataTypeData.add(row.getData().get(0).getVarCharValue())); + rows.forEach(row -> decimalDataTypeData.add(row.data().get(0).varCharValue())); logger.info("decimalDataTypeData: {}", decimalDataTypeData); } @@ -385,13 +385,13 @@ public void multiDataTypefilterClauseTest() logger.info("--------------------------------------------------"); String 
query = String.format("select * from \"lambda:%s\".\"%s\".\"%s\" where DATE_COL= date('2021-03-18') and NUMBER_3_SF_2_DP = 5.82;", lambdaFunctionName, oracleDBName ,allDataTypeTable); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List multiDataTypeFilterData = new ArrayList<>(); - rows.forEach(row -> multiDataTypeFilterData.add(row.getData().get(0).getVarCharValue())); + rows.forEach(row -> multiDataTypeFilterData.add(row.data().get(0).varCharValue())); logger.info("multiDataTypeFilterData: {}", multiDataTypeFilterData); } @@ -404,13 +404,13 @@ public void floatDataTypeWhereClauseTest() logger.info("--------------------------------------------------"); String query = String.format("select * from \"lambda:%s\".\"%s\".\"%s\" where float_col = 39840.0;", lambdaFunctionName, oracleDBName ,allDataTypeTable); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List floatDataTypeData = new ArrayList<>(); - rows.forEach(row -> floatDataTypeData.add(row.getData().get(0).getVarCharValue())); + rows.forEach(row -> floatDataTypeData.add(row.data().get(0).varCharValue())); logger.info("floatDataTypeData: {}", floatDataTypeData); } diff --git a/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMetadataHandler.java b/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMetadataHandler.java index 2aae525373..1f8e2b5805 100644 --- a/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMetadataHandler.java +++ b/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMetadataHandler.java @@ -46,7 +46,6 @@ import 
com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.PreparedStatementBuilder; -import com.amazonaws.services.athena.AmazonAthena; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; @@ -56,6 +55,7 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -115,7 +115,7 @@ public PostGreSqlMetadataHandler(DatabaseConnectionConfig databaseConnectionConf protected PostGreSqlMetadataHandler( DatabaseConnectionConfig databaseConnectionConfig, SecretsManagerClient secretsManager, - AmazonAthena athena, + AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { diff --git a/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxMetadataHandler.java b/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxMetadataHandler.java index c1a6ef2795..f6667acfbf 100644 --- a/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxMetadataHandler.java +++ b/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxMetadataHandler.java @@ -25,8 +25,8 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandlerFactory; -import com.amazonaws.services.athena.AmazonAthena; import org.apache.arrow.util.VisibleForTesting; +import software.amazon.awssdk.services.athena.AthenaClient; import 
software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -58,7 +58,7 @@ public PostGreSqlMuxMetadataHandler(java.util.Map configOptions) } @VisibleForTesting - protected PostGreSqlMuxMetadataHandler(SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + protected PostGreSqlMuxMetadataHandler(SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, Map metadataHandlerMap, DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { super(secretsManager, athena, jdbcConnectionFactory, metadataHandlerMap, databaseConnectionConfig, configOptions); diff --git a/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxRecordHandler.java b/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxRecordHandler.java index c82d597a4f..b6adea0a75 100644 --- a/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxRecordHandler.java +++ b/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxRecordHandler.java @@ -25,9 +25,9 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import com.google.common.annotations.VisibleForTesting; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -57,7 +57,7 @@ public PostGreSqlMuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - PostGreSqlMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory 
jdbcConnectionFactory, + PostGreSqlMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlRecordHandler.java b/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlRecordHandler.java index a32339a739..d054ff3d06 100644 --- a/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlRecordHandler.java +++ b/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlRecordHandler.java @@ -29,8 +29,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.google.common.annotations.VisibleForTesting; @@ -38,6 +36,7 @@ import org.apache.commons.lang3.Validate; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -70,13 +69,13 @@ public PostGreSqlRecordHandler(java.util.Map configOptions) public PostGreSqlRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { - this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), 
+ this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AthenaClient.create(), new GenericJdbcConnectionFactory(databaseConnectionConfig, PostGreSqlMetadataHandler.JDBC_PROPERTIES, new DatabaseConnectionInfo(POSTGRESQL_DRIVER_CLASS, POSTGRESQL_DEFAULT_PORT)), new PostGreSqlQueryStringBuilder(POSTGRES_QUOTE_CHARACTER, new PostgreSqlFederationExpressionParser(POSTGRES_QUOTE_CHARACTER)), configOptions); } @VisibleForTesting protected PostGreSqlRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, SecretsManagerClient secretsManager, - AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) + AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); this.jdbcSplitQueryBuilder = Validate.notNull(jdbcSplitQueryBuilder, "query builder must not be null"); diff --git a/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMetadataHandlerTest.java b/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMetadataHandlerTest.java index 60a51f3819..97f72e4f8e 100644 --- a/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMetadataHandlerTest.java +++ b/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMetadataHandlerTest.java @@ -40,7 +40,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; -import com.amazonaws.services.athena.AmazonAthena; import com.google.common.collect.ImmutableMap; import 
org.apache.arrow.vector.types.DateUnit; import org.apache.arrow.vector.types.FloatingPointPrecision; @@ -53,6 +52,7 @@ import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; @@ -87,7 +87,7 @@ public class PostGreSqlMetadataHandlerTest private Connection connection; private FederatedIdentity federatedIdentity; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; @Before public void setup() diff --git a/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxJdbcMetadataHandlerTest.java b/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxJdbcMetadataHandlerTest.java index 5059e131fc..109706c13b 100644 --- a/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxJdbcMetadataHandlerTest.java +++ b/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxJdbcMetadataHandlerTest.java @@ -32,10 +32,10 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; -import com.amazonaws.services.athena.AmazonAthena; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Collections; @@ -50,7 +50,7 @@ public class PostGreSqlMuxJdbcMetadataHandlerTest private JdbcMetadataHandler jdbcMetadataHandler; private BlockAllocator 
allocator; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -63,7 +63,7 @@ public void setup() this.postGreSqlMetadataHandler = Mockito.mock(PostGreSqlMetadataHandler.class); this.metadataHandlerMap = Collections.singletonMap("postgres", this.postGreSqlMetadataHandler); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog", "postgres", diff --git a/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxJdbcRecordHandlerTest.java b/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxJdbcRecordHandlerTest.java index b433e7a27b..dd7a6a7736 100644 --- a/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxJdbcRecordHandlerTest.java +++ b/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxJdbcRecordHandlerTest.java @@ -28,12 +28,12 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import 
java.sql.Connection; @@ -48,7 +48,7 @@ public class PostGreSqlMuxJdbcRecordHandlerTest private JdbcRecordHandler jdbcRecordHandler; private AmazonS3 amazonS3; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -59,7 +59,7 @@ public void setup() this.recordHandlerMap = Collections.singletonMap("postgres", this.postGreSqlRecordHandler); this.amazonS3 = Mockito.mock(AmazonS3.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog", "postgres", diff --git a/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlRecordHandlerTest.java b/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlRecordHandlerTest.java index cd2d988af7..96b879d41e 100644 --- a/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlRecordHandlerTest.java +++ b/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlRecordHandlerTest.java @@ -33,7 +33,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; @@ -46,6 +45,7 @@ import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import 
software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.math.BigDecimal; @@ -70,7 +70,7 @@ public class PostGreSqlRecordHandlerTest extends TestBase private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; private AmazonS3 amazonS3; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; @Before public void setup() @@ -78,7 +78,7 @@ public void setup() { this.amazonS3 = Mockito.mock(AmazonS3.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); this.connection = Mockito.mock(Connection.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); diff --git a/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/integ/PostGreSqlIntegTest.java b/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/integ/PostGreSqlIntegTest.java index 3fd840c1a3..adb5646d00 100644 --- a/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/integ/PostGreSqlIntegTest.java +++ b/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/integ/PostGreSqlIntegTest.java @@ -26,7 +26,6 @@ import com.amazonaws.athena.connector.lambda.domain.TableName; import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionInfo; import com.amazonaws.athena.connectors.jdbc.integ.JdbcTableUtils; -import com.amazonaws.services.athena.model.Row; import com.amazonaws.services.rds.AmazonRDS; import com.amazonaws.services.rds.AmazonRDSClientBuilder; import com.amazonaws.services.rds.model.DescribeDBInstancesRequest; @@ -53,6 +52,7 @@ import software.amazon.awscdk.services.rds.PostgresInstanceEngineProps; import 
software.amazon.awscdk.services.rds.StorageType; import software.amazon.awscdk.services.secretsmanager.Secret; +import software.amazon.awssdk.services.athena.model.Row; import java.util.ArrayList; import java.util.Collections; @@ -439,13 +439,13 @@ public void selectColumnWithPredicateIntegTest() String query = String.format("select title from %s.%s.%s where year > 2010;", lambdaFunctionName, postgresDbName, postgresTableMovies); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List titles = new ArrayList<>(); - rows.forEach(row -> titles.add(row.getData().get(0).getVarCharValue())); + rows.forEach(row -> titles.add(row.data().get(0).varCharValue())); logger.info("Titles: {}", titles); assertEquals("Wrong number of DB records found.", 1, titles.size()); assertTrue("Movie title not found: Interstellar.", titles.contains("Interstellar")); @@ -462,13 +462,13 @@ public void selectColumnBetweenDatesIntegTest() String query = String.format( "select first_name from %s.%s.%s where birthday between date('2005-10-01') and date('2005-10-31');", lambdaFunctionName, postgresDbName, postgresTableBday); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List names = new ArrayList<>(); - rows.forEach(row -> names.add(row.getData().get(0).getVarCharValue())); + rows.forEach(row -> names.add(row.data().get(0).varCharValue())); logger.info("Names: {}", names); assertEquals("Wrong number of DB records found.", 1, names.size()); assertTrue("Name not found: Jane.", names.contains("Jane")); diff --git a/athena-redis/src/main/java/com/amazonaws/athena/connectors/redis/RedisMetadataHandler.java b/athena-redis/src/main/java/com/amazonaws/athena/connectors/redis/RedisMetadataHandler.java index 
565f592fc8..5ddabdece9 100644 --- a/athena-redis/src/main/java/com/amazonaws/athena/connectors/redis/RedisMetadataHandler.java +++ b/athena-redis/src/main/java/com/amazonaws/athena/connectors/redis/RedisMetadataHandler.java @@ -47,7 +47,6 @@ import com.amazonaws.athena.connectors.redis.lettuce.RedisConnectionFactory; import com.amazonaws.athena.connectors.redis.lettuce.RedisConnectionWrapper; import com.amazonaws.athena.connectors.redis.qpt.RedisQueryPassthrough; -import com.amazonaws.services.athena.AmazonAthena; import com.google.common.collect.ImmutableMap; import io.lettuce.core.KeyScanCursor; import io.lettuce.core.Range; @@ -60,6 +59,7 @@ import org.apache.arrow.vector.util.Text; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.glue.GlueClient; import software.amazon.awssdk.services.glue.model.Database; import software.amazon.awssdk.services.glue.model.Table; @@ -154,7 +154,7 @@ protected RedisMetadataHandler( GlueClient awsGlue, EncryptionKeyFactory keyFactory, SecretsManagerClient secretsManager, - AmazonAthena athena, + AthenaClient athena, RedisConnectionFactory redisConnectionFactory, String spillBucket, String spillPrefix, diff --git a/athena-redis/src/main/java/com/amazonaws/athena/connectors/redis/RedisRecordHandler.java b/athena-redis/src/main/java/com/amazonaws/athena/connectors/redis/RedisRecordHandler.java index 86fd3ea55e..837e2decb1 100644 --- a/athena-redis/src/main/java/com/amazonaws/athena/connectors/redis/RedisRecordHandler.java +++ b/athena-redis/src/main/java/com/amazonaws/athena/connectors/redis/RedisRecordHandler.java @@ -29,8 +29,6 @@ import com.amazonaws.athena.connectors.redis.lettuce.RedisConnectionFactory; import com.amazonaws.athena.connectors.redis.lettuce.RedisConnectionWrapper; import com.amazonaws.athena.connectors.redis.qpt.RedisQueryPassthrough; -import com.amazonaws.services.athena.AmazonAthena; -import 
com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; import io.lettuce.core.KeyScanCursor; @@ -43,6 +41,7 @@ import org.apache.arrow.vector.types.pojo.Field; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.HashMap; @@ -96,7 +95,7 @@ public RedisRecordHandler(java.util.Map configOptions) this( AmazonS3ClientBuilder.standard().build(), SecretsManagerClient.create(), - AmazonAthenaClientBuilder.defaultClient(), + AthenaClient.create(), new RedisConnectionFactory(), configOptions); } @@ -104,7 +103,7 @@ public RedisRecordHandler(java.util.Map configOptions) @VisibleForTesting protected RedisRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, - AmazonAthena athena, + AthenaClient athena, RedisConnectionFactory redisConnectionFactory, java.util.Map configOptions) { diff --git a/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/RedisMetadataHandlerTest.java b/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/RedisMetadataHandlerTest.java index ad3a780b82..2909de1d82 100644 --- a/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/RedisMetadataHandlerTest.java +++ b/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/RedisMetadataHandlerTest.java @@ -36,7 +36,6 @@ import com.amazonaws.athena.connectors.redis.lettuce.RedisConnectionFactory; import com.amazonaws.athena.connectors.redis.lettuce.RedisConnectionWrapper; import com.amazonaws.athena.connectors.redis.util.MockKeyScanCursor; -import com.amazonaws.services.athena.AmazonAthena; import io.lettuce.core.Range; import io.lettuce.core.ScanArgs; import io.lettuce.core.ScanCursor; @@ -53,6 +52,7 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import 
org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.glue.GlueClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; @@ -109,7 +109,7 @@ public class RedisMetadataHandlerTest private SecretsManagerClient mockSecretsManager; @Mock - private AmazonAthena mockAthena; + private AthenaClient mockAthena; @Mock private RedisConnectionFactory mockFactory; diff --git a/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/RedisRecordHandlerTest.java b/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/RedisRecordHandlerTest.java index 1ed0f51301..b0c774c423 100644 --- a/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/RedisRecordHandlerTest.java +++ b/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/RedisRecordHandlerTest.java @@ -40,7 +40,6 @@ import com.amazonaws.athena.connectors.redis.lettuce.RedisConnectionWrapper; import com.amazonaws.athena.connectors.redis.util.MockKeyScanCursor; import com.amazonaws.athena.connectors.redis.util.MockScoredValueScanCursor; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.model.PutObjectRequest; import com.amazonaws.services.s3.model.PutObjectResult; @@ -66,6 +65,7 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; @@ -125,7 +125,7 @@ public class RedisRecordHandlerTest private RedisConnectionFactory mockFactory; @Mock - private AmazonAthena mockAthena; + private 
AthenaClient mockAthena; @Before public void setUp() diff --git a/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/integ/RedisIntegTest.java b/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/integ/RedisIntegTest.java index a73b6d1ba8..9c2ea312c2 100644 --- a/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/integ/RedisIntegTest.java +++ b/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/integ/RedisIntegTest.java @@ -28,7 +28,6 @@ import com.amazonaws.athena.connector.integ.data.ConnectorVpcAttributes; import com.amazonaws.athena.connector.integ.data.SecretsManagerCredentials; import com.amazonaws.athena.connector.integ.providers.ConnectorPackagingAttributesProvider; -import com.amazonaws.services.athena.model.Row; import com.amazonaws.services.elasticache.AmazonElastiCache; import com.amazonaws.services.elasticache.AmazonElastiCacheClientBuilder; import com.amazonaws.services.elasticache.model.DescribeCacheClustersRequest; @@ -60,6 +59,7 @@ import software.amazon.awscdk.services.s3.Bucket; import software.amazon.awscdk.services.s3.IBucket; import software.amazon.awssdk.http.apache.ApacheHttpClient; +import software.amazon.awssdk.services.athena.model.Row; import software.amazon.awssdk.services.glue.GlueClient; import software.amazon.awssdk.services.glue.model.EntityNotFoundException; import software.amazon.awssdk.services.glue.model.TableInput; @@ -416,16 +416,16 @@ private void selectHashValue() { String query = String.format("select * from \"%s\".\"%s\".\"%s\";", lambdaFunctionName, redisDbName, redisTableNamePrefix + "_1"); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List names = new ArrayList<>(); rows.forEach(row -> { - names.add(row.getData().get(1).getVarCharValue()); + names.add(row.data().get(1).varCharValue()); // redis key is added as an 
extra col by the connector. so expected #cols is #glue cols + 1 - assertEquals("Wrong number of columns found", 4, row.getData().size()); + assertEquals("Wrong number of columns found", 4, row.data().size()); }); logger.info("names: {}", names); assertEquals("Wrong number of DB records found.", 3, names.size()); @@ -438,15 +438,15 @@ private void selectZsetValue() { String query = String.format("select * from \"%s\".\"%s\".\"%s\";", lambdaFunctionName, redisDbName, redisTableNamePrefix + "_2"); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List names = new ArrayList<>(); rows.forEach(row -> { - names.add(row.getData().get(0).getVarCharValue()); - assertEquals("Wrong number of columns found", 2, row.getData().size()); + names.add(row.data().get(0).varCharValue()); + assertEquals("Wrong number of columns found", 2, row.data().size()); }); logger.info("names: {}", names); assertEquals("Wrong number of DB records found.", 3, names.size()); @@ -459,15 +459,15 @@ private void selectLiteralValue() { String query = String.format("select * from \"%s\".\"%s\".\"%s\";", lambdaFunctionName, redisDbName, redisTableNamePrefix + "_2"); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List names = new ArrayList<>(); rows.forEach(row -> { - names.add(row.getData().get(0).getVarCharValue()); - assertEquals("Wrong number of columns found", 2, row.getData().size()); + names.add(row.data().get(0).varCharValue()); + assertEquals("Wrong number of columns found", 2, row.data().size()); }); logger.info("names: {}", names); assertEquals("Wrong number of DB records found.", 3, names.size()); diff --git 
a/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftMetadataHandler.java b/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftMetadataHandler.java index fcc5064d00..9a4dc18fea 100644 --- a/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftMetadataHandler.java +++ b/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftMetadataHandler.java @@ -36,11 +36,11 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil; import com.amazonaws.athena.connectors.postgresql.PostGreSqlMetadataHandler; -import com.amazonaws.services.athena.AmazonAthena; import com.google.common.collect.ImmutableMap; import org.apache.arrow.util.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -80,7 +80,7 @@ public RedshiftMetadataHandler(DatabaseConnectionConfig databaseConnectionConfig } @VisibleForTesting - RedshiftMetadataHandler(DatabaseConnectionConfig databaseConnectionConfig, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) + RedshiftMetadataHandler(DatabaseConnectionConfig databaseConnectionConfig, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { super(databaseConnectionConfig, secretsManager, athena, jdbcConnectionFactory, configOptions); } diff --git a/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxMetadataHandler.java b/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxMetadataHandler.java index ac027906c7..6293dd99fd 100644 --- 
a/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxMetadataHandler.java +++ b/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxMetadataHandler.java @@ -25,8 +25,8 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandlerFactory; -import com.amazonaws.services.athena.AmazonAthena; import org.apache.arrow.util.VisibleForTesting; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -58,7 +58,7 @@ public RedshiftMuxMetadataHandler(java.util.Map configOptions) } @VisibleForTesting - protected RedshiftMuxMetadataHandler(SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + protected RedshiftMuxMetadataHandler(SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, Map metadataHandlerMap, DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { super(secretsManager, athena, jdbcConnectionFactory, metadataHandlerMap, databaseConnectionConfig, configOptions); diff --git a/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxRecordHandler.java b/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxRecordHandler.java index 0be638dcdb..38b1b26a24 100644 --- a/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxRecordHandler.java +++ b/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxRecordHandler.java @@ -25,9 +25,9 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import 
com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import com.google.common.annotations.VisibleForTesting; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -58,7 +58,7 @@ public RedshiftMuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - RedshiftMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + RedshiftMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftRecordHandler.java b/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftRecordHandler.java index 2bb85fef36..cdb9db0954 100644 --- a/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftRecordHandler.java +++ b/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftRecordHandler.java @@ -30,13 +30,12 @@ import com.amazonaws.athena.connectors.postgresql.PostGreSqlQueryStringBuilder; import com.amazonaws.athena.connectors.postgresql.PostGreSqlRecordHandler; import com.amazonaws.athena.connectors.postgresql.PostgreSqlFederationExpressionParser; -import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; import org.apache.arrow.util.VisibleForTesting; import 
org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import static com.amazonaws.athena.connectors.postgresql.PostGreSqlConstants.POSTGRES_QUOTE_CHARACTER; @@ -61,12 +60,12 @@ public RedshiftRecordHandler(java.util.Map configOptions) public RedshiftRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { - super(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), + super(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AthenaClient.create(), new GenericJdbcConnectionFactory(databaseConnectionConfig, PostGreSqlMetadataHandler.JDBC_PROPERTIES, new DatabaseConnectionInfo(REDSHIFT_DRIVER_CLASS, REDSHIFT_DEFAULT_PORT)), new PostGreSqlQueryStringBuilder(POSTGRES_QUOTE_CHARACTER, new PostgreSqlFederationExpressionParser(POSTGRES_QUOTE_CHARACTER)), configOptions); } @VisibleForTesting - RedshiftRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) + RedshiftRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(databaseConnectionConfig, amazonS3, secretsManager, athena, jdbcConnectionFactory, jdbcSplitQueryBuilder, configOptions); } diff --git a/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftMetadataHandlerTest.java b/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftMetadataHandlerTest.java index 
280102ca5b..2882d1a0d2 100644 --- a/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftMetadataHandlerTest.java +++ b/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftMetadataHandlerTest.java @@ -41,7 +41,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.athena.connectors.postgresql.PostGreSqlMetadataHandler; -import com.amazonaws.services.athena.AmazonAthena; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.types.DateUnit; import org.apache.arrow.vector.types.FloatingPointPrecision; @@ -54,6 +53,7 @@ import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; @@ -89,7 +89,7 @@ public class RedshiftMetadataHandlerTest private Connection connection; private FederatedIdentity federatedIdentity; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; @Before public void setup() diff --git a/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxJdbcMetadataHandlerTest.java b/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxJdbcMetadataHandlerTest.java index 66ddda2d43..7db8b60795 100644 --- a/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxJdbcMetadataHandlerTest.java +++ b/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxJdbcMetadataHandlerTest.java @@ -32,10 +32,10 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; 
import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; -import com.amazonaws.services.athena.AmazonAthena; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Collections; @@ -50,7 +50,7 @@ public class RedshiftMuxJdbcMetadataHandlerTest private JdbcMetadataHandler jdbcMetadataHandler; private BlockAllocator allocator; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -63,7 +63,7 @@ public void setup() this.redshiftMetadataHandler = Mockito.mock(RedshiftMetadataHandler.class); this.metadataHandlerMap = Collections.singletonMap("redshift", this.redshiftMetadataHandler); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog", "redshift", diff --git a/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxJdbcRecordHandlerTest.java b/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxJdbcRecordHandlerTest.java index e7172492dc..4e0ff391cf 100644 --- a/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxJdbcRecordHandlerTest.java +++ b/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxJdbcRecordHandlerTest.java @@ -28,12 +28,12 @@ import 
com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -48,7 +48,7 @@ public class RedshiftMuxJdbcRecordHandlerTest private JdbcRecordHandler jdbcRecordHandler; private AmazonS3 amazonS3; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -59,7 +59,7 @@ public void setup() this.recordHandlerMap = Collections.singletonMap("redshift", this.redshiftRecordHandler); this.amazonS3 = Mockito.mock(AmazonS3.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog", "redshift", diff --git a/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftRecordHandlerTest.java b/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftRecordHandlerTest.java index 024c076b43..d9e2508b22 100644 --- a/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftRecordHandlerTest.java +++ 
b/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftRecordHandlerTest.java @@ -36,7 +36,6 @@ import com.amazonaws.athena.connectors.postgresql.PostGreSqlMetadataHandler; import com.amazonaws.athena.connectors.postgresql.PostGreSqlQueryStringBuilder; import com.amazonaws.athena.connectors.postgresql.PostgreSqlFederationExpressionParser; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; @@ -49,6 +48,7 @@ import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.math.BigDecimal; @@ -73,7 +73,7 @@ public class RedshiftRecordHandlerTest private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; private AmazonS3 amazonS3; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; @Before public void setup() @@ -81,7 +81,7 @@ public void setup() { this.amazonS3 = Mockito.mock(AmazonS3.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); this.connection = Mockito.mock(Connection.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); diff --git a/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/integ/RedshiftIntegTest.java b/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/integ/RedshiftIntegTest.java index 392a0f5611..d32334a0e2 100644 --- a/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/integ/RedshiftIntegTest.java +++ 
b/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/integ/RedshiftIntegTest.java @@ -26,8 +26,6 @@ import com.amazonaws.athena.connector.lambda.domain.TableName; import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionInfo; import com.amazonaws.athena.connectors.jdbc.integ.JdbcTableUtils; -import com.amazonaws.services.athena.model.Datum; -import com.amazonaws.services.athena.model.Row; import com.amazonaws.services.redshift.AmazonRedshift; import com.amazonaws.services.redshift.AmazonRedshiftClientBuilder; import com.amazonaws.services.redshift.model.DescribeClustersRequest; @@ -51,6 +49,7 @@ import software.amazon.awscdk.services.redshift.ClusterType; import software.amazon.awscdk.services.redshift.Login; import software.amazon.awscdk.services.redshift.NodeType; +import software.amazon.awssdk.services.athena.model.Row; import java.util.ArrayList; import java.util.Collections; @@ -442,13 +441,13 @@ public void selectColumnWithPredicateIntegTest() String query = String.format("select title from %s.%s.%s where year > 2000;", lambdaFunctionName, redshiftDbName, redshiftTableMovies); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List titles = new ArrayList<>(); - rows.forEach(row -> titles.add(row.getData().get(0).getVarCharValue())); + rows.forEach(row -> titles.add(row.data().get(0).varCharValue())); logger.info("Titles: {}", titles); assertEquals("Wrong number of DB records found.", 1, titles.size()); assertTrue("Movie title not found: Interstellar.", titles.contains("Interstellar")); @@ -465,13 +464,13 @@ public void selectColumnBetweenDatesIntegTest() String query = String.format( "select first_name from %s.%s.%s where birthday between date('2003-1-1') and date('2005-12-31');", lambdaFunctionName, redshiftDbName, redshiftTableBday); - List rows = 
startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List names = new ArrayList<>(); - rows.forEach(row -> names.add(row.getData().get(0).getVarCharValue())); + rows.forEach(row -> names.add(row.data().get(0).varCharValue())); logger.info("Names: {}", names); assertEquals("Wrong number of DB records found.", 1, names.size()); assertTrue("Name not found: Jane.", names.contains("Jane")); diff --git a/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaMetadataHandler.java b/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaMetadataHandler.java index 8f3e462212..552c6751a4 100644 --- a/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaMetadataHandler.java +++ b/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaMetadataHandler.java @@ -52,7 +52,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcArrowTypeConverter; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.PreparedStatementBuilder; -import com.amazonaws.services.athena.AmazonAthena; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.complex.reader.FieldReader; @@ -63,6 +62,7 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -103,7 +103,7 @@ SaphanaConstants.JDBC_PROPERTIES, new DatabaseConnectionInfo(SaphanaConstants.SA protected SaphanaMetadataHandler( DatabaseConnectionConfig databaseConnectionConfig, SecretsManagerClient secretsManager, - AmazonAthena athena, + AthenaClient athena, 
JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { diff --git a/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxMetadataHandler.java b/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxMetadataHandler.java index c9d08e18ea..238de7146d 100644 --- a/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxMetadataHandler.java +++ b/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxMetadataHandler.java @@ -25,8 +25,8 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandlerFactory; -import com.amazonaws.services.athena.AmazonAthena; import org.apache.arrow.util.VisibleForTesting; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -56,7 +56,7 @@ public SaphanaMuxMetadataHandler(java.util.Map configOptions) } @VisibleForTesting - protected SaphanaMuxMetadataHandler(SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + protected SaphanaMuxMetadataHandler(SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, Map metadataHandlerMap, DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { super(secretsManager, athena, jdbcConnectionFactory, metadataHandlerMap, databaseConnectionConfig, configOptions); diff --git a/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxRecordHandler.java b/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxRecordHandler.java index 2063d2fa0a..a61d340fc7 100644 --- 
a/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxRecordHandler.java +++ b/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxRecordHandler.java @@ -25,9 +25,9 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import com.google.common.annotations.VisibleForTesting; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -55,7 +55,7 @@ public SaphanaMuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - SaphanaMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + SaphanaMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaRecordHandler.java b/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaRecordHandler.java index 27b0aa6446..750bdf2434 100644 --- a/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaRecordHandler.java +++ b/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaRecordHandler.java @@ -31,8 +31,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import 
com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.google.common.annotations.VisibleForTesting; @@ -41,6 +39,7 @@ import org.apache.commons.lang3.Validate; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -67,7 +66,7 @@ public SaphanaRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, j SaphanaConstants.SAPHANA_DEFAULT_PORT)), configOptions); } @VisibleForTesting - SaphanaRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) + SaphanaRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); this.jdbcSplitQueryBuilder = Validate.notNull(jdbcSplitQueryBuilder, "query builder must not be null"); @@ -76,7 +75,7 @@ public SaphanaRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, j public SaphanaRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, GenericJdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), - AmazonAthenaClientBuilder.defaultClient(), jdbcConnectionFactory, new 
SaphanaQueryStringBuilder(SAPHANA_QUOTE_CHARACTER, new SaphanaFederationExpressionParser(SAPHANA_QUOTE_CHARACTER)), configOptions); + AthenaClient.create(), jdbcConnectionFactory, new SaphanaQueryStringBuilder(SAPHANA_QUOTE_CHARACTER, new SaphanaFederationExpressionParser(SAPHANA_QUOTE_CHARACTER)), configOptions); } @Override diff --git a/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaMetadataHandlerTest.java b/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaMetadataHandlerTest.java index ecf49611d1..59d9973b8e 100644 --- a/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaMetadataHandlerTest.java +++ b/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaMetadataHandlerTest.java @@ -48,7 +48,7 @@ import com.amazonaws.athena.connector.lambda.domain.TableName; import com.amazonaws.athena.connector.lambda.domain.predicate.Constraints; import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; -import com.amazonaws.services.athena.AmazonAthena; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; @@ -66,7 +66,7 @@ public class SaphanaMetadataHandlerTest private Connection connection; private FederatedIdentity federatedIdentity; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; private BlockAllocator blockAllocator; private static final Schema PARTITION_SCHEMA = SchemaBuilder.newBuilder().addField("PART_ID", org.apache.arrow.vector.types.Types.MinorType.VARCHAR.getType()).build(); @@ -79,7 +79,7 @@ public void setup() this.connection = Mockito.mock(Connection.class, Mockito.RETURNS_DEEP_STUBS); 
Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}").build()); this.saphanaMetadataHandler = new SaphanaMetadataHandler(databaseConnectionConfig, this.secretsManager, this.athena, this.jdbcConnectionFactory, com.google.common.collect.ImmutableMap.of()); this.federatedIdentity = Mockito.mock(FederatedIdentity.class); diff --git a/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxJdbcMetadataHandlerTest.java b/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxJdbcMetadataHandlerTest.java index 9a26fc14d9..bab1b3d9c2 100644 --- a/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxJdbcMetadataHandlerTest.java +++ b/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxJdbcMetadataHandlerTest.java @@ -28,10 +28,10 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; -import com.amazonaws.services.athena.AmazonAthena; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Collections; @@ -46,7 +46,7 @@ public class SaphanaMuxJdbcMetadataHandlerTest { private JdbcMetadataHandler jdbcMetadataHandler; private BlockAllocator allocator; private 
SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -59,7 +59,7 @@ public void setup() this.saphanaMetadataHandler = Mockito.mock(SaphanaMetadataHandler.class); this.metadataHandlerMap = Collections.singletonMap("fakedatabase", this.saphanaMetadataHandler); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog", "fakedatabase", diff --git a/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxJdbcRecordHandlerTest.java b/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxJdbcRecordHandlerTest.java index ca00262b23..4acddbca38 100644 --- a/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxJdbcRecordHandlerTest.java +++ b/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxJdbcRecordHandlerTest.java @@ -28,12 +28,12 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -48,7 +48,7 @@ public class 
SaphanaMuxJdbcRecordHandlerTest private JdbcRecordHandler jdbcRecordHandler; private AmazonS3 amazonS3; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -59,7 +59,7 @@ public void setup() this.recordHandlerMap = Collections.singletonMap("saphana", this.saphanaRecordHandler); this.amazonS3 = Mockito.mock(AmazonS3.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog", "saphana", diff --git a/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaRecordHandlerTest.java b/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaRecordHandlerTest.java index 0b8b58fd9f..80934a3f8d 100644 --- a/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaRecordHandlerTest.java +++ b/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaRecordHandlerTest.java @@ -32,7 +32,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; @@ -42,6 +41,7 @@ import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.athena.AthenaClient; import 
software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -60,7 +60,7 @@ public class SaphanaRecordHandlerTest private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; private AmazonS3 amazonS3; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; @Before public void setup() @@ -68,7 +68,7 @@ public void setup() { this.amazonS3 = Mockito.mock(AmazonS3.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); this.connection = Mockito.mock(Connection.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); diff --git a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandler.java b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandler.java index 751bd58f21..d9c9b9d3f1 100644 --- a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandler.java +++ b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandler.java @@ -55,7 +55,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcArrowTypeConverter; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.PreparedStatementBuilder; -import com.amazonaws.services.athena.AmazonAthena; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Strings; import com.google.common.collect.ImmutableMap; @@ -67,6 +66,7 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import 
software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -140,7 +140,7 @@ JDBC_PROPERTIES, new DatabaseConnectionInfo(SnowflakeConstants.SNOWFLAKE_DRIVER_ protected SnowflakeMetadataHandler( DatabaseConnectionConfig databaseConnectionConfig, SecretsManagerClient secretsManager, - AmazonAthena athena, + AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { diff --git a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxMetadataHandler.java b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxMetadataHandler.java index 27f9135f49..17c85a6189 100644 --- a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxMetadataHandler.java +++ b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxMetadataHandler.java @@ -25,8 +25,8 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandlerFactory; -import com.amazonaws.services.athena.AmazonAthena; import org.apache.arrow.util.VisibleForTesting; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -56,7 +56,7 @@ public SnowflakeMuxMetadataHandler(java.util.Map configOptions) } @VisibleForTesting - protected SnowflakeMuxMetadataHandler(SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + protected SnowflakeMuxMetadataHandler(SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, Map metadataHandlerMap, DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { super(secretsManager, athena, jdbcConnectionFactory, 
metadataHandlerMap, databaseConnectionConfig, configOptions); diff --git a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxRecordHandler.java b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxRecordHandler.java index 98b1fb6c10..3874591f69 100644 --- a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxRecordHandler.java +++ b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxRecordHandler.java @@ -25,9 +25,9 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import com.google.common.annotations.VisibleForTesting; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -55,7 +55,7 @@ public SnowflakeMuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - SnowflakeMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + SnowflakeMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeRecordHandler.java b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeRecordHandler.java index 649a722862..57acce2f23 100644 --- 
a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeRecordHandler.java +++ b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeRecordHandler.java @@ -30,13 +30,12 @@ import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.commons.lang3.Validate; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -67,12 +66,12 @@ public SnowflakeRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, } public SnowflakeRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, GenericJdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { - this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), + this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AthenaClient.create(), jdbcConnectionFactory, new SnowflakeQueryStringBuilder(SNOWFLAKE_QUOTE_CHARACTER, new SnowflakeFederationExpressionParser(SNOWFLAKE_QUOTE_CHARACTER)), configOptions); } @VisibleForTesting SnowflakeRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, final AmazonS3 amazonS3, final SecretsManagerClient secretsManager, - final AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) + final 
AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); this.jdbcSplitQueryBuilder = Validate.notNull(jdbcSplitQueryBuilder, "query builder must not be null"); diff --git a/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandlerTest.java b/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandlerTest.java index 5fe668f5a3..3acd762712 100644 --- a/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandlerTest.java +++ b/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandlerTest.java @@ -29,10 +29,10 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; -import com.amazonaws.services.athena.AmazonAthena; import org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Assert; @@ -59,7 +59,7 @@ public class SnowflakeMetadataHandlerTest private Connection connection; private FederatedIdentity federatedIdentity; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; private BlockAllocator blockAllocator; private static final Schema PARTITION_SCHEMA = SchemaBuilder.newBuilder().addField("partition", 
org.apache.arrow.vector.types.Types.MinorType.VARCHAR.getType()).build(); @@ -72,7 +72,7 @@ public void setup() this.connection = Mockito.mock(Connection.class, Mockito.RETURNS_DEEP_STUBS); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}").build()); this.snowflakeMetadataHandler = new SnowflakeMetadataHandler(databaseConnectionConfig, this.secretsManager, this.athena, this.jdbcConnectionFactory, com.google.common.collect.ImmutableMap.of()); this.federatedIdentity = Mockito.mock(FederatedIdentity.class); diff --git a/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxJdbcMetadataHandlerTest.java b/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxJdbcMetadataHandlerTest.java index ac62d22e7f..b6279fa517 100644 --- a/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxJdbcMetadataHandlerTest.java +++ b/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxJdbcMetadataHandlerTest.java @@ -28,10 +28,10 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; -import com.amazonaws.services.athena.AmazonAthena; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.athena.AthenaClient; import 
software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Collections; @@ -46,7 +46,7 @@ public class SnowflakeMuxJdbcMetadataHandlerTest private JdbcMetadataHandler jdbcMetadataHandler; private BlockAllocator allocator; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -57,7 +57,7 @@ public void setup() this.snowflakeMetadataHandler = Mockito.mock(SnowflakeMetadataHandler.class); this.metadataHandlerMap = Collections.singletonMap("fakedatabase", this.snowflakeMetadataHandler); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog", "fakedatabase", diff --git a/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxJdbcRecordHandlerTest.java b/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxJdbcRecordHandlerTest.java index 5582a34454..2e7c0b70cb 100644 --- a/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxJdbcRecordHandlerTest.java +++ b/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxJdbcRecordHandlerTest.java @@ -30,12 +30,12 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.snowflake.SnowflakeMuxRecordHandler; import com.amazonaws.athena.connectors.snowflake.SnowflakeRecordHandler; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import org.apache.arrow.vector.types.pojo.Schema; import 
org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -52,7 +52,7 @@ public class SnowflakeMuxJdbcRecordHandlerTest private JdbcRecordHandler jdbcRecordHandler; private AmazonS3 amazonS3; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -63,7 +63,7 @@ public void setup() this.recordHandlerMap = Collections.singletonMap("snowflake", this.snowflakeRecordHandler); this.amazonS3 = Mockito.mock(AmazonS3.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog", "snowflake", diff --git a/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeRecordHandlerTest.java b/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeRecordHandlerTest.java index bd0cbed6c9..e7f4813d34 100644 --- a/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeRecordHandlerTest.java +++ b/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeRecordHandlerTest.java @@ -33,7 +33,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import 
com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; @@ -43,6 +42,7 @@ import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -61,7 +61,7 @@ public class SnowflakeRecordHandlerTest private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; private AmazonS3 amazonS3; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; @Before public void setup() @@ -69,7 +69,7 @@ public void setup() { this.amazonS3 = Mockito.mock(AmazonS3.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); this.connection = Mockito.mock(Connection.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); diff --git a/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMetadataHandler.java b/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMetadataHandler.java index 099cf0171f..f67ebad56b 100644 --- a/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMetadataHandler.java +++ b/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMetadataHandler.java @@ -54,7 +54,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcArrowTypeConverter; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.PreparedStatementBuilder; -import com.amazonaws.services.athena.AmazonAthena; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableMap; import 
com.google.common.collect.ImmutableSet; @@ -65,6 +64,7 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -150,7 +150,7 @@ public SqlServerMetadataHandler(DatabaseConnectionConfig databaseConnectionConfi protected SqlServerMetadataHandler( DatabaseConnectionConfig databaseConnectionConfig, SecretsManagerClient secretsManager, - AmazonAthena athena, + AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { diff --git a/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxMetadataHandler.java b/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxMetadataHandler.java index bd577beb8b..7ca913e30e 100644 --- a/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxMetadataHandler.java +++ b/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxMetadataHandler.java @@ -25,8 +25,8 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandlerFactory; -import com.amazonaws.services.athena.AmazonAthena; import org.apache.arrow.util.VisibleForTesting; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -56,7 +56,7 @@ public SqlServerMuxMetadataHandler(java.util.Map configOptions) } @VisibleForTesting - protected SqlServerMuxMetadataHandler(SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + protected SqlServerMuxMetadataHandler(SecretsManagerClient secretsManager, 
AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, Map metadataHandlerMap, DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { super(secretsManager, athena, jdbcConnectionFactory, metadataHandlerMap, databaseConnectionConfig, configOptions); diff --git a/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxRecordHandler.java b/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxRecordHandler.java index 5872227624..7282b19ac4 100644 --- a/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxRecordHandler.java +++ b/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxRecordHandler.java @@ -25,9 +25,9 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import com.google.common.annotations.VisibleForTesting; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -55,7 +55,7 @@ public SqlServerMuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - SqlServerMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + SqlServerMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git 
a/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerRecordHandler.java b/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerRecordHandler.java index 814131685c..6bdd298a57 100644 --- a/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerRecordHandler.java +++ b/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerRecordHandler.java @@ -29,13 +29,12 @@ import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.commons.lang3.Validate; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -63,12 +62,12 @@ public SqlServerRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, public SqlServerRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), - AmazonAthenaClientBuilder.defaultClient(), jdbcConnectionFactory, new SqlServerQueryStringBuilder(SQLSERVER_QUOTE_CHARACTER, new SqlServerFederationExpressionParser(SQLSERVER_QUOTE_CHARACTER)), configOptions); + AthenaClient.create(), jdbcConnectionFactory, new SqlServerQueryStringBuilder(SQLSERVER_QUOTE_CHARACTER, new SqlServerFederationExpressionParser(SQLSERVER_QUOTE_CHARACTER)), configOptions); } @VisibleForTesting 
SqlServerRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, final AmazonS3 amazonS3, final SecretsManagerClient secretsManager, - final AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) + final AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); this.jdbcSplitQueryBuilder = Validate.notNull(jdbcSplitQueryBuilder, "query builder must not be null"); diff --git a/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMetadataHandlerTest.java b/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMetadataHandlerTest.java index adea9f5dc9..e8e46b63fc 100644 --- a/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMetadataHandlerTest.java +++ b/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMetadataHandlerTest.java @@ -42,7 +42,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; -import com.amazonaws.services.athena.AmazonAthena; import org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Assert; @@ -51,6 +50,7 @@ import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; @@ -85,7 +85,7 @@ 
public class SqlServerMetadataHandlerTest private Connection connection; private FederatedIdentity federatedIdentity; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; private BlockAllocator allocator; @Before @@ -98,7 +98,7 @@ public void setup() logger.info(" this.connection.."+ this.connection); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"user\": \"testUser\", \"password\": \"testPassword\"}").build()); this.sqlServerMetadataHandler = new SqlServerMetadataHandler(databaseConnectionConfig, this.secretsManager, this.athena, this.jdbcConnectionFactory, com.google.common.collect.ImmutableMap.of()); this.federatedIdentity = Mockito.mock(FederatedIdentity.class); diff --git a/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxMetadataHandlerTest.java b/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxMetadataHandlerTest.java index bd2827b6e4..cf8ec137a1 100644 --- a/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxMetadataHandlerTest.java +++ b/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxMetadataHandlerTest.java @@ -32,10 +32,10 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; -import com.amazonaws.services.athena.AmazonAthena; import org.junit.Before; import 
org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Collections; @@ -50,7 +50,7 @@ public class SqlServerMuxMetadataHandlerTest private JdbcMetadataHandler jdbcMetadataHandler; private BlockAllocator allocator; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -61,7 +61,7 @@ public void setup() this.sqlServerMetadataHandler = Mockito.mock(SqlServerMetadataHandler.class); this.metadataHandlerMap = Collections.singletonMap("fakedatabase", this.sqlServerMetadataHandler); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog", "fakedatabase", diff --git a/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxRecordHandlerTest.java b/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxRecordHandlerTest.java index 2c6de5c5f9..e5074306b3 100644 --- a/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxRecordHandlerTest.java +++ b/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxRecordHandlerTest.java @@ -28,12 +28,12 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; -import com.amazonaws.services.athena.AmazonAthena; import 
com.amazonaws.services.s3.AmazonS3; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -48,7 +48,7 @@ public class SqlServerMuxRecordHandlerTest private JdbcRecordHandler jdbcRecordHandler; private AmazonS3 amazonS3; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -59,7 +59,7 @@ public void setup() this.recordHandlerMap = Collections.singletonMap(SqlServerConstants.NAME, this.sqlServerRecordHandler); this.amazonS3 = Mockito.mock(AmazonS3.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog", SqlServerConstants.NAME, diff --git a/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerRecordHandlerTest.java b/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerRecordHandlerTest.java index 7b1651bf89..58cc7a8dc6 100644 --- a/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerRecordHandlerTest.java +++ b/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerRecordHandlerTest.java @@ -32,7 +32,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import 
com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.types.Types; @@ -41,6 +40,7 @@ import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -59,7 +59,7 @@ public class SqlServerRecordHandlerTest private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; private AmazonS3 amazonS3; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; @Before public void setup() @@ -68,7 +68,7 @@ public void setup() System.setProperty("aws.region", "us-east-1"); this.amazonS3 = Mockito.mock(AmazonS3.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); this.connection = Mockito.mock(Connection.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); diff --git a/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseMetadataHandler.java b/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseMetadataHandler.java index 504321b329..f35bbe34a2 100644 --- a/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseMetadataHandler.java +++ b/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseMetadataHandler.java @@ -48,7 +48,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil; import com.amazonaws.athena.connectors.jdbc.manager.JdbcArrowTypeConverter; import 
com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; -import com.amazonaws.services.athena.AmazonAthena; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; @@ -62,6 +61,7 @@ import org.stringtemplate.v4.ST; import org.stringtemplate.v4.STGroup; import org.stringtemplate.v4.STGroupDir; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -112,7 +112,7 @@ public SynapseMetadataHandler(DatabaseConnectionConfig databaseConnectionConfig, protected SynapseMetadataHandler( DatabaseConnectionConfig databaseConnectionConfig, SecretsManagerClient secretsManager, - AmazonAthena athena, + AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { diff --git a/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseMuxMetadataHandler.java b/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseMuxMetadataHandler.java index 6b88c83d50..8947d626b7 100644 --- a/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseMuxMetadataHandler.java +++ b/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseMuxMetadataHandler.java @@ -24,8 +24,8 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandlerFactory; -import com.amazonaws.services.athena.AmazonAthena; import org.apache.arrow.util.VisibleForTesting; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -55,7 +55,7 @@ public SynapseMuxMetadataHandler(java.util.Map configOptions) } @VisibleForTesting - protected 
SynapseMuxMetadataHandler(SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + protected SynapseMuxMetadataHandler(SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, Map metadataHandlerMap, DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { super(secretsManager, athena, jdbcConnectionFactory, metadataHandlerMap, databaseConnectionConfig, configOptions); diff --git a/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseMuxRecordHandler.java b/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseMuxRecordHandler.java index 70502037b0..38e47cbd02 100644 --- a/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseMuxRecordHandler.java +++ b/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseMuxRecordHandler.java @@ -24,9 +24,9 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import com.google.common.annotations.VisibleForTesting; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -54,7 +54,7 @@ public SynapseMuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - SynapseMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + SynapseMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { 
super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseRecordHandler.java b/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseRecordHandler.java index 6198e00dd7..11e5d74148 100644 --- a/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseRecordHandler.java +++ b/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseRecordHandler.java @@ -33,8 +33,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.google.common.annotations.VisibleForTesting; @@ -44,6 +42,7 @@ import org.apache.commons.lang3.Validate; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -66,14 +65,14 @@ public SynapseRecordHandler(java.util.Map configOptions) public SynapseRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), - AmazonAthenaClientBuilder.defaultClient(), new SynapseJdbcConnectionFactory(databaseConnectionConfig, + AthenaClient.create(), new SynapseJdbcConnectionFactory(databaseConnectionConfig, SynapseMetadataHandler.JDBC_PROPERTIES, new DatabaseConnectionInfo(SynapseConstants.DRIVER_CLASS, SynapseConstants.DEFAULT_PORT)), new SynapseQueryStringBuilder(QUOTE_CHARACTER, 
new SynapseFederationExpressionParser(QUOTE_CHARACTER)), configOptions); } @VisibleForTesting SynapseRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, final AmazonS3 amazonS3, final SecretsManagerClient secretsManager, - final AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) + final AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); this.jdbcSplitQueryBuilder = Validate.notNull(jdbcSplitQueryBuilder, "query builder must not be null"); diff --git a/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseMetadataHandlerTest.java b/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseMetadataHandlerTest.java index 19ccc69b2c..4138897089 100644 --- a/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseMetadataHandlerTest.java +++ b/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseMetadataHandlerTest.java @@ -38,7 +38,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; -import com.amazonaws.services.athena.AmazonAthena; import org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Assert; @@ -47,6 +46,7 @@ import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; import 
software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; @@ -81,7 +81,7 @@ public class SynapseMetadataHandlerTest private Connection connection; private FederatedIdentity federatedIdentity; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; @Before public void setup() @@ -93,7 +93,7 @@ public void setup() logger.info(" this.connection.."+ this.connection); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"user\": \"testUser\", \"password\": \"testPassword\"}").build()); this.synapseMetadataHandler = new SynapseMetadataHandler(databaseConnectionConfig, this.secretsManager, this.athena, this.jdbcConnectionFactory, com.google.common.collect.ImmutableMap.of()); this.federatedIdentity = Mockito.mock(FederatedIdentity.class); diff --git a/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseMuxMetadataHandlerTest.java b/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseMuxMetadataHandlerTest.java index bed0b399eb..8c593cb948 100644 --- a/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseMuxMetadataHandlerTest.java +++ b/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseMuxMetadataHandlerTest.java @@ -32,10 +32,10 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; -import 
com.amazonaws.services.athena.AmazonAthena; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Collections; @@ -50,7 +50,7 @@ public class SynapseMuxMetadataHandlerTest private JdbcMetadataHandler jdbcMetadataHandler; private BlockAllocator allocator; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -61,7 +61,7 @@ public void setup() this.synapseMetadataHandler = Mockito.mock(SynapseMetadataHandler.class); this.metadataHandlerMap = Collections.singletonMap("fakedatabase", this.synapseMetadataHandler); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog", "fakedatabase", diff --git a/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseMuxRecordHandlerTest.java b/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseMuxRecordHandlerTest.java index 4fcf8bb4a9..369d2d7dd2 100644 --- a/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseMuxRecordHandlerTest.java +++ b/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseMuxRecordHandlerTest.java @@ -28,12 +28,12 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; -import 
com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -48,7 +48,7 @@ public class SynapseMuxRecordHandlerTest private JdbcRecordHandler jdbcRecordHandler; private AmazonS3 amazonS3; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -59,7 +59,7 @@ public void setup() this.recordHandlerMap = Collections.singletonMap(SynapseConstants.NAME, this.synapseRecordHandler); this.amazonS3 = Mockito.mock(AmazonS3.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog", SynapseConstants.NAME, diff --git a/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseRecordHandlerTest.java b/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseRecordHandlerTest.java index f035f90688..aaa61dea9a 100644 --- a/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseRecordHandlerTest.java +++ b/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseRecordHandlerTest.java @@ -31,7 +31,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import 
com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.types.Types; @@ -40,6 +39,7 @@ import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -60,7 +60,7 @@ public class SynapseRecordHandlerTest private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; private AmazonS3 amazonS3; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; @Before public void setup() @@ -68,7 +68,7 @@ public void setup() { this.amazonS3 = Mockito.mock(AmazonS3.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); this.connection = Mockito.mock(Connection.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); diff --git a/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataMetadataHandler.java b/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataMetadataHandler.java index 8a456a9fbd..d230e8dcbf 100644 --- a/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataMetadataHandler.java +++ b/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataMetadataHandler.java @@ -50,7 +50,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.PreparedStatementBuilder; -import 
com.amazonaws.services.athena.AmazonAthena; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; @@ -64,6 +63,7 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -130,7 +130,7 @@ public TeradataMetadataHandler( protected TeradataMetadataHandler( DatabaseConnectionConfig databaseConnectionConfig, SecretsManagerClient secretsManager, - AmazonAthena athena, + AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { diff --git a/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataMuxMetadataHandler.java b/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataMuxMetadataHandler.java index 9799c1f7b0..a54c73fe56 100644 --- a/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataMuxMetadataHandler.java +++ b/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataMuxMetadataHandler.java @@ -24,8 +24,8 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandlerFactory; -import com.amazonaws.services.athena.AmazonAthena; import org.apache.arrow.util.VisibleForTesting; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -55,7 +55,7 @@ public TeradataMuxMetadataHandler(java.util.Map configOptions) } @VisibleForTesting - protected TeradataMuxMetadataHandler(SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory 
jdbcConnectionFactory, + protected TeradataMuxMetadataHandler(SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, Map metadataHandlerMap, DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { super(secretsManager, athena, jdbcConnectionFactory, metadataHandlerMap, databaseConnectionConfig, configOptions); diff --git a/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataMuxRecordHandler.java b/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataMuxRecordHandler.java index 99a616a895..2f1a9f2954 100644 --- a/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataMuxRecordHandler.java +++ b/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataMuxRecordHandler.java @@ -25,9 +25,9 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import com.google.common.annotations.VisibleForTesting; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -55,7 +55,7 @@ public TeradataMuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - TeradataMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, + TeradataMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, 
recordHandlerMap, configOptions); diff --git a/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataRecordHandler.java b/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataRecordHandler.java index 99a74d6747..74b83ae3fd 100644 --- a/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataRecordHandler.java +++ b/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataRecordHandler.java @@ -29,13 +29,12 @@ import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.commons.lang3.Validate; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -59,13 +58,13 @@ public TeradataRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, public TeradataRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { - this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), + this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AthenaClient.create(), jdbcConnectionFactory, new TeradataQueryStringBuilder(TERADATA_QUOTE_CHARACTER, new TeradataFederationExpressionParser(TERADATA_QUOTE_CHARACTER)), configOptions); } @VisibleForTesting 
TeradataRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, final AmazonS3 amazonS3, final SecretsManagerClient secretsManager, - final AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) + final AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); this.jdbcSplitQueryBuilder = Validate.notNull(jdbcSplitQueryBuilder, "query builder must not be null"); diff --git a/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataMetadataHandlerTest.java b/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataMetadataHandlerTest.java index 991304b50c..137b2db9d2 100644 --- a/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataMetadataHandlerTest.java +++ b/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataMetadataHandlerTest.java @@ -29,13 +29,13 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; -import com.amazonaws.services.athena.AmazonAthena; import org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; @@ -59,7 +59,7 @@ public class TeradataMetadataHandlerTest 
private Connection connection; private FederatedIdentity federatedIdentity; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; private BlockAllocator blockAllocator; @Before @@ -68,7 +68,7 @@ public void setup() throws Exception { this.connection = Mockito.mock(Connection.class, Mockito.RETURNS_DEEP_STUBS); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}").build()); this.teradataMetadataHandler = new TeradataMetadataHandler(databaseConnectionConfig, this.secretsManager, this.athena, this.jdbcConnectionFactory, com.google.common.collect.ImmutableMap.of("partitioncount", "1000")); this.federatedIdentity = Mockito.mock(FederatedIdentity.class); diff --git a/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataMuxJdbcMetadataHandlerTest.java b/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataMuxJdbcMetadataHandlerTest.java index 2e9ed78cca..5f2e376f82 100644 --- a/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataMuxJdbcMetadataHandlerTest.java +++ b/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataMuxJdbcMetadataHandlerTest.java @@ -28,10 +28,10 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; -import 
com.amazonaws.services.athena.AmazonAthena; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Collections; @@ -46,7 +46,7 @@ public class TeradataMuxJdbcMetadataHandlerTest { private JdbcMetadataHandler jdbcMetadataHandler; private BlockAllocator allocator; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -57,7 +57,7 @@ public void setup() this.teradataMetadataHandler = Mockito.mock(TeradataMetadataHandler.class); this.metadataHandlerMap = Collections.singletonMap("fakedatabase", this.teradataMetadataHandler); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog", "fakedatabase", diff --git a/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataMuxJdbcRecordHandlerTest.java b/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataMuxJdbcRecordHandlerTest.java index c26849aae7..8ee13facf2 100644 --- a/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataMuxJdbcRecordHandlerTest.java +++ b/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataMuxJdbcRecordHandlerTest.java @@ -28,12 +28,12 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import 
com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -48,7 +48,7 @@ public class TeradataMuxJdbcRecordHandlerTest private JdbcRecordHandler jdbcRecordHandler; private AmazonS3 amazonS3; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; private QueryStatusChecker queryStatusChecker; private JdbcConnectionFactory jdbcConnectionFactory; @@ -59,7 +59,7 @@ public void setup() this.recordHandlerMap = Collections.singletonMap("teradata", this.teradataRecordHandler); this.amazonS3 = Mockito.mock(AmazonS3.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog", "teradata", diff --git a/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataRecordHandlerTest.java b/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataRecordHandlerTest.java index 3422add715..09dd2f4c75 100644 --- a/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataRecordHandlerTest.java +++ b/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataRecordHandlerTest.java @@ -32,7 +32,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import 
com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; @@ -42,6 +41,7 @@ import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -60,7 +60,7 @@ public class TeradataRecordHandlerTest private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; private AmazonS3 amazonS3; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; @Before public void setup() @@ -68,7 +68,7 @@ public void setup() { this.amazonS3 = Mockito.mock(AmazonS3.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); this.connection = Mockito.mock(Connection.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); diff --git a/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandler.java b/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandler.java index 61a89f90d0..23fb89ad37 100644 --- a/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandler.java +++ b/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandler.java @@ -42,7 +42,6 @@ import com.amazonaws.athena.connector.util.PaginatedRequestIterator; import 
com.amazonaws.athena.connectors.timestream.qpt.TimestreamQueryPassthrough; import com.amazonaws.athena.connectors.timestream.query.QueryFactory; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.timestreamquery.AmazonTimestreamQuery; import com.amazonaws.services.timestreamquery.model.ColumnInfo; import com.amazonaws.services.timestreamquery.model.Datum; @@ -59,6 +58,7 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.glue.GlueClient; import software.amazon.awssdk.services.glue.model.Table; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; @@ -111,7 +111,7 @@ protected TimestreamMetadataHandler( GlueClient glue, EncryptionKeyFactory keyFactory, SecretsManagerClient secretsManager, - AmazonAthena athena, + AthenaClient athena, String spillBucket, String spillPrefix, java.util.Map configOptions) diff --git a/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandler.java b/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandler.java index fab8718f48..9975a8c33f 100644 --- a/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandler.java +++ b/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandler.java @@ -40,8 +40,6 @@ import com.amazonaws.athena.connectors.timestream.qpt.TimestreamQueryPassthrough; import com.amazonaws.athena.connectors.timestream.query.QueryFactory; import com.amazonaws.athena.connectors.timestream.query.SelectQueryBuilder; -import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; import 
com.amazonaws.services.timestreamquery.AmazonTimestreamQuery; @@ -60,6 +58,7 @@ import org.apache.arrow.vector.types.pojo.Field; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.time.Instant; @@ -95,13 +94,13 @@ public TimestreamRecordHandler(java.util.Map configOptions) this( AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), - AmazonAthenaClientBuilder.defaultClient(), + AthenaClient.create(), TimestreamClientBuilder.buildQueryClient(SOURCE_TYPE), configOptions); } @VisibleForTesting - protected TimestreamRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, AmazonTimestreamQuery tsQuery, java.util.Map configOptions) + protected TimestreamRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, AmazonTimestreamQuery tsQuery, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, SOURCE_TYPE, configOptions); this.tsQuery = tsQuery; diff --git a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandlerTest.java b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandlerTest.java index 7c0746ef94..0744aae186 100644 --- a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandlerTest.java +++ b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandlerTest.java @@ -40,7 +40,6 @@ import com.amazonaws.athena.connector.lambda.metadata.MetadataResponse; import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.timestreamquery.AmazonTimestreamQuery; import 
com.amazonaws.services.timestreamquery.model.Datum; import com.amazonaws.services.timestreamquery.model.QueryRequest; @@ -64,6 +63,7 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.glue.GlueClient; import software.amazon.awssdk.services.glue.model.Column; import software.amazon.awssdk.services.glue.model.StorageDescriptor; @@ -97,7 +97,7 @@ public class TimestreamMetadataHandlerTest @Mock protected SecretsManagerClient mockSecretsManager; @Mock - protected AmazonAthena mockAthena; + protected AthenaClient mockAthena; @Mock protected AmazonTimestreamQuery mockTsQuery; @Mock diff --git a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandlerTest.java b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandlerTest.java index 6b3f79fb57..9804555422 100644 --- a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandlerTest.java +++ b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandlerTest.java @@ -40,7 +40,6 @@ import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory; import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.model.PutObjectRequest; import com.amazonaws.services.s3.model.PutObjectResult; @@ -65,6 +64,7 @@ import org.mockito.stubbing.Answer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.ByteArrayInputStream; @@ -119,7 +119,7 @@ public class 
TimestreamRecordHandlerTest private SecretsManagerClient mockSecretsManager; @Mock - private AmazonAthena mockAthena; + private AthenaClient mockAthena; private class ByteHolder { diff --git a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/integ/TimestreamIntegTest.java b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/integ/TimestreamIntegTest.java index 0fce4624bc..66dfd428ec 100644 --- a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/integ/TimestreamIntegTest.java +++ b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/integ/TimestreamIntegTest.java @@ -21,7 +21,6 @@ import com.amazonaws.athena.connector.integ.IntegrationTestBase; import com.amazonaws.athena.connectors.timestream.TimestreamClientBuilder; -import com.amazonaws.services.athena.model.Row; import com.amazonaws.services.timestreamwrite.AmazonTimestreamWrite; import com.amazonaws.services.timestreamwrite.model.CreateTableRequest; import com.amazonaws.services.timestreamwrite.model.DeleteTableRequest; @@ -38,6 +37,7 @@ import software.amazon.awscdk.services.iam.PolicyDocument; import software.amazon.awscdk.services.iam.PolicyStatement; import software.amazon.awscdk.services.timestream.CfnDatabase; +import software.amazon.awssdk.services.athena.model.Row; import java.util.ArrayList; import java.util.List; @@ -295,13 +295,13 @@ public void selectColumnWithPredicateIntegTest() String query = String.format("select conversation from \"%s\".\"%s\".\"%s\" where subject = '%s' order by time desc limit 1;", lambdaFunctionName, timestreamDbName, timestreamTableName, jokeProtagonist); - List rows = startQueryExecution(query).getResultSet().getRows(); + List rows = startQueryExecution(query).resultSet().rows(); if (!rows.isEmpty()) { // Remove the column-header row rows.remove(0); } List conversation = new ArrayList<>(); - rows.forEach(row -> conversation.add(row.getData().get(0).getVarCharValue())); + 
rows.forEach(row -> conversation.add(row.data().get(0).varCharValue())); logger.info("conversation: {}", conversation); assertEquals("Wrong number of DB records found.", 1, conversation.size()); assertTrue("Did not find correct conversation: " + jokePunchline, conversation.contains(jokePunchline)); diff --git a/athena-tpcds/src/main/java/com/amazonaws/athena/connectors/tpcds/TPCDSMetadataHandler.java b/athena-tpcds/src/main/java/com/amazonaws/athena/connectors/tpcds/TPCDSMetadataHandler.java index 3e98dc12ee..63a4fa2436 100644 --- a/athena-tpcds/src/main/java/com/amazonaws/athena/connectors/tpcds/TPCDSMetadataHandler.java +++ b/athena-tpcds/src/main/java/com/amazonaws/athena/connectors/tpcds/TPCDSMetadataHandler.java @@ -36,13 +36,13 @@ import com.amazonaws.athena.connector.lambda.metadata.ListTablesRequest; import com.amazonaws.athena.connector.lambda.metadata.ListTablesResponse; import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory; -import com.amazonaws.services.athena.AmazonAthena; import com.google.common.collect.ImmutableSet; import com.teradata.tpcds.Table; import com.teradata.tpcds.column.Column; import org.apache.arrow.util.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Collections; @@ -90,7 +90,7 @@ public TPCDSMetadataHandler(java.util.Map configOptions) protected TPCDSMetadataHandler( EncryptionKeyFactory keyFactory, SecretsManagerClient secretsManager, - AmazonAthena athena, + AthenaClient athena, String spillBucket, String spillPrefix, java.util.Map configOptions) diff --git a/athena-tpcds/src/main/java/com/amazonaws/athena/connectors/tpcds/TPCDSRecordHandler.java b/athena-tpcds/src/main/java/com/amazonaws/athena/connectors/tpcds/TPCDSRecordHandler.java index 12dc7a0667..df8b16529a 100644 --- 
a/athena-tpcds/src/main/java/com/amazonaws/athena/connectors/tpcds/TPCDSRecordHandler.java +++ b/athena-tpcds/src/main/java/com/amazonaws/athena/connectors/tpcds/TPCDSRecordHandler.java @@ -26,8 +26,6 @@ import com.amazonaws.athena.connector.lambda.domain.TableName; import com.amazonaws.athena.connector.lambda.handlers.RecordHandler; import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest; -import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.teradata.tpcds.Results; @@ -40,6 +38,7 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.IOException; @@ -77,11 +76,11 @@ public class TPCDSRecordHandler public TPCDSRecordHandler(java.util.Map configOptions) { - super(AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AmazonAthenaClientBuilder.defaultClient(), SOURCE_TYPE, configOptions); + super(AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AthenaClient.create(), SOURCE_TYPE, configOptions); } @VisibleForTesting - protected TPCDSRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena athena, java.util.Map configOptions) + protected TPCDSRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, SOURCE_TYPE, configOptions); } diff --git a/athena-tpcds/src/test/java/com/amazonaws/athena/connectors/tpcds/TPCDSMetadataHandlerTest.java b/athena-tpcds/src/test/java/com/amazonaws/athena/connectors/tpcds/TPCDSMetadataHandlerTest.java index 47affa5f1c..e88a55b6e2 100644 --- 
a/athena-tpcds/src/test/java/com/amazonaws/athena/connectors/tpcds/TPCDSMetadataHandlerTest.java +++ b/athena-tpcds/src/test/java/com/amazonaws/athena/connectors/tpcds/TPCDSMetadataHandlerTest.java @@ -42,7 +42,6 @@ import com.amazonaws.athena.connector.lambda.metadata.MetadataResponse; import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; -import com.amazonaws.services.athena.AmazonAthena; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.After; @@ -53,6 +52,7 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Collections; @@ -79,7 +79,7 @@ public class TPCDSMetadataHandlerTest private SecretsManagerClient mockSecretsManager; @Mock - private AmazonAthena mockAthena; + private AthenaClient mockAthena; @Before public void setUp() diff --git a/athena-tpcds/src/test/java/com/amazonaws/athena/connectors/tpcds/TPCDSRecordHandlerTest.java b/athena-tpcds/src/test/java/com/amazonaws/athena/connectors/tpcds/TPCDSRecordHandlerTest.java index 2bc6193e70..e26bf2458d 100644 --- a/athena-tpcds/src/test/java/com/amazonaws/athena/connectors/tpcds/TPCDSRecordHandlerTest.java +++ b/athena-tpcds/src/test/java/com/amazonaws/athena/connectors/tpcds/TPCDSRecordHandlerTest.java @@ -41,7 +41,6 @@ import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory; import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.model.PutObjectRequest; import com.amazonaws.services.s3.model.PutObjectResult; @@ -62,6 +61,7 @@ 
import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.ByteArrayInputStream; @@ -104,7 +104,7 @@ public class TPCDSRecordHandlerTest private SecretsManagerClient mockSecretsManager; @Mock - private AmazonAthena mockAthena; + private AthenaClient mockAthena; @Before public void setUp() diff --git a/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaRecordHandler.java b/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaRecordHandler.java index 899fa11370..795d02e402 100644 --- a/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaRecordHandler.java +++ b/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaRecordHandler.java @@ -32,8 +32,6 @@ import com.amazonaws.athena.connector.lambda.domain.Split; import com.amazonaws.athena.connector.lambda.handlers.RecordHandler; import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest; -import com.amazonaws.services.athena.AmazonAthena; -import com.amazonaws.services.athena.AmazonAthenaClientBuilder; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.amazonaws.services.s3.model.*; @@ -45,6 +43,7 @@ import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.BufferedReader; @@ -70,11 +69,11 @@ public VerticaRecordHandler(java.util.Map configOptions) { this(AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), - AmazonAthenaClientBuilder.defaultClient(), configOptions); + AthenaClient.create(), configOptions); } @VisibleForTesting - protected 
VerticaRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AmazonAthena amazonAthena, java.util.Map configOptions) + protected VerticaRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient amazonAthena, java.util.Map configOptions) { super(amazonS3, secretsManager, amazonAthena, SOURCE_TYPE, configOptions); this.amazonS3 = amazonS3; diff --git a/athena-vertica/src/test/java/com/amazonaws/athena/connectors/vertica/VerticaMetadataHandlerTest.java b/athena-vertica/src/test/java/com/amazonaws/athena/connectors/vertica/VerticaMetadataHandlerTest.java index 125b28cc9c..ae833032d7 100644 --- a/athena-vertica/src/test/java/com/amazonaws/athena/connectors/vertica/VerticaMetadataHandlerTest.java +++ b/athena-vertica/src/test/java/com/amazonaws/athena/connectors/vertica/VerticaMetadataHandlerTest.java @@ -47,7 +47,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.athena.connectors.vertica.query.QueryFactory; import com.amazonaws.athena.connectors.vertica.query.VerticaExportQueryBuilder; -import com.amazonaws.services.athena.AmazonAthena; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.model.ListObjectsRequest; import com.amazonaws.services.s3.model.ObjectListing; @@ -66,6 +65,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.stringtemplate.v4.ST; +import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; @@ -104,7 +104,7 @@ public class VerticaMetadataHandlerTest extends TestBase private VerticaSchemaUtils verticaSchemaUtils; private Connection connection; private SecretsManagerClient secretsManager; - private AmazonAthena athena; + private AthenaClient athena; private AmazonS3 
amazonS3; private FederatedIdentity federatedIdentity; private BlockAllocatorImpl allocator; @@ -135,7 +135,7 @@ public void setUp() throws Exception this.verticaExportQueryBuilder = Mockito.mock(VerticaExportQueryBuilder.class); this.connection = Mockito.mock(Connection.class, Mockito.RETURNS_DEEP_STUBS); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); this.federatedIdentity = Mockito.mock(FederatedIdentity.class); this.databaseMetaData = Mockito.mock(DatabaseMetaData.class); this.tableName = Mockito.mock(TableName.class); @@ -154,7 +154,7 @@ public void setUp() throws Exception this.connection = Mockito.mock(Connection.class, Mockito.RETURNS_DEEP_STUBS); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); this.secretsManager = Mockito.mock(SecretsManagerClient.class); - this.athena = Mockito.mock(AmazonAthena.class); + this.athena = Mockito.mock(AthenaClient.class); this.verticaMetadataHandler = new VerticaMetadataHandler(databaseConnectionConfig, this.jdbcConnectionFactory, com.google.common.collect.ImmutableMap.of(), amazonS3, verticaSchemaUtils); this.federatedIdentity = Mockito.mock(FederatedIdentity.class); this.allocator = new BlockAllocatorImpl(); diff --git a/pom.xml b/pom.xml index f66381fb09..74a9fea322 100644 --- a/pom.xml +++ b/pom.xml @@ -14,8 +14,8 @@ 11 3.13.0 + 1.12.750 2.25.56 - 1.12.761 1.2.2 1.6.0 1.204.0 From 3f2a330199cc95a28af8b1af26ddb4f6be31bee4 Mon Sep 17 00:00:00 2001 From: Aimery Methena <159072740+aimethed@users.noreply.github.com> Date: Mon, 22 Jul 2024 17:07:13 -0400 Subject: [PATCH 06/87] V2 migration KMS (#2107) --- athena-federation-sdk/pom.xml | 19 +++++++++-- .../lambda/handlers/MetadataHandler.java | 4 +-- .../lambda/security/KmsKeyFactory.java | 34 ++++++++++--------- 3 files changed, 36 insertions(+), 21 deletions(-) diff --git 
a/athena-federation-sdk/pom.xml b/athena-federation-sdk/pom.xml index b19652c867..9635e10784 100644 --- a/athena-federation-sdk/pom.xml +++ b/athena-federation-sdk/pom.xml @@ -201,11 +201,24 @@ com.amazonaws aws-java-sdk-s3 ${aws-sdk.version} + + + + com.amazonaws + aws-java-sdk-kms + + - com.amazonaws - aws-java-sdk-kms - ${aws-sdk.version} + software.amazon.awssdk + kms + ${aws-sdk-v2.version} + + + software.amazon.awssdk + netty-nio-client + + com.google.guava diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/MetadataHandler.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/MetadataHandler.java index ef64f9057d..88c00b461f 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/MetadataHandler.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/MetadataHandler.java @@ -58,7 +58,6 @@ import com.amazonaws.athena.connector.lambda.security.KmsKeyFactory; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; import com.amazonaws.athena.connector.lambda.serde.VersionedObjectMapperFactory; -import com.amazonaws.services.kms.AWSKMSClientBuilder; import com.amazonaws.services.lambda.runtime.Context; import com.amazonaws.services.lambda.runtime.RequestStreamHandler; import com.amazonaws.services.s3.AmazonS3ClientBuilder; @@ -69,6 +68,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.kms.KmsClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.IOException; @@ -144,7 +144,7 @@ public MetadataHandler(String sourceType, java.util.Map configOp } else { this.encryptionKeyFactory = (this.configOptions.get(KMS_KEY_ID_ENV) != null) ? 
- new KmsKeyFactory(AWSKMSClientBuilder.standard().build(), this.configOptions.get(KMS_KEY_ID_ENV)) : + new KmsKeyFactory(KmsClient.create(), this.configOptions.get(KMS_KEY_ID_ENV)) : new LocalKeyFactory(); logger.debug("ENABLE_SPILL_ENCRYPTION with encryption factory: " + encryptionKeyFactory.getClass().getSimpleName()); } diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/security/KmsKeyFactory.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/security/KmsKeyFactory.java index 622ebcd01a..c9d0589bfd 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/security/KmsKeyFactory.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/security/KmsKeyFactory.java @@ -20,12 +20,12 @@ * #L% */ -import com.amazonaws.services.kms.AWSKMS; -import com.amazonaws.services.kms.model.DataKeySpec; -import com.amazonaws.services.kms.model.GenerateDataKeyRequest; -import com.amazonaws.services.kms.model.GenerateDataKeyResult; -import com.amazonaws.services.kms.model.GenerateRandomRequest; -import com.amazonaws.services.kms.model.GenerateRandomResult; +import software.amazon.awssdk.services.kms.KmsClient; +import software.amazon.awssdk.services.kms.model.DataKeySpec; +import software.amazon.awssdk.services.kms.model.GenerateDataKeyRequest; +import software.amazon.awssdk.services.kms.model.GenerateDataKeyResponse; +import software.amazon.awssdk.services.kms.model.GenerateRandomRequest; +import software.amazon.awssdk.services.kms.model.GenerateRandomResponse; /** * An EncryptionKeyFactory that is backed by AWS KMS. 
@@ -35,10 +35,10 @@ public class KmsKeyFactory implements EncryptionKeyFactory { - private final AWSKMS kmsClient; + private final KmsClient kmsClient; private final String masterKeyId; - public KmsKeyFactory(AWSKMS kmsClient, String masterKeyId) + public KmsKeyFactory(KmsClient kmsClient, String masterKeyId) { this.kmsClient = kmsClient; this.masterKeyId = masterKeyId; @@ -49,16 +49,18 @@ public KmsKeyFactory(AWSKMS kmsClient, String masterKeyId) */ public EncryptionKey create() { - GenerateDataKeyResult dataKeyResult = + GenerateDataKeyResponse dataKeyResponse = kmsClient.generateDataKey( - new GenerateDataKeyRequest() - .withKeyId(masterKeyId) - .withKeySpec(DataKeySpec.AES_128)); + GenerateDataKeyRequest.builder() + .keyId(masterKeyId) + .keySpec(DataKeySpec.AES_128) + .build()); - GenerateRandomRequest randomRequest = new GenerateRandomRequest() - .withNumberOfBytes(AesGcmBlockCrypto.NONCE_BYTES); - GenerateRandomResult randomResult = kmsClient.generateRandom(randomRequest); + GenerateRandomRequest randomRequest = GenerateRandomRequest.builder() + .numberOfBytes(AesGcmBlockCrypto.NONCE_BYTES) + .build(); + GenerateRandomResponse randomResponse = kmsClient.generateRandom(randomRequest); - return new EncryptionKey(dataKeyResult.getPlaintext().array(), randomResult.getPlaintext().array()); + return new EncryptionKey(dataKeyResponse.plaintext().asByteArray(), randomResponse.plaintext().asByteArray()); } } From a60ed614c533c0d9cbe67db7f294b63a472add29 Mon Sep 17 00:00:00 2001 From: Aimery Methena <159072740+aimethed@users.noreply.github.com> Date: Wed, 24 Jul 2024 16:42:06 -0400 Subject: [PATCH 07/87] V2 migration lambda (#2077) --- .../docdb/integ/DocDbIntegTest.java | 18 ++-- .../validation/FederationService.java | 30 ------- .../validation/FederationServiceProvider.java | 87 ++++++++++--------- .../validation/LambdaMetadataProvider.java | 12 +-- .../validation/LambdaRecordProvider.java | 4 +- athena-federation-sdk/pom.xml | 12 ++- 
.../lambda/metadata/MetadataService.java | 38 -------- .../lambda/records/RecordService.java | 38 -------- .../v2/LambdaFunctionExceptionSerDe.java | 30 ++----- .../serde/v2/ObjectMapperFactoryV2.java | 6 +- .../serde/v3/ObjectMapperFactoryV3.java | 6 +- .../serde/v4/ObjectMapperFactoryV4.java | 6 +- .../serde/v5/ObjectMapperFactoryV5.java | 6 +- .../v2/LambdaFunctionExceptionSerDeTest.java | 21 ++--- .../hbase/integ/HbaseIntegTest.java | 18 ++-- .../redis/integ/RedisIntegTest.java | 18 ++-- 16 files changed, 115 insertions(+), 235 deletions(-) delete mode 100644 athena-federation-sdk-tools/src/main/java/com/amazonaws/athena/connector/validation/FederationService.java delete mode 100644 athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/metadata/MetadataService.java delete mode 100644 athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/records/RecordService.java diff --git a/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/integ/DocDbIntegTest.java b/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/integ/DocDbIntegTest.java index 9c44e21bfb..3cca51f94e 100644 --- a/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/integ/DocDbIntegTest.java +++ b/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/integ/DocDbIntegTest.java @@ -32,10 +32,6 @@ import com.amazonaws.services.docdb.model.DBCluster; import com.amazonaws.services.docdb.model.DescribeDBClustersRequest; import com.amazonaws.services.docdb.model.DescribeDBClustersResult; -import com.amazonaws.services.lambda.AWSLambda; -import com.amazonaws.services.lambda.AWSLambdaClientBuilder; -import com.amazonaws.services.lambda.model.InvocationType; -import com.amazonaws.services.lambda.model.InvokeRequest; import com.google.common.collect.ImmutableMap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -57,6 +53,9 @@ import software.amazon.awscdk.services.ec2.VpcAttributes; import 
software.amazon.awscdk.services.iam.PolicyDocument; import software.amazon.awssdk.services.athena.model.Row; +import software.amazon.awssdk.services.lambda.LambdaClient; +import software.amazon.awssdk.services.lambda.model.InvocationType; +import software.amazon.awssdk.services.lambda.model.InvokeRequest; import java.util.ArrayList; import java.util.HashMap; @@ -263,20 +262,21 @@ protected void setUpTableData() logger.info("----------------------------------------------------"); String mongoLambdaName = "integ-mongodb-" + UUID.randomUUID(); - AWSLambda lambdaClient = AWSLambdaClientBuilder.defaultClient(); + LambdaClient lambdaClient = LambdaClient.create(); CloudFormationClient cloudFormationMongoClient = new CloudFormationClient(getMongoLambdaStack(mongoLambdaName)); try { // Create the Lambda function. cloudFormationMongoClient.createStack(); // Invoke the Lambda function. - lambdaClient.invoke(new InvokeRequest() - .withFunctionName(mongoLambdaName) - .withInvocationType(InvocationType.RequestResponse)); + lambdaClient.invoke(InvokeRequest.builder() + .functionName(mongoLambdaName) + .invocationType(InvocationType.REQUEST_RESPONSE) + .build()); } finally { // Delete the Lambda function. cloudFormationMongoClient.deleteStack(); - lambdaClient.shutdown(); + lambdaClient.close(); } } diff --git a/athena-federation-sdk-tools/src/main/java/com/amazonaws/athena/connector/validation/FederationService.java b/athena-federation-sdk-tools/src/main/java/com/amazonaws/athena/connector/validation/FederationService.java deleted file mode 100644 index d15468e50b..0000000000 --- a/athena-federation-sdk-tools/src/main/java/com/amazonaws/athena/connector/validation/FederationService.java +++ /dev/null @@ -1,30 +0,0 @@ -/*- - * #%L - * Amazon Athena Query Federation SDK Tools - * %% - * Copyright (C) 2019 - 2020 Amazon Web Services - * %% - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * #L% - */ -package com.amazonaws.athena.connector.validation; - -import com.amazonaws.athena.connector.lambda.request.FederationRequest; -import com.amazonaws.athena.connector.lambda.request.FederationResponse; -import com.amazonaws.services.lambda.invoke.LambdaFunction; - -public interface FederationService -{ - @LambdaFunction - FederationResponse call(final FederationRequest request); -} diff --git a/athena-federation-sdk-tools/src/main/java/com/amazonaws/athena/connector/validation/FederationServiceProvider.java b/athena-federation-sdk-tools/src/main/java/com/amazonaws/athena/connector/validation/FederationServiceProvider.java index 4306468cc1..4f3628a68f 100644 --- a/athena-federation-sdk-tools/src/main/java/com/amazonaws/athena/connector/validation/FederationServiceProvider.java +++ b/athena-federation-sdk-tools/src/main/java/com/amazonaws/athena/connector/validation/FederationServiceProvider.java @@ -19,19 +19,22 @@ */ package com.amazonaws.athena.connector.validation; +import com.amazonaws.athena.connector.lambda.request.FederationRequest; +import com.amazonaws.athena.connector.lambda.request.FederationResponse; import com.amazonaws.athena.connector.lambda.request.PingRequest; import com.amazonaws.athena.connector.lambda.request.PingResponse; import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; import com.amazonaws.athena.connector.lambda.serde.VersionedObjectMapperFactory; -import com.amazonaws.services.lambda.AWSLambdaClientBuilder; -import com.amazonaws.services.lambda.invoke.LambdaFunction; -import 
com.amazonaws.services.lambda.invoke.LambdaFunctionNameResolver; -import com.amazonaws.services.lambda.invoke.LambdaInvokerFactory; -import com.amazonaws.services.lambda.invoke.LambdaInvokerFactoryConfig; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.services.lambda.LambdaClient; +import software.amazon.awssdk.services.lambda.model.InvokeRequest; +import software.amazon.awssdk.services.lambda.model.InvokeResponse; -import java.lang.reflect.Method; +import java.io.IOException; import java.util.Map; import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; @@ -45,60 +48,58 @@ public class FederationServiceProvider private static final String VALIDATION_SUFFIX = "_validation"; - private static final Map serviceCache = new ConcurrentHashMap<>(); + private static final Map serdeVersionCache = new ConcurrentHashMap<>(); + + private static final LambdaClient lambdaClient = LambdaClient.create(); private FederationServiceProvider() { // Intentionally left blank. 
} - public static FederationService getService(String lambdaFunction, FederatedIdentity identity, String catalog) + private static R invokeFunction(String lambdaFunction, T request, Class responseClass, ObjectMapper objectMapper) { - FederationService service = serviceCache.get(lambdaFunction); - if (service != null) { - return service; + String payload; + try { + payload = objectMapper.writeValueAsString(request); + } + catch (JsonProcessingException e) { + throw new RuntimeException("Failed to serialize request object", e); } - service = LambdaInvokerFactory.builder() - .lambdaClient(AWSLambdaClientBuilder.defaultClient()) - .objectMapper(VersionedObjectMapperFactory.create(BLOCK_ALLOCATOR)) - .lambdaFunctionNameResolver(new Mapper(lambdaFunction)) - .build(FederationService.class); - - PingRequest pingRequest = new PingRequest(identity, catalog, generateQueryId()); - PingResponse pingResponse = (PingResponse) service.call(pingRequest); + InvokeRequest invokeRequest = InvokeRequest.builder() + .functionName(lambdaFunction) + .payload(SdkBytes.fromUtf8String(payload)) + .build(); - int actualSerDeVersion = pingResponse.getSerDeVersion(); - log.info("SerDe version for function {}, catalog {} is {}", lambdaFunction, catalog, actualSerDeVersion); + InvokeResponse invokeResponse = lambdaClient.invoke(invokeRequest); - if (actualSerDeVersion != SERDE_VERSION) { - service = LambdaInvokerFactory.builder() - .lambdaClient(AWSLambdaClientBuilder.defaultClient()) - .objectMapper(VersionedObjectMapperFactory.create(BLOCK_ALLOCATOR, actualSerDeVersion)) - .lambdaFunctionNameResolver(new Mapper(lambdaFunction)) - .build(FederationService.class); + String response = invokeResponse.payload().asUtf8String(); + try { + return objectMapper.readValue(response, responseClass); + } + catch (IOException e) { + throw new RuntimeException("Failed to deserialize response payload", e); } - - serviceCache.put(lambdaFunction, service); - return service; } - public static final class Mapper - 
implements LambdaFunctionNameResolver + public static FederationResponse callService(String lambdaFunction, FederatedIdentity identity, String catalog, FederationRequest request) { - private final String function; - - private Mapper(String function) - { - this.function = function; + int serDeVersion = SERDE_VERSION; + if (serdeVersionCache.containsKey(lambdaFunction)) { + serDeVersion = serdeVersionCache.get(lambdaFunction); } - - @Override - public String getFunctionName(Method method, LambdaFunction lambdaFunction, - LambdaInvokerFactoryConfig lambdaInvokerFactoryConfig) - { - return function; + else { + ObjectMapper objectMapper = VersionedObjectMapperFactory.create(BLOCK_ALLOCATOR); + PingRequest pingRequest = new PingRequest(identity, catalog, generateQueryId()); + PingResponse pingResponse = invokeFunction(lambdaFunction, pingRequest, PingResponse.class, objectMapper); + + int actualSerDeVersion = pingResponse.getSerDeVersion(); + log.info("SerDe version for function {}, catalog {} is {}", lambdaFunction, catalog, actualSerDeVersion); + serdeVersionCache.put(lambdaFunction, actualSerDeVersion); } + + return invokeFunction(lambdaFunction, request, FederationResponse.class, VersionedObjectMapperFactory.create(BLOCK_ALLOCATOR, serDeVersion)); } public static String generateQueryId() diff --git a/athena-federation-sdk-tools/src/main/java/com/amazonaws/athena/connector/validation/LambdaMetadataProvider.java b/athena-federation-sdk-tools/src/main/java/com/amazonaws/athena/connector/validation/LambdaMetadataProvider.java index d3b2a93614..c603ddb463 100644 --- a/athena-federation-sdk-tools/src/main/java/com/amazonaws/athena/connector/validation/LambdaMetadataProvider.java +++ b/athena-federation-sdk-tools/src/main/java/com/amazonaws/athena/connector/validation/LambdaMetadataProvider.java @@ -42,8 +42,8 @@ import java.util.Set; import static com.amazonaws.athena.connector.lambda.metadata.ListTablesRequest.UNLIMITED_PAGE_SIZE_VALUE; +import static 
com.amazonaws.athena.connector.validation.FederationServiceProvider.callService; import static com.amazonaws.athena.connector.validation.FederationServiceProvider.generateQueryId; -import static com.amazonaws.athena.connector.validation.FederationServiceProvider.getService; /** * This class offers multiple convenience methods to retrieve metadata from a deployed Lambda. @@ -75,7 +75,7 @@ public static ListSchemasResponse listSchemas(String catalog, try (ListSchemasRequest request = new ListSchemasRequest(identity, queryId, catalog)) { log.info("Submitting request: {}", request); - ListSchemasResponse response = (ListSchemasResponse) getService(metadataFunction, identity, catalog).call(request); + ListSchemasResponse response = (ListSchemasResponse) callService(metadataFunction, identity, catalog, request); log.info("Received response: {}", response); return response; } @@ -107,7 +107,7 @@ public static ListTablesResponse listTables(String catalog, try (ListTablesRequest request = new ListTablesRequest(identity, queryId, catalog, schema, null, UNLIMITED_PAGE_SIZE_VALUE)) { log.info("Submitting request: {}", request); - ListTablesResponse response = (ListTablesResponse) getService(metadataFunction, identity, catalog).call(request); + ListTablesResponse response = (ListTablesResponse) callService(metadataFunction, identity, catalog, request); log.info("Received response: {}", response); return response; } @@ -136,7 +136,7 @@ public static GetTableResponse getTable(String catalog, try (GetTableRequest request = new GetTableRequest(identity, queryId, catalog, tableName, Collections.emptyMap())) { log.info("Submitting request: {}", request); - GetTableResponse response = (GetTableResponse) getService(metadataFunction, identity, catalog).call(request); + GetTableResponse response = (GetTableResponse) callService(metadataFunction, identity, catalog, request); log.info("Received response: {}", response); return response; } @@ -171,7 +171,7 @@ public static 
GetTableLayoutResponse getTableLayout(String catalog, try (GetTableLayoutRequest request = new GetTableLayoutRequest(identity, queryId, catalog, tableName, constraints, schema, partitionCols)) { log.info("Submitting request: {}", request); - GetTableLayoutResponse response = (GetTableLayoutResponse) getService(metadataFunction, identity, catalog).call(request); + GetTableLayoutResponse response = (GetTableLayoutResponse) callService(metadataFunction, identity, catalog, request); log.info("Received response: {}", response); return response; } @@ -208,7 +208,7 @@ public static GetSplitsResponse getSplits(String catalog, try (GetSplitsRequest request = new GetSplitsRequest(identity, queryId, catalog, tableName, partitions, partitionCols, constraints, contToken)) { log.info("Submitting request: {}", request); - GetSplitsResponse response = (GetSplitsResponse) getService(metadataFunction, identity, catalog).call(request); + GetSplitsResponse response = (GetSplitsResponse) callService(metadataFunction, identity, catalog, request); log.info("Received response: {}", response); return response; } diff --git a/athena-federation-sdk-tools/src/main/java/com/amazonaws/athena/connector/validation/LambdaRecordProvider.java b/athena-federation-sdk-tools/src/main/java/com/amazonaws/athena/connector/validation/LambdaRecordProvider.java index 87d301931e..296de3a854 100644 --- a/athena-federation-sdk-tools/src/main/java/com/amazonaws/athena/connector/validation/LambdaRecordProvider.java +++ b/athena-federation-sdk-tools/src/main/java/com/amazonaws/athena/connector/validation/LambdaRecordProvider.java @@ -29,8 +29,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static com.amazonaws.athena.connector.validation.FederationServiceProvider.callService; import static com.amazonaws.athena.connector.validation.FederationServiceProvider.generateQueryId; -import static com.amazonaws.athena.connector.validation.FederationServiceProvider.getService; /** * This class offers a 
convenience method to retrieve records from a deployed Lambda. @@ -81,7 +81,7 @@ public static ReadRecordsResponse readRecords(String catalog, MAX_BLOCK_SIZE, MAX_INLINE_BLOCK_SIZE)) { log.info("Submitting request: {}", request); - ReadRecordsResponse response = (ReadRecordsResponse) getService(recordFunction, identity, catalog).call(request); + ReadRecordsResponse response = (ReadRecordsResponse) callService(recordFunction, identity, catalog, request); log.info("Received response: {}", response); return response; } diff --git a/athena-federation-sdk/pom.xml b/athena-federation-sdk/pom.xml index 9635e10784..43fb9f3388 100644 --- a/athena-federation-sdk/pom.xml +++ b/athena-federation-sdk/pom.xml @@ -193,9 +193,15 @@ 1.2.3 - com.amazonaws - aws-java-sdk-lambda - ${aws-sdk.version} + software.amazon.awssdk + lambda + ${aws-sdk-v2.version} + + + software.amazon.awssdk + netty-nio-client + + com.amazonaws diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/metadata/MetadataService.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/metadata/MetadataService.java deleted file mode 100644 index f5fc0b99fc..0000000000 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/metadata/MetadataService.java +++ /dev/null @@ -1,38 +0,0 @@ -package com.amazonaws.athena.connector.lambda.metadata; - -/*- - * #%L - * Amazon Athena Query Federation SDK - * %% - * Copyright (C) 2019 Amazon Web Services - * %% - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * #L% - */ - -import com.amazonaws.services.lambda.invoke.LambdaFunction; - -/** - * Lambda functions intended for Metadata operations associate with this interface. - */ -public interface MetadataService -{ - /** - * Returns metadata corresponding to the request type. - * - * @param request The metadata request. - * @return The metadata. - */ - @LambdaFunction(functionName = "metadata") - MetadataResponse getMetadata(final MetadataRequest request); -} diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/records/RecordService.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/records/RecordService.java deleted file mode 100644 index 3d70c240ca..0000000000 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/records/RecordService.java +++ /dev/null @@ -1,38 +0,0 @@ -package com.amazonaws.athena.connector.lambda.records; - -/*- - * #%L - * Amazon Athena Query Federation SDK - * %% - * Copyright (C) 2019 Amazon Web Services - * %% - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * #L% - */ - -import com.amazonaws.services.lambda.invoke.LambdaFunction; - -/** - * Lambda functions intended for Record operations associate with this interface. - */ -public interface RecordService -{ - /** - * Returns data/records corresponding to the request type. 
- * - * @param request The data/records request. - * @return The data/records. - */ - @LambdaFunction(functionName = "record") - RecordResponse readRecords(final RecordRequest request); -} diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/serde/v2/LambdaFunctionExceptionSerDe.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/serde/v2/LambdaFunctionExceptionSerDe.java index 595133c0da..637ef13771 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/serde/v2/LambdaFunctionExceptionSerDe.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/serde/v2/LambdaFunctionExceptionSerDe.java @@ -20,14 +20,13 @@ package com.amazonaws.athena.connector.lambda.serde.v2; import com.amazonaws.athena.connector.lambda.serde.BaseDeserializer; -import com.amazonaws.services.lambda.invoke.LambdaFunctionException; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.databind.DeserializationContext; import com.fasterxml.jackson.databind.JsonNode; import com.google.common.base.Joiner; +import software.amazon.awssdk.services.lambda.model.LambdaException; import java.io.IOException; -import java.lang.reflect.Constructor; import java.util.Iterator; import java.util.LinkedList; import java.util.List; @@ -46,15 +45,15 @@ public class LambdaFunctionExceptionSerDe private LambdaFunctionExceptionSerDe() {} - public static final class Deserializer extends BaseDeserializer + public static final class Deserializer extends BaseDeserializer { public Deserializer() { - super(LambdaFunctionException.class); + super(LambdaException.class); } @Override - public LambdaFunctionException deserialize(JsonParser jparser, DeserializationContext ctxt) + public LambdaException deserialize(JsonParser jparser, DeserializationContext ctxt) throws IOException { validateObjectStart(jparser.getCurrentToken()); @@ -63,18 +62,18 @@ public LambdaFunctionException 
deserialize(JsonParser jparser, DeserializationCo } @Override - public LambdaFunctionException doDeserialize(JsonParser jparser, DeserializationContext ctxt) + public LambdaException doDeserialize(JsonParser jparser, DeserializationContext ctxt) throws IOException { JsonNode root = jparser.getCodec().readTree(jparser); return recursiveParse(root); } - private LambdaFunctionException recursiveParse(JsonNode root) + private LambdaException recursiveParse(JsonNode root) { String errorType = getNullableStringValue(root, ERROR_TYPE_FIELD); String errorMessage = getNullableStringValue(root, ERROR_MESSAGE_FIELD); - LambdaFunctionException cause = null; + LambdaException cause = null; JsonNode causeNode = root.get(CAUSE_FIELD); if (causeNode != null) { cause = recursiveParse(causeNode); @@ -102,20 +101,7 @@ private LambdaFunctionException recursiveParse(JsonNode root) } } - // HACK: LambdaFunctionException is only intended to be instantiated by Lambda server-side, so its constructors - // are package-private or deprecated. Thus the need for reflection here. If the signature of the preferred - // constructor does change, we fall back to the deprecated constructor (which requires us to append the stackTrace - // to the errorMessage to not lose it). If the deprecated constructor is removed then this will not compile - // and the appropriate adjustment can be made. 
- try { - Constructor constructor = LambdaFunctionException.class.getDeclaredConstructor( - String.class, String.class, LambdaFunctionException.class, List.class); - constructor.setAccessible(true); - return constructor.newInstance(errorType, errorMessage, cause, stackTraces); - } - catch (ReflectiveOperationException e) { - return new LambdaFunctionException(appendStackTrace(errorMessage, stackTraces), false, errorType); - } + return (LambdaException) LambdaException.builder().cause(cause).message(appendStackTrace(errorMessage, stackTraces) + "\nErrorType: " + errorType).build(); } private String getNullableStringValue(JsonNode parent, String field) diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/serde/v2/ObjectMapperFactoryV2.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/serde/v2/ObjectMapperFactoryV2.java index 0a82530652..8e4799b185 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/serde/v2/ObjectMapperFactoryV2.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/serde/v2/ObjectMapperFactoryV2.java @@ -27,7 +27,6 @@ import com.amazonaws.athena.connector.lambda.serde.PingRequestSerDe; import com.amazonaws.athena.connector.lambda.serde.PingResponseSerDe; import com.amazonaws.athena.connector.lambda.serde.VersionedSerDe; -import com.amazonaws.services.lambda.invoke.LambdaFunctionException; import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.databind.BeanDescription; import com.fasterxml.jackson.databind.DeserializationContext; @@ -52,12 +51,13 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.types.pojo.Schema; +import software.amazon.awssdk.services.lambda.model.LambdaException; @Deprecated public class ObjectMapperFactoryV2 { private static final JsonFactory JSON_FACTORY = new JsonFactory(); - private static final 
String LAMDA_EXCEPTION_CLASS_NAME = LambdaFunctionException.class.getName(); + private static final String LAMDA_EXCEPTION_CLASS_NAME = LambdaException.class.getName(); private static final SerializerFactory SERIALIZER_FACTORY; @@ -153,7 +153,7 @@ private StrictObjectMapper(BlockAllocator allocator) ImmutableMap, JsonDeserializer> desers = ImmutableMap.of( FederationRequest.class, createRequestDeserializer(allocator), FederationResponse.class, createResponseDeserializer(allocator), - LambdaFunctionException.class, new LambdaFunctionExceptionSerDe.Deserializer()); + LambdaException.class, new LambdaFunctionExceptionSerDe.Deserializer()); SimpleDeserializers deserializers = new SimpleDeserializers(desers); DeserializerFactoryConfig dConfig = new DeserializerFactoryConfig().withAdditionalDeserializers(deserializers); _deserializationContext = new DefaultDeserializationContext.Impl(new StrictDeserializerFactory(dConfig)); diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/serde/v3/ObjectMapperFactoryV3.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/serde/v3/ObjectMapperFactoryV3.java index 412555b4ec..9a305cf9b1 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/serde/v3/ObjectMapperFactoryV3.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/serde/v3/ObjectMapperFactoryV3.java @@ -58,7 +58,6 @@ import com.amazonaws.athena.connector.lambda.serde.v2.UserDefinedFunctionRequestSerDe; import com.amazonaws.athena.connector.lambda.serde.v2.UserDefinedFunctionResponseSerDe; import com.amazonaws.athena.connector.lambda.serde.v2.ValueSetSerDe; -import com.amazonaws.services.lambda.invoke.LambdaFunctionException; import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.databind.BeanDescription; import com.fasterxml.jackson.databind.DeserializationContext; @@ -83,11 +82,12 @@ import com.google.common.collect.ImmutableList; 
import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.types.pojo.Schema; +import software.amazon.awssdk.services.lambda.model.LambdaException; public class ObjectMapperFactoryV3 { private static final JsonFactory JSON_FACTORY = new JsonFactory(); - private static final String LAMDA_EXCEPTION_CLASS_NAME = LambdaFunctionException.class.getName(); + private static final String LAMDA_EXCEPTION_CLASS_NAME = LambdaException.class.getName(); private static final SerializerFactory SERIALIZER_FACTORY; @@ -183,7 +183,7 @@ private StrictObjectMapper(BlockAllocator allocator) ImmutableMap, JsonDeserializer> desers = ImmutableMap.of( FederationRequest.class, createRequestDeserializer(allocator), FederationResponse.class, createResponseDeserializer(allocator), - LambdaFunctionException.class, new LambdaFunctionExceptionSerDe.Deserializer()); + LambdaException.class, new LambdaFunctionExceptionSerDe.Deserializer()); SimpleDeserializers deserializers = new SimpleDeserializers(desers); DeserializerFactoryConfig dConfig = new DeserializerFactoryConfig().withAdditionalDeserializers(deserializers); _deserializationContext = new DefaultDeserializationContext.Impl(new StrictDeserializerFactory(dConfig)); diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/serde/v4/ObjectMapperFactoryV4.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/serde/v4/ObjectMapperFactoryV4.java index bbb38c56ce..3f8f7d0f00 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/serde/v4/ObjectMapperFactoryV4.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/serde/v4/ObjectMapperFactoryV4.java @@ -60,7 +60,6 @@ import com.amazonaws.athena.connector.lambda.serde.v2.UserDefinedFunctionRequestSerDe; import com.amazonaws.athena.connector.lambda.serde.v2.UserDefinedFunctionResponseSerDe; import com.amazonaws.athena.connector.lambda.serde.v2.ValueSetSerDe; -import 
com.amazonaws.services.lambda.invoke.LambdaFunctionException; import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.databind.BeanDescription; import com.fasterxml.jackson.databind.DeserializationContext; @@ -85,11 +84,12 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.types.pojo.Schema; +import software.amazon.awssdk.services.lambda.model.LambdaException; public class ObjectMapperFactoryV4 { private static final JsonFactory JSON_FACTORY = new JsonFactory(); - private static final String LAMDA_EXCEPTION_CLASS_NAME = LambdaFunctionException.class.getName(); + private static final String LAMDA_EXCEPTION_CLASS_NAME = LambdaException.class.getName(); private static final SerializerFactory SERIALIZER_FACTORY; @@ -185,7 +185,7 @@ private StrictObjectMapper(BlockAllocator allocator) ImmutableMap, JsonDeserializer> desers = ImmutableMap.of( FederationRequest.class, createRequestDeserializer(allocator), FederationResponse.class, createResponseDeserializer(allocator), - LambdaFunctionException.class, new LambdaFunctionExceptionSerDe.Deserializer()); + LambdaException.class, new LambdaFunctionExceptionSerDe.Deserializer()); SimpleDeserializers deserializers = new SimpleDeserializers(desers); DeserializerFactoryConfig dConfig = new DeserializerFactoryConfig().withAdditionalDeserializers(deserializers); _deserializationContext = new DefaultDeserializationContext.Impl(new StrictDeserializerFactory(dConfig)); diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/serde/v5/ObjectMapperFactoryV5.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/serde/v5/ObjectMapperFactoryV5.java index d47dc71c23..6b94467c9f 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/serde/v5/ObjectMapperFactoryV5.java +++ 
b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/serde/v5/ObjectMapperFactoryV5.java @@ -71,7 +71,6 @@ import com.amazonaws.athena.connector.lambda.serde.v4.OrderByFieldSerDeV4; import com.amazonaws.athena.connector.lambda.serde.v4.SchemaSerDeV4; import com.amazonaws.athena.connector.lambda.serde.v4.VariableExpressionSerDeV4; -import com.amazonaws.services.lambda.invoke.LambdaFunctionException; import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.databind.BeanDescription; import com.fasterxml.jackson.databind.DeserializationContext; @@ -96,11 +95,12 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.types.pojo.Schema; +import software.amazon.awssdk.services.lambda.model.LambdaException; public class ObjectMapperFactoryV5 { private static final JsonFactory JSON_FACTORY = new JsonFactory(); - private static final String LAMDA_EXCEPTION_CLASS_NAME = LambdaFunctionException.class.getName(); + private static final String LAMDA_EXCEPTION_CLASS_NAME = LambdaException.class.getName(); private static final SerializerFactory SERIALIZER_FACTORY; @@ -196,7 +196,7 @@ private StrictObjectMapper(BlockAllocator allocator) ImmutableMap, JsonDeserializer> desers = ImmutableMap.of( FederationRequest.class, createRequestDeserializer(allocator), FederationResponse.class, createResponseDeserializer(allocator), - LambdaFunctionException.class, new LambdaFunctionExceptionSerDe.Deserializer()); + LambdaException.class, new LambdaFunctionExceptionSerDe.Deserializer()); SimpleDeserializers deserializers = new SimpleDeserializers(desers); DeserializerFactoryConfig dConfig = new DeserializerFactoryConfig().withAdditionalDeserializers(deserializers); _deserializationContext = new DefaultDeserializationContext.Impl(new StrictDeserializerFactory(dConfig)); diff --git 
a/athena-federation-sdk/src/test/java/com/amazonaws/athena/connector/lambda/serde/v2/LambdaFunctionExceptionSerDeTest.java b/athena-federation-sdk/src/test/java/com/amazonaws/athena/connector/lambda/serde/v2/LambdaFunctionExceptionSerDeTest.java index bb5c75043d..a9972e9ca1 100644 --- a/athena-federation-sdk/src/test/java/com/amazonaws/athena/connector/lambda/serde/v2/LambdaFunctionExceptionSerDeTest.java +++ b/athena-federation-sdk/src/test/java/com/amazonaws/athena/connector/lambda/serde/v2/LambdaFunctionExceptionSerDeTest.java @@ -20,23 +20,21 @@ package com.amazonaws.athena.connector.lambda.serde.v2; import com.amazonaws.athena.connector.lambda.serde.TypedSerDeTest; -import com.amazonaws.services.lambda.invoke.LambdaFunctionException; import com.google.common.collect.ImmutableList; import org.junit.Before; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.lambda.model.LambdaException; import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; -import java.lang.reflect.Constructor; -import java.util.List; import static org.junit.Assert.assertEquals; public class LambdaFunctionExceptionSerDeTest - extends TypedSerDeTest + extends TypedSerDeTest { private static final Logger logger = LoggerFactory.getLogger(LambdaFunctionExceptionSerDeTest.class); @@ -47,14 +45,10 @@ public void beforeTest() String errorType = "com.amazonaws.services.dynamodbv2.model.ResourceNotFoundException"; String errorMessage = "Requested resource not found (Service: AmazonDynamoDBv2; Status Code: 400; Error Code: ResourceNotFoundException; Request ID: RIB6NOH4BNMAK6KQG88R5VE583VV4KQNSO5AEMVJF66Q9ASUAAJG)"; - ImmutableList stackTrace = ImmutableList.of( - "com.amazonaws.http.AmazonHttpClient$RequestExecutor.handleErrorResponse(AmazonHttpClient.java:1701)", - "com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeOneRequest(AmazonHttpClient.java:1356)"); - Constructor constructor = 
LambdaFunctionException.class.getDeclaredConstructor( - String.class, String.class, LambdaFunctionException.class, List.class); - constructor.setAccessible(true); - expected = constructor.newInstance(errorType, errorMessage, null, stackTrace); - + ImmutableList> stackTrace = ImmutableList.of( + ImmutableList.of("com.amazonaws.http.AmazonHttpClient$RequestExecutor.handleErrorResponse(AmazonHttpClient.java:1701)"), + ImmutableList.of("com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeOneRequest(AmazonHttpClient.java:1356)")); + expected = (LambdaException) LambdaException.builder().message(errorMessage + ". Stack trace: " + stackTrace + "\nErrorType: " + errorType).build(); String expectedSerDeFile = utils.getResourceOrFail("serde/v2", "LambdaFunctionException.json"); expectedSerDeText = utils.readAllAsString(expectedSerDeFile).trim(); } @@ -73,11 +67,10 @@ public void deserialize() logger.info("deserialize: enter"); InputStream input = new ByteArrayInputStream(expectedSerDeText.getBytes()); - LambdaFunctionException actual = mapper.readValue(input, LambdaFunctionException.class); + LambdaException actual = mapper.readValue(input, LambdaException.class); logger.info("deserialize: deserialized[{}]", actual.toString()); - assertEquals(expected.getType(), actual.getType()); assertEquals(expected.getMessage(), actual.getMessage()); assertEquals(expected.getCause(), actual.getCause()); expected.fillInStackTrace(); diff --git a/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/integ/HbaseIntegTest.java b/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/integ/HbaseIntegTest.java index 6edd01d72c..ac1b59237e 100644 --- a/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/integ/HbaseIntegTest.java +++ b/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/integ/HbaseIntegTest.java @@ -34,10 +34,6 @@ import com.amazonaws.services.elasticmapreduce.model.DescribeClusterResult; import 
com.amazonaws.services.elasticmapreduce.model.ListClustersRequest; import com.amazonaws.services.elasticmapreduce.model.ListClustersResult; -import com.amazonaws.services.lambda.AWSLambda; -import com.amazonaws.services.lambda.AWSLambdaClientBuilder; -import com.amazonaws.services.lambda.model.InvocationType; -import com.amazonaws.services.lambda.model.InvokeRequest; import com.google.common.collect.ImmutableList; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -50,6 +46,9 @@ import software.amazon.awscdk.services.emr.CfnCluster; import software.amazon.awscdk.services.iam.PolicyDocument; import software.amazon.awssdk.services.athena.model.Row; +import software.amazon.awssdk.services.lambda.LambdaClient; +import software.amazon.awssdk.services.lambda.model.InvocationType; +import software.amazon.awssdk.services.lambda.model.InvokeRequest; import java.time.LocalDate; import java.time.LocalDateTime; @@ -279,20 +278,21 @@ protected void setUpTableData() logger.info("----------------------------------------------------"); String hbaseLambdaName = "integ-hbase-" + UUID.randomUUID(); - AWSLambda lambdaClient = AWSLambdaClientBuilder.defaultClient(); + LambdaClient lambdaClient = LambdaClient.create(); CloudFormationClient cloudFormationHbaseClient = new CloudFormationClient(getHbaseLambdaStack(hbaseLambdaName)); try { // Create the Lambda function. cloudFormationHbaseClient.createStack(); // Invoke the Lambda function. - lambdaClient.invoke(new InvokeRequest() - .withFunctionName(hbaseLambdaName) - .withInvocationType(InvocationType.RequestResponse)); + lambdaClient.invoke(InvokeRequest.builder() + .functionName(hbaseLambdaName) + .invocationType(InvocationType.REQUEST_RESPONSE) + .build()); } finally { // Delete the Lambda function. 
cloudFormationHbaseClient.deleteStack(); - lambdaClient.shutdown(); + lambdaClient.close(); } } diff --git a/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/integ/RedisIntegTest.java b/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/integ/RedisIntegTest.java index 9c2ea312c2..2cb870fa7c 100644 --- a/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/integ/RedisIntegTest.java +++ b/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/integ/RedisIntegTest.java @@ -35,10 +35,6 @@ import com.amazonaws.services.elasticache.model.DescribeReplicationGroupsRequest; import com.amazonaws.services.elasticache.model.DescribeReplicationGroupsResult; import com.amazonaws.services.elasticache.model.Endpoint; -import com.amazonaws.services.lambda.AWSLambda; -import com.amazonaws.services.lambda.AWSLambdaClientBuilder; -import com.amazonaws.services.lambda.model.InvocationType; -import com.amazonaws.services.lambda.model.InvokeRequest; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.annotations.AfterClass; @@ -64,6 +60,9 @@ import software.amazon.awssdk.services.glue.model.EntityNotFoundException; import software.amazon.awssdk.services.glue.model.TableInput; import software.amazon.awssdk.services.glue.model.UpdateTableRequest; +import software.amazon.awssdk.services.lambda.LambdaClient; +import software.amazon.awssdk.services.lambda.model.InvocationType; +import software.amazon.awssdk.services.lambda.model.InvokeRequest; import java.time.Duration; import java.util.ArrayList; @@ -191,20 +190,21 @@ protected void setUpTableData() logger.info("----------------------------------------------------"); String redisLambdaName = "integ-redis-helper-" + UUID.randomUUID(); - AWSLambda lambdaClient = AWSLambdaClientBuilder.defaultClient(); + LambdaClient lambdaClient = LambdaClient.create(); CloudFormationClient cloudFormationRedisClient = new CloudFormationClient(getRedisLambdaStack(redisLambdaName)); try { 
// Create the Lambda function. cloudFormationRedisClient.createStack(); // Invoke the Lambda function. - lambdaClient.invoke(new InvokeRequest() - .withFunctionName(redisLambdaName) - .withInvocationType(InvocationType.RequestResponse)); + lambdaClient.invoke(InvokeRequest.builder() + .functionName(redisLambdaName) + .invocationType(InvocationType.REQUEST_RESPONSE) + .build()); } finally { // Delete the Lambda function. cloudFormationRedisClient.deleteStack(); - lambdaClient.shutdown(); + lambdaClient.close(); } } From 2545c7e93136a9e41772575be7574bbf0fa09288 Mon Sep 17 00:00:00 2001 From: Aimery Methena <159072740+aimethed@users.noreply.github.com> Date: Thu, 25 Jul 2024 15:23:42 -0400 Subject: [PATCH 08/87] V2 migration s3 (#2108) --- .../aws/cmdb/AwsCmdbRecordHandler.java | 4 +- .../aws/cmdb/TableProviderFactory.java | 7 +- .../tables/s3/S3BucketsTableProvider.java | 26 +- .../tables/s3/S3ObjectsTableProvider.java | 54 +-- .../aws/cmdb/AwsCmdbMetadataHandlerTest.java | 4 +- .../aws/cmdb/AwsCmdbRecordHandlerTest.java | 4 +- .../aws/cmdb/TableProviderFactoryTest.java | 4 +- .../tables/AbstractTableProviderTest.java | 41 +- .../tables/s3/S3BucketsTableProviderTest.java | 33 +- .../tables/s3/S3ObjectsTableProviderTest.java | 60 ++- .../ClickHouseMuxRecordHandler.java | 4 +- .../clickhouse/ClickHouseRecordHandler.java | 7 +- .../ClickHouseMuxJdbcRecordHandlerTest.java | 6 +- .../cloudera/HiveMuxRecordHandler.java | 4 +- .../cloudera/HiveRecordHandler.java | 7 +- .../cloudera/HiveMuxRecordHandlerTest.java | 6 +- .../cloudera/HiveRecordHandlerTest.java | 7 +- .../cloudera/ImpalaMuxRecordHandler.java | 4 +- .../cloudera/ImpalaRecordHandler.java | 7 +- .../cloudera/ImpalaMuxRecordHandlerTest.java | 6 +- .../cloudera/ImpalaRecordHandlerTest.java | 6 +- .../metrics/MetricsRecordHandler.java | 9 +- .../metrics/MetricsRecordHandlerTest.java | 29 +- .../cloudwatch/CloudwatchRecordHandler.java | 7 +- .../CloudwatchRecordHandlerTest.java | 29 +- 
.../DataLakeGen2MuxRecordHandler.java | 4 +- .../DataLakeGen2RecordHandler.java | 7 +- .../DataLakeGen2MuxRecordHandlerTest.java | 6 +- .../DataLakeRecordHandlerTest.java | 6 +- .../db2as400/Db2As400MuxRecordHandler.java | 4 +- .../db2as400/Db2As400RecordHandler.java | 7 +- .../db2as400/Db2As400RecordHandlerTest.java | 6 +- .../connectors/db2/Db2MuxRecordHandler.java | 4 +- .../connectors/db2/Db2RecordHandler.java | 7 +- .../connectors/db2/Db2RecordHandlerTest.java | 6 +- .../connectors/docdb/DocDBRecordHandler.java | 7 +- .../docdb/DocDBRecordHandlerTest.java | 31 +- .../dynamodb/DynamoDBRecordHandler.java | 4 +- .../dynamodb/DynamoDBRecordHandlerTest.java | 4 +- athena-elasticsearch/pom.xml | 27 -- .../ElasticsearchRecordHandler.java | 7 +- .../ElasticsearchRecordHandlerTest.java | 35 +- .../example/ExampleRecordHandler.java | 25 +- .../example/ExampleRecordHandlerTest.java | 21 +- athena-federation-sdk/pom.xml | 117 +++--- .../connector/lambda/data/BlockUtils.java | 4 + .../lambda/data/S3BlockSpillReader.java | 40 +- .../connector/lambda/data/S3BlockSpiller.java | 54 ++- .../domain/spill/SpillLocationVerifier.java | 12 +- .../lambda/handlers/MetadataHandler.java | 6 +- .../lambda/handlers/RecordHandler.java | 9 +- .../lambda/data/S3BlockSpillerTest.java | 46 +-- .../spill/SpillLocationVerifierTest.java | 20 +- .../connectors/gcs/GcsRecordHandler.java | 7 +- .../gcs/GcsCompositeHandlerTest.java | 9 +- .../connectors/gcs/GcsRecordHandlerTest.java | 7 +- .../athena/connectors/gcs/GenericGcsTest.java | 6 +- .../bigquery/BigQueryRecordHandler.java | 7 +- .../bigquery/BigQueryRecordHandlerTest.java | 6 +- .../connectors/hbase/HbaseKerberosUtils.java | 38 +- .../connectors/hbase/HbaseRecordHandler.java | 9 +- .../hbase/HbaseRecordHandlerTest.java | 30 +- .../hortonworks/HiveMuxRecordHandler.java | 4 +- .../hortonworks/HiveRecordHandler.java | 7 +- .../hortonworks/HiveMuxRecordHandlerTest.java | 6 +- .../hortonworks/HiveRecordHandlerTest.java | 6 +- athena-jdbc/pom.xml 
| 27 -- .../jdbc/MultiplexingJdbcRecordHandler.java | 4 +- .../jdbc/manager/JdbcRecordHandler.java | 4 +- .../MultiplexingJdbcRecordHandlerTest.java | 6 +- .../jdbc/manager/JdbcRecordHandlerTest.java | 32 +- .../connectors/kafka/KafkaRecordHandler.java | 7 +- .../athena/connectors/kafka/KafkaUtils.java | 40 +- .../kafka/KafkaRecordHandlerTest.java | 4 +- .../connectors/kafka/KafkaUtilsTest.java | 50 +-- .../msk/AmazonMskRecordHandler.java | 7 +- .../athena/connectors/msk/AmazonMskUtils.java | 38 +- .../msk/AmazonMskRecordHandlerTest.java | 4 +- .../connectors/msk/AmazonMskUtilsTest.java | 49 +-- .../mysql/MySqlMuxRecordHandler.java | 4 +- .../connectors/mysql/MySqlRecordHandler.java | 7 +- .../mysql/MySqlMuxJdbcRecordHandlerTest.java | 6 +- .../mysql/MySqlRecordHandlerTest.java | 6 +- .../neptune/NeptuneRecordHandler.java | 7 +- .../neptune/NeptuneRecordHandlerTest.java | 44 +-- .../oracle/OracleMuxRecordHandler.java | 4 +- .../oracle/OracleRecordHandler.java | 7 +- .../OracleMuxJdbcMetadataHandlerTest.java | 2 - .../OracleMuxJdbcRecordHandlerTest.java | 6 +- .../oracle/OracleRecordHandlerTest.java | 6 +- .../PostGreSqlMuxRecordHandler.java | 4 +- .../postgresql/PostGreSqlRecordHandler.java | 7 +- .../PostGreSqlMuxJdbcRecordHandlerTest.java | 6 +- .../PostGreSqlRecordHandlerTest.java | 6 +- athena-redis/pom.xml | 27 -- .../connectors/redis/RedisRecordHandler.java | 9 +- .../redis/RedisRecordHandlerTest.java | 30 +- .../redshift/RedshiftMuxRecordHandler.java | 4 +- .../redshift/RedshiftRecordHandler.java | 7 +- .../RedshiftMuxJdbcRecordHandlerTest.java | 6 +- .../redshift/RedshiftRecordHandlerTest.java | 6 +- .../saphana/SaphanaMuxRecordHandler.java | 4 +- .../saphana/SaphanaRecordHandler.java | 7 +- .../SaphanaMuxJdbcRecordHandlerTest.java | 6 +- .../saphana/SaphanaRecordHandlerTest.java | 6 +- .../snowflake/SnowflakeMuxRecordHandler.java | 4 +- .../snowflake/SnowflakeRecordHandler.java | 7 +- .../SnowflakeMetadataHandlerTest.java | 8 +- 
.../SnowflakeMuxJdbcRecordHandlerTest.java | 6 +- .../snowflake/SnowflakeRecordHandlerTest.java | 6 +- .../sqlserver/SqlServerMuxRecordHandler.java | 4 +- .../sqlserver/SqlServerRecordHandler.java | 7 +- .../SqlServerMuxRecordHandlerTest.java | 6 +- .../sqlserver/SqlServerRecordHandlerTest.java | 6 +- .../synapse/SynapseMuxRecordHandler.java | 4 +- .../synapse/SynapseRecordHandler.java | 7 +- .../synapse/SynapseMetadataHandlerTest.java | 1 - .../synapse/SynapseMuxRecordHandlerTest.java | 6 +- .../synapse/SynapseRecordHandlerTest.java | 6 +- .../teradata/TeradataMuxRecordHandler.java | 4 +- .../teradata/TeradataRecordHandler.java | 7 +- .../TeradataMuxJdbcRecordHandlerTest.java | 6 +- .../teradata/TeradataRecordHandlerTest.java | 6 +- .../timestream/TimestreamRecordHandler.java | 7 +- .../TimestreamRecordHandlerTest.java | 32 +- .../connectors/tpcds/TPCDSRecordHandler.java | 7 +- .../tpcds/TPCDSRecordHandlerTest.java | 40 +- athena-vertica/pom.xml | 5 + .../connectors/vertica/VerticaConstants.java | 3 + .../vertica/VerticaMetadataHandler.java | 52 +-- .../vertica/VerticaRecordHandler.java | 110 +++--- .../vertica/VerticaMetadataHandlerTest.java | 38 +- .../vertica/VerticaRecordHandlerTest.java | 349 ++++++++++++++++++ 133 files changed, 1258 insertions(+), 1035 deletions(-) create mode 100644 athena-vertica/src/test/java/com/amazonaws/athena/connectors/vertica/VerticaRecordHandlerTest.java diff --git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbRecordHandler.java b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbRecordHandler.java index 60c63b437a..dc530d3f90 100644 --- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbRecordHandler.java +++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbRecordHandler.java @@ -25,9 +25,9 @@ import com.amazonaws.athena.connector.lambda.handlers.RecordHandler; import 
com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest; import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider; -import com.amazonaws.services.s3.AmazonS3; import org.apache.arrow.util.VisibleForTesting; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -56,7 +56,7 @@ public AwsCmdbRecordHandler(java.util.Map configOptions) } @VisibleForTesting - protected AwsCmdbRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, TableProviderFactory tableProviderFactory, java.util.Map configOptions) + protected AwsCmdbRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, TableProviderFactory tableProviderFactory, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, SOURCE_TYPE, configOptions); tableProviders = tableProviderFactory.getTableProviders(); diff --git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactory.java b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactory.java index d5868d33db..7a5099e0a7 100644 --- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactory.java +++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactory.java @@ -38,9 +38,8 @@ import com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduceClientBuilder; import com.amazonaws.services.rds.AmazonRDS; import com.amazonaws.services.rds.AmazonRDSClientBuilder; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import org.apache.arrow.util.VisibleForTesting; +import software.amazon.awssdk.services.s3.S3Client; import java.util.ArrayList; import java.util.HashMap; @@ -62,12 +61,12 @@ public 
TableProviderFactory(java.util.Map configOptions) AmazonEC2ClientBuilder.standard().build(), AmazonElasticMapReduceClientBuilder.standard().build(), AmazonRDSClientBuilder.standard().build(), - AmazonS3ClientBuilder.standard().build(), + S3Client.create(), configOptions); } @VisibleForTesting - protected TableProviderFactory(AmazonEC2 ec2, AmazonElasticMapReduce emr, AmazonRDS rds, AmazonS3 amazonS3, java.util.Map configOptions) + protected TableProviderFactory(AmazonEC2 ec2, AmazonElasticMapReduce emr, AmazonRDS rds, S3Client amazonS3, java.util.Map configOptions) { addProvider(new Ec2TableProvider(ec2)); addProvider(new EbsTableProvider(ec2)); diff --git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/s3/S3BucketsTableProvider.java b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/s3/S3BucketsTableProvider.java index 0387ac6bf7..7ff28b61e5 100644 --- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/s3/S3BucketsTableProvider.java +++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/s3/S3BucketsTableProvider.java @@ -29,10 +29,12 @@ import com.amazonaws.athena.connector.lambda.metadata.GetTableResponse; import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest; import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.Bucket; -import com.amazonaws.services.s3.model.Owner; import org.apache.arrow.vector.types.pojo.Schema; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.Bucket; +import software.amazon.awssdk.services.s3.model.GetBucketAclRequest; +import software.amazon.awssdk.services.s3.model.GetBucketAclResponse; +import software.amazon.awssdk.services.s3.model.Owner; /** * Maps your S3 Objects to a table. 
@@ -41,9 +43,9 @@ public class S3BucketsTableProvider implements TableProvider { private static final Schema SCHEMA; - private AmazonS3 amazonS3; + private S3Client amazonS3; - public S3BucketsTableProvider(AmazonS3 amazonS3) + public S3BucketsTableProvider(S3Client amazonS3) { this.amazonS3 = amazonS3; } @@ -84,7 +86,7 @@ public GetTableResponse getTable(BlockAllocator blockAllocator, GetTableRequest @Override public void readWithConstraint(BlockSpiller spiller, ReadRecordsRequest recordsRequest, QueryStatusChecker queryStatusChecker) { - for (Bucket next : amazonS3.listBuckets()) { + for (Bucket next : amazonS3.listBuckets().buckets()) { toRow(next, spiller); } } @@ -102,13 +104,15 @@ private void toRow(Bucket bucket, { spiller.writeRows((Block block, int row) -> { boolean matched = true; - matched &= block.offerValue("bucket_name", row, bucket.getName()); - matched &= block.offerValue("create_date", row, bucket.getCreationDate()); + matched &= block.offerValue("bucket_name", row, bucket.name()); + matched &= block.offerValue("create_date", row, bucket.creationDate()); - Owner owner = bucket.getOwner(); + GetBucketAclResponse response = amazonS3.getBucketAcl(GetBucketAclRequest.builder().bucket(bucket.name()).build()); + + Owner owner = response.owner(); if (owner != null) { - matched &= block.offerValue("owner_name", row, bucket.getOwner().getDisplayName()); - matched &= block.offerValue("owner_id", row, bucket.getOwner().getId()); + matched &= block.offerValue("owner_name", row, owner.displayName()); + matched &= block.offerValue("owner_id", row, owner.id()); } return matched ? 
1 : 0; diff --git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/s3/S3ObjectsTableProvider.java b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/s3/S3ObjectsTableProvider.java index c58315f49e..88179b9382 100644 --- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/s3/S3ObjectsTableProvider.java +++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/s3/S3ObjectsTableProvider.java @@ -30,12 +30,12 @@ import com.amazonaws.athena.connector.lambda.metadata.GetTableResponse; import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest; import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.ListObjectsV2Request; -import com.amazonaws.services.s3.model.ListObjectsV2Result; -import com.amazonaws.services.s3.model.Owner; -import com.amazonaws.services.s3.model.S3ObjectSummary; import org.apache.arrow.vector.types.pojo.Schema; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.ListObjectsV2Request; +import software.amazon.awssdk.services.s3.model.ListObjectsV2Response; +import software.amazon.awssdk.services.s3.model.Owner; +import software.amazon.awssdk.services.s3.model.S3Object; /** * Maps your S3 Objects to a table. @@ -45,9 +45,9 @@ public class S3ObjectsTableProvider { private static final int MAX_KEYS = 1000; private static final Schema SCHEMA; - private AmazonS3 amazonS3; + private S3Client amazonS3; - public S3ObjectsTableProvider(AmazonS3 amazonS3) + public S3ObjectsTableProvider(S3Client amazonS3) { this.amazonS3 = amazonS3; } @@ -98,42 +98,44 @@ public void readWithConstraint(BlockSpiller spiller, ReadRecordsRequest recordsR "(e.g. 
where bucket_name='my_bucket'."); } - ListObjectsV2Request req = new ListObjectsV2Request().withBucketName(bucket).withMaxKeys(MAX_KEYS); - ListObjectsV2Result result; + ListObjectsV2Request req = ListObjectsV2Request.builder().bucket(bucket).maxKeys(MAX_KEYS).build(); + ListObjectsV2Response response; do { - result = amazonS3.listObjectsV2(req); - for (S3ObjectSummary objectSummary : result.getObjectSummaries()) { - toRow(objectSummary, spiller); + response = amazonS3.listObjectsV2(req); + for (S3Object s3Object : response.contents()) { + toRow(s3Object, spiller, bucket); } - req.setContinuationToken(result.getNextContinuationToken()); + req = req.toBuilder().continuationToken(response.nextContinuationToken()).build(); } - while (result.isTruncated() && queryStatusChecker.isQueryRunning()); + while (response.isTruncated() && queryStatusChecker.isQueryRunning()); } /** * Maps a DBInstance into a row in our Apache Arrow response block(s). * - * @param objectSummary The S3 ObjectSummary to map. + * @param s3Object The S3 object to map. * @param spiller The BlockSpiller to use when we want to write a matching row to the response. + * @param bucket The name of the S3 bucket * @note The current implementation is rather naive in how it maps fields. It leverages a static * list of fields that we'd like to provide and then explicitly filters and converts each field. 
*/ - private void toRow(S3ObjectSummary objectSummary, - BlockSpiller spiller) + private void toRow(S3Object s3Object, + BlockSpiller spiller, + String bucket) { spiller.writeRows((Block block, int row) -> { boolean matched = true; - matched &= block.offerValue("bucket_name", row, objectSummary.getBucketName()); - matched &= block.offerValue("e_tag", row, objectSummary.getETag()); - matched &= block.offerValue("key", row, objectSummary.getKey()); - matched &= block.offerValue("bytes", row, objectSummary.getSize()); - matched &= block.offerValue("storage_class", row, objectSummary.getStorageClass()); - matched &= block.offerValue("last_modified", row, objectSummary.getLastModified()); + matched &= block.offerValue("bucket_name", row, bucket); + matched &= block.offerValue("e_tag", row, s3Object.eTag()); + matched &= block.offerValue("key", row, s3Object.key()); + matched &= block.offerValue("bytes", row, s3Object.size()); + matched &= block.offerValue("storage_class", row, s3Object.storageClassAsString()); + matched &= block.offerValue("last_modified", row, s3Object.lastModified()); - Owner owner = objectSummary.getOwner(); + Owner owner = s3Object.owner(); if (owner != null) { - matched &= block.offerValue("owner_name", row, owner.getDisplayName()); - matched &= block.offerValue("owner_id", row, owner.getId()); + matched &= block.offerValue("owner_name", row, owner.displayName()); + matched &= block.offerValue("owner_id", row, owner.id()); } return matched ? 
1 : 0; diff --git a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbMetadataHandlerTest.java b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbMetadataHandlerTest.java index 7aeefea094..6c755e65aa 100644 --- a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbMetadataHandlerTest.java +++ b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbMetadataHandlerTest.java @@ -38,7 +38,6 @@ import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider; -import com.amazonaws.services.s3.AmazonS3; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -46,6 +45,7 @@ import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.ArrayList; @@ -75,7 +75,7 @@ public class AwsCmdbMetadataHandlerTest private FederatedIdentity identity = new FederatedIdentity("arn", "account", Collections.emptyMap(), Collections.emptyList()); @Mock - private AmazonS3 mockS3; + private S3Client mockS3; @Mock private TableProviderFactory mockTableProviderFactory; diff --git a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbRecordHandlerTest.java b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbRecordHandlerTest.java index 940df77986..09000c9e60 100644 --- a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbRecordHandlerTest.java +++ b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbRecordHandlerTest.java @@ -32,7 +32,6 @@ import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; 
import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider; -import com.amazonaws.services.s3.AmazonS3; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; @@ -40,6 +39,7 @@ import org.mockito.Mockito; import org.mockito.junit.MockitoJUnitRunner; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Collections; @@ -62,7 +62,7 @@ public class AwsCmdbRecordHandlerTest private FederatedIdentity identity = new FederatedIdentity("arn", "account", Collections.emptyMap(), Collections.emptyList()); @Mock - private AmazonS3 mockS3; + private S3Client mockS3; @Mock private TableProviderFactory mockTableProviderFactory; diff --git a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactoryTest.java b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactoryTest.java index 19a77878e4..c196e379d6 100644 --- a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactoryTest.java +++ b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactoryTest.java @@ -24,11 +24,11 @@ import com.amazonaws.services.ec2.AmazonEC2; import com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduce; import com.amazonaws.services.rds.AmazonRDS; -import com.amazonaws.services.s3.AmazonS3; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.services.s3.S3Client; import java.util.List; import java.util.Map; @@ -51,7 +51,7 @@ public class TableProviderFactoryTest private AmazonRDS mockRds; @Mock - private AmazonS3 amazonS3; + private S3Client amazonS3; private TableProviderFactory factory = new 
TableProviderFactory(mockEc2, mockEmr, mockRds, amazonS3, com.google.common.collect.ImmutableMap.of()); diff --git a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/AbstractTableProviderTest.java b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/AbstractTableProviderTest.java index f4d6ba505a..8ab8620921 100644 --- a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/AbstractTableProviderTest.java +++ b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/AbstractTableProviderTest.java @@ -43,11 +43,6 @@ import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory; import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.PutObjectRequest; -import com.amazonaws.services.s3.model.PutObjectResult; -import com.amazonaws.services.s3.model.S3Object; -import com.amazonaws.services.s3.model.S3ObjectInputStream; import com.google.common.io.ByteStreams; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Schema; @@ -59,8 +54,16 @@ import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.mockito.junit.MockitoJUnitRunner; +import org.mockito.stubbing.Answer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; import java.io.ByteArrayInputStream; import java.io.InputStream; @@ -74,8 +77,6 @@ 
import static com.amazonaws.athena.connector.lambda.domain.predicate.Constraints.DEFAULT_NO_LIMIT; import static org.junit.Assert.*; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.nullable; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @RunWith(MockitoJUnitRunner.class) @@ -99,7 +100,7 @@ public abstract class AbstractTableProviderTest private final List mockS3Store = new ArrayList<>(); @Mock - private AmazonS3 amazonS3; + private S3Client amazonS3; @Mock private QueryStatusChecker queryStatusChecker; @@ -129,24 +130,24 @@ public void setUp() { allocator = new BlockAllocatorImpl(); - when(amazonS3.putObject(any())) + when(amazonS3.putObject(any(PutObjectRequest.class), any(RequestBody.class))) .thenAnswer((InvocationOnMock invocationOnMock) -> { - InputStream inputStream = ((PutObjectRequest) invocationOnMock.getArguments()[0]).getInputStream(); + InputStream inputStream = ((RequestBody) invocationOnMock.getArguments()[1]).contentStreamProvider().newStream(); ByteHolder byteHolder = new ByteHolder(); byteHolder.setBytes(ByteStreams.toByteArray(inputStream)); mockS3Store.add(byteHolder); - return mock(PutObjectResult.class); + return PutObjectResponse.builder().build(); }); - when(amazonS3.getObject(nullable(String.class), nullable(String.class))) - .thenAnswer((InvocationOnMock invocationOnMock) -> { - S3Object mockObject = mock(S3Object.class); - ByteHolder byteHolder = mockS3Store.get(0); - mockS3Store.remove(0); - when(mockObject.getObjectContent()).thenReturn( - new S3ObjectInputStream( - new ByteArrayInputStream(byteHolder.getBytes()), null)); - return mockObject; + when(amazonS3.getObject(any(GetObjectRequest.class))) + .thenAnswer(new Answer() + { + @Override + public Object answer(InvocationOnMock invocationOnMock) + throws Throwable + { + return new ResponseInputStream<>(GetObjectResponse.builder().build(), new ByteArrayInputStream(mockS3Store.get(0).getBytes())); + } }); 
blockSpillReader = new S3BlockSpillReader(amazonS3, allocator); diff --git a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/s3/S3BucketsTableProviderTest.java b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/s3/S3BucketsTableProviderTest.java index cb1372a917..348a077164 100644 --- a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/s3/S3BucketsTableProviderTest.java +++ b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/s3/S3BucketsTableProviderTest.java @@ -23,9 +23,6 @@ import com.amazonaws.athena.connector.lambda.data.BlockUtils; import com.amazonaws.athena.connectors.aws.cmdb.tables.AbstractTableProviderTest; import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.Bucket; -import com.amazonaws.services.s3.model.Owner; import org.apache.arrow.vector.complex.reader.FieldReader; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Field; @@ -33,12 +30,19 @@ import org.mockito.invocation.InvocationOnMock; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.Bucket; +import software.amazon.awssdk.services.s3.model.GetBucketAclRequest; +import software.amazon.awssdk.services.s3.model.GetBucketAclResponse; +import software.amazon.awssdk.services.s3.model.ListBucketsResponse; +import software.amazon.awssdk.services.s3.model.Owner; import java.util.ArrayList; import java.util.Date; import java.util.List; import static org.junit.Assert.*; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.when; public class S3BucketsTableProviderTest @@ -47,7 +51,7 @@ public class S3BucketsTableProviderTest private static final Logger logger = LoggerFactory.getLogger(S3BucketsTableProviderTest.class); 
@Mock - private AmazonS3 mockS3; + private S3Client mockS3; protected String getIdField() { @@ -87,7 +91,15 @@ protected void setUpRead() values.add(makeBucket(getIdValue())); values.add(makeBucket(getIdValue())); values.add(makeBucket("fake-id")); - return values; + return ListBucketsResponse.builder().buckets(values).build(); + }); + when(mockS3.getBucketAcl(any(GetBucketAclRequest.class))).thenAnswer((InvocationOnMock invocation) -> { + return GetBucketAclResponse.builder() + .owner(Owner.builder() + .displayName("owner_name") + .id("owner_id") + .build()) + .build(); }); } @@ -143,13 +155,10 @@ private void validate(FieldReader fieldReader) private Bucket makeBucket(String id) { - Bucket bucket = new Bucket(); - bucket.setName(id); - Owner owner = new Owner(); - owner.setDisplayName("owner_name"); - owner.setId("owner_id"); - bucket.setOwner(owner); - bucket.setCreationDate(new Date(100_000)); + Bucket bucket = Bucket.builder() + .name(id) + .creationDate(new Date(100_000).toInstant()) + .build(); return bucket; } } diff --git a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/s3/S3ObjectsTableProviderTest.java b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/s3/S3ObjectsTableProviderTest.java index ec77efc11a..761730ee08 100644 --- a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/s3/S3ObjectsTableProviderTest.java +++ b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/s3/S3ObjectsTableProviderTest.java @@ -23,11 +23,6 @@ import com.amazonaws.athena.connector.lambda.data.BlockUtils; import com.amazonaws.athena.connectors.aws.cmdb.tables.AbstractTableProviderTest; import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.ListObjectsV2Request; -import com.amazonaws.services.s3.model.ListObjectsV2Result; -import com.amazonaws.services.s3.model.Owner; 
-import com.amazonaws.services.s3.model.S3ObjectSummary; import org.apache.arrow.vector.complex.reader.FieldReader; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Field; @@ -35,6 +30,11 @@ import org.mockito.invocation.InvocationOnMock; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.ListObjectsV2Request; +import software.amazon.awssdk.services.s3.model.ListObjectsV2Response; +import software.amazon.awssdk.services.s3.model.Owner; +import software.amazon.awssdk.services.s3.model.S3Object; import java.util.ArrayList; import java.util.Date; @@ -45,7 +45,6 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.mockito.ArgumentMatchers.nullable; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class S3ObjectsTableProviderTest @@ -54,7 +53,7 @@ public class S3ObjectsTableProviderTest private static final Logger logger = LoggerFactory.getLogger(S3ObjectsTableProviderTest.class); @Mock - private AmazonS3 mockS3; + private S3Client mockS3; protected String getIdField() { @@ -92,25 +91,26 @@ protected void setUpRead() AtomicLong count = new AtomicLong(0); when(mockS3.listObjectsV2(nullable(ListObjectsV2Request.class))).thenAnswer((InvocationOnMock invocation) -> { ListObjectsV2Request request = (ListObjectsV2Request) invocation.getArguments()[0]; - assertEquals(getIdValue(), request.getBucketName()); + assertEquals(getIdValue(), request.bucket()); - ListObjectsV2Result mockResult = mock(ListObjectsV2Result.class); - List values = new ArrayList<>(); - values.add(makeObjectSummary(getIdValue())); - values.add(makeObjectSummary(getIdValue())); - values.add(makeObjectSummary("fake-id")); - when(mockResult.getObjectSummaries()).thenReturn(values); + List values = new ArrayList<>(); + values.add(makeS3Object()); + 
values.add(makeS3Object()); + ListObjectsV2Response.Builder responseBuilder = ListObjectsV2Response.builder().contents(values); if (count.get() > 0) { - assertNotNull(request.getContinuationToken()); + assertNotNull(request.continuationToken()); } if (count.incrementAndGet() < 2) { - when(mockResult.isTruncated()).thenReturn(true); - when(mockResult.getNextContinuationToken()).thenReturn("token"); + responseBuilder.isTruncated(true); + responseBuilder.nextContinuationToken("token"); + } + else { + responseBuilder.isTruncated(false); } - return mockResult; + return responseBuilder.build(); }); } @@ -167,19 +167,17 @@ private void validate(FieldReader fieldReader) } } - private S3ObjectSummary makeObjectSummary(String id) + private S3Object makeS3Object() { - S3ObjectSummary summary = new S3ObjectSummary(); - Owner owner = new Owner(); - owner.setId("owner_id"); - owner.setDisplayName("owner_name"); - summary.setOwner(owner); - summary.setBucketName(id); - summary.setETag("e_tag"); - summary.setKey("key"); - summary.setSize(100); - summary.setLastModified(new Date(100_000)); - summary.setStorageClass("storage_class"); - return summary; + Owner owner = Owner.builder().id("owner_id").displayName("owner_name").build(); + S3Object s3Object = S3Object.builder() + .owner(owner) + .eTag("e_tag") + .key("key") + .size((long)100) + .lastModified(new Date(100_000).toInstant()) + .storageClass("storage_class") + .build(); + return s3Object; } } diff --git a/athena-clickhouse/src/main/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseMuxRecordHandler.java b/athena-clickhouse/src/main/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseMuxRecordHandler.java index ebe3ce9fa8..ccb54ee88a 100644 --- a/athena-clickhouse/src/main/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseMuxRecordHandler.java +++ b/athena-clickhouse/src/main/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseMuxRecordHandler.java @@ -25,9 +25,9 @@ import 
com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; -import com.amazonaws.services.s3.AmazonS3; import com.google.common.annotations.VisibleForTesting; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -58,7 +58,7 @@ public ClickHouseMuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - ClickHouseMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, + ClickHouseMuxRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-clickhouse/src/main/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseRecordHandler.java b/athena-clickhouse/src/main/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseRecordHandler.java index da5187bf7f..6728a6c4e1 100644 --- a/athena-clickhouse/src/main/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseRecordHandler.java +++ b/athena-clickhouse/src/main/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseRecordHandler.java @@ -32,14 +32,13 @@ import com.amazonaws.athena.connectors.mysql.MySqlFederationExpressionParser; import com.amazonaws.athena.connectors.mysql.MySqlMuxCompositeHandler; import com.amazonaws.athena.connectors.mysql.MySqlQueryStringBuilder; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import 
com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.commons.lang3.Validate; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -76,12 +75,12 @@ public ClickHouseRecordHandler(DatabaseConnectionConfig databaseConnectionConfig public ClickHouseRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { - this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AthenaClient.create(), + this(databaseConnectionConfig, S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(), jdbcConnectionFactory, new MySqlQueryStringBuilder(MYSQL_QUOTE_CHARACTER, new MySqlFederationExpressionParser(MYSQL_QUOTE_CHARACTER)), configOptions); } @VisibleForTesting - ClickHouseRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, final AmazonS3 amazonS3, final SecretsManagerClient secretsManager, + ClickHouseRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, final S3Client amazonS3, final SecretsManagerClient secretsManager, final AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); diff --git a/athena-clickhouse/src/test/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseMuxJdbcRecordHandlerTest.java b/athena-clickhouse/src/test/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseMuxJdbcRecordHandlerTest.java index 1b0b1f629f..9adc8e9096 100644 --- 
a/athena-clickhouse/src/test/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseMuxJdbcRecordHandlerTest.java +++ b/athena-clickhouse/src/test/java/com/amazonaws/athena/connectors/clickhouse/ClickHouseMuxJdbcRecordHandlerTest.java @@ -28,12 +28,12 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; -import com.amazonaws.services.s3.AmazonS3; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -46,7 +46,7 @@ public class ClickHouseMuxJdbcRecordHandlerTest private Map recordHandlerMap; private ClickHouseRecordHandler recordHandler; private JdbcRecordHandler jdbcRecordHandler; - private AmazonS3 amazonS3; + private S3Client amazonS3; private SecretsManagerClient secretsManager; private AthenaClient athena; private QueryStatusChecker queryStatusChecker; @@ -57,7 +57,7 @@ public void setup() { this.recordHandler = Mockito.mock(ClickHouseRecordHandler.class); this.recordHandlerMap = Collections.singletonMap(ClickHouseConstants.NAME, this.recordHandler); - this.amazonS3 = Mockito.mock(AmazonS3.class); + this.amazonS3 = Mockito.mock(S3Client.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); diff --git a/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveMuxRecordHandler.java b/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveMuxRecordHandler.java index a552bf34ad..3dd28acccc 100644 --- 
a/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveMuxRecordHandler.java +++ b/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveMuxRecordHandler.java @@ -25,9 +25,9 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; -import com.amazonaws.services.s3.AmazonS3; import com.google.common.annotations.VisibleForTesting; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -57,7 +57,7 @@ public HiveMuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - HiveMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, + HiveMuxRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveRecordHandler.java b/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveRecordHandler.java index 839c8d0f31..95ff9f6a3e 100644 --- a/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveRecordHandler.java +++ b/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveRecordHandler.java @@ -28,12 +28,11 @@ import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import 
com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.commons.lang3.Validate; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -60,11 +59,11 @@ public HiveRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, java } public HiveRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { - this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AthenaClient.create(), + this(databaseConnectionConfig, S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(), jdbcConnectionFactory, new HiveQueryStringBuilder(HIVE_QUOTE_CHARACTER, new HiveFederationExpressionParser(HIVE_QUOTE_CHARACTER)), configOptions); } @VisibleForTesting - HiveRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) + HiveRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); this.jdbcSplitQueryBuilder = Validate.notNull(jdbcSplitQueryBuilder, "query builder must not be null"); diff --git 
a/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveMuxRecordHandlerTest.java b/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveMuxRecordHandlerTest.java index f7888d4f2f..31035ae1a8 100644 --- a/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveMuxRecordHandlerTest.java +++ b/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveMuxRecordHandlerTest.java @@ -29,7 +29,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; -import com.amazonaws.services.s3.AmazonS3; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.BeforeClass; @@ -37,6 +36,7 @@ import org.mockito.Mockito; import org.testng.Assert; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -50,7 +50,7 @@ public class HiveMuxRecordHandlerTest private Map recordHandlerMap; private HiveRecordHandler hiveRecordHandler; private JdbcRecordHandler jdbcRecordHandler; - private AmazonS3 amazonS3; + private S3Client amazonS3; private SecretsManagerClient secretsManager; private AthenaClient athena; private QueryStatusChecker queryStatusChecker; @@ -64,7 +64,7 @@ public void setup() { this.hiveRecordHandler = Mockito.mock(HiveRecordHandler.class); this.recordHandlerMap = Collections.singletonMap("recordHive", this.hiveRecordHandler); - this.amazonS3 = Mockito.mock(AmazonS3.class); + this.amazonS3 = Mockito.mock(S3Client.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); diff --git 
a/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveRecordHandlerTest.java b/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveRecordHandlerTest.java index 57ebfd68ff..108474f096 100644 --- a/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveRecordHandlerTest.java +++ b/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveRecordHandlerTest.java @@ -32,7 +32,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.s3.AmazonS3; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.types.Types; @@ -41,7 +40,9 @@ import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; + import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; @@ -62,7 +63,7 @@ public class HiveRecordHandlerTest private Connection connection; private JdbcConnectionFactory jdbcConnectionFactory; private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; - private AmazonS3 amazonS3; + private S3Client amazonS3; private SecretsManagerClient secretsManager; private AthenaClient athena; @@ -70,7 +71,7 @@ public class HiveRecordHandlerTest public void setup() throws Exception { - this.amazonS3 = Mockito.mock(AmazonS3.class); + this.amazonS3 = Mockito.mock(S3Client.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AthenaClient.class); 
Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}").build()); diff --git a/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxRecordHandler.java b/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxRecordHandler.java index 0faae53965..8dbac1f9e3 100644 --- a/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxRecordHandler.java +++ b/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxRecordHandler.java @@ -25,9 +25,9 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; -import com.amazonaws.services.s3.AmazonS3; import com.google.common.annotations.VisibleForTesting; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -57,7 +57,7 @@ public ImpalaMuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - ImpalaMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, + ImpalaMuxRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git 
a/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaRecordHandler.java b/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaRecordHandler.java index 4c72168dc4..59912af693 100644 --- a/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaRecordHandler.java +++ b/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaRecordHandler.java @@ -28,12 +28,11 @@ import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.commons.lang3.Validate; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -60,11 +59,11 @@ public ImpalaRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, ja } public ImpalaRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { - this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AthenaClient.create(), + this(databaseConnectionConfig, S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(), jdbcConnectionFactory, new ImpalaQueryStringBuilder(IMPALA_QUOTE_CHARACTER, new ImpalaFederationExpressionParser(IMPALA_QUOTE_CHARACTER)), configOptions); } @VisibleForTesting - ImpalaRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory 
jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) + ImpalaRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); this.jdbcSplitQueryBuilder = Validate.notNull(jdbcSplitQueryBuilder, "query builder must not be null"); diff --git a/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxRecordHandlerTest.java b/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxRecordHandlerTest.java index dbbd6aef09..cff80beebb 100644 --- a/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxRecordHandlerTest.java +++ b/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxRecordHandlerTest.java @@ -29,7 +29,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; -import com.amazonaws.services.s3.AmazonS3; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.BeforeClass; @@ -37,6 +36,7 @@ import org.mockito.Mockito; import org.testng.Assert; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -50,7 +50,7 @@ public class ImpalaMuxRecordHandlerTest private Map recordHandlerMap; private ImpalaRecordHandler impalaRecordHandler; private JdbcRecordHandler jdbcRecordHandler; - private AmazonS3 amazonS3; + private S3Client 
amazonS3; private SecretsManagerClient secretsManager; private AthenaClient athena; private QueryStatusChecker queryStatusChecker; @@ -64,7 +64,7 @@ public void setup() { this.impalaRecordHandler = Mockito.mock(ImpalaRecordHandler.class); this.recordHandlerMap = Collections.singletonMap("recordImpala", this.impalaRecordHandler); - this.amazonS3 = Mockito.mock(AmazonS3.class); + this.amazonS3 = Mockito.mock(S3Client.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); diff --git a/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaRecordHandlerTest.java b/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaRecordHandlerTest.java index 8f502431a6..bd0909b48f 100644 --- a/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaRecordHandlerTest.java +++ b/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaRecordHandlerTest.java @@ -32,8 +32,8 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.s3.AmazonS3; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; @@ -63,7 +63,7 @@ public class ImpalaRecordHandlerTest private Connection connection; private JdbcConnectionFactory jdbcConnectionFactory; private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; - private AmazonS3 amazonS3; + private S3Client amazonS3; 
private SecretsManagerClient secretsManager; private AthenaClient athena; @@ -71,7 +71,7 @@ public class ImpalaRecordHandlerTest public void setup() throws Exception { - this.amazonS3 = Mockito.mock(AmazonS3.class); + this.amazonS3 = Mockito.mock(S3Client.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AthenaClient.class); Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}").build()); diff --git a/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsRecordHandler.java b/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsRecordHandler.java index 9bfe7efd82..5560b39f85 100644 --- a/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsRecordHandler.java +++ b/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsRecordHandler.java @@ -40,13 +40,12 @@ import com.amazonaws.services.cloudwatch.model.MetricDataQuery; import com.amazonaws.services.cloudwatch.model.MetricDataResult; import com.amazonaws.services.cloudwatch.model.MetricStat; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import org.apache.arrow.util.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Field; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Date; @@ -97,19 +96,19 @@ public class MetricsRecordHandler //Used to handle throttling events by applying AIMD congestion control private final ThrottlingInvoker 
invoker; - private final AmazonS3 amazonS3; + private final S3Client amazonS3; private final AmazonCloudWatch metrics; public MetricsRecordHandler(java.util.Map configOptions) { - this(AmazonS3ClientBuilder.defaultClient(), + this(S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(), AmazonCloudWatchClientBuilder.standard().build(), configOptions); } @VisibleForTesting - protected MetricsRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, AmazonCloudWatch metrics, java.util.Map configOptions) + protected MetricsRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, AmazonCloudWatch metrics, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, SOURCE_TYPE, configOptions); this.amazonS3 = amazonS3; diff --git a/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsRecordHandlerTest.java b/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsRecordHandlerTest.java index 51a69cf686..ae25003e62 100644 --- a/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsRecordHandlerTest.java +++ b/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsRecordHandlerTest.java @@ -47,11 +47,6 @@ import com.amazonaws.services.cloudwatch.model.MetricDataQuery; import com.amazonaws.services.cloudwatch.model.MetricDataResult; import com.amazonaws.services.cloudwatch.model.MetricStat; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.PutObjectRequest; -import com.amazonaws.services.s3.model.PutObjectResult; -import com.amazonaws.services.s3.model.S3Object; -import com.amazonaws.services.s3.model.S3ObjectInputStream; import com.google.common.io.ByteStreams; import org.junit.After; import org.junit.Before; @@ -66,6 +61,14 @@ import 
software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; + import java.io.ByteArrayInputStream; import java.io.InputStream; import java.util.ArrayList; @@ -115,7 +118,7 @@ public class MetricsRecordHandlerTest private AmazonCloudWatch mockMetrics; @Mock - private AmazonS3 mockS3; + private S3Client mockS3; @Mock private SecretsManagerClient mockSecretsManager; @@ -132,31 +135,27 @@ public void setUp() handler = new MetricsRecordHandler(mockS3, mockSecretsManager, mockAthena, mockMetrics, com.google.common.collect.ImmutableMap.of()); spillReader = new S3BlockSpillReader(mockS3, allocator); - Mockito.lenient().when(mockS3.putObject(any())) + Mockito.lenient().when(mockS3.putObject(any(PutObjectRequest.class), any(RequestBody.class))) .thenAnswer((InvocationOnMock invocationOnMock) -> { - InputStream inputStream = ((PutObjectRequest) invocationOnMock.getArguments()[0]).getInputStream(); + InputStream inputStream = ((RequestBody) invocationOnMock.getArguments()[1]).contentStreamProvider().newStream(); ByteHolder byteHolder = new ByteHolder(); byteHolder.setBytes(ByteStreams.toByteArray(inputStream)); synchronized (mockS3Storage) { mockS3Storage.add(byteHolder); logger.info("puObject: total size " + mockS3Storage.size()); } - return mock(PutObjectResult.class); + return PutObjectResponse.builder().build(); }); - Mockito.lenient().when(mockS3.getObject(nullable(String.class), nullable(String.class))) + Mockito.lenient().when(mockS3.getObject(any(GetObjectRequest.class))) 
.thenAnswer((InvocationOnMock invocationOnMock) -> { - S3Object mockObject = mock(S3Object.class); ByteHolder byteHolder; synchronized (mockS3Storage) { byteHolder = mockS3Storage.get(0); mockS3Storage.remove(0); logger.info("getObject: total size " + mockS3Storage.size()); } - when(mockObject.getObjectContent()).thenReturn( - new S3ObjectInputStream( - new ByteArrayInputStream(byteHolder.getBytes()), null)); - return mockObject; + return new ResponseInputStream<>(GetObjectResponse.builder().build(), new ByteArrayInputStream(byteHolder.getBytes())); }); } diff --git a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandler.java b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandler.java index 05f8c25430..7b4aa47596 100644 --- a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandler.java +++ b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandler.java @@ -39,12 +39,11 @@ import com.amazonaws.services.logs.model.GetQueryResultsResult; import com.amazonaws.services.logs.model.OutputLogEvent; import com.amazonaws.services.logs.model.ResultField; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import org.apache.arrow.util.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.List; @@ -82,7 +81,7 @@ public class CloudwatchRecordHandler public CloudwatchRecordHandler(java.util.Map configOptions) { this( - AmazonS3ClientBuilder.defaultClient(), + S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(), AWSLogsClientBuilder.defaultClient(), @@ -90,7 +89,7 @@ public CloudwatchRecordHandler(java.util.Map 
configOptions) } @VisibleForTesting - protected CloudwatchRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, AWSLogs awsLogs, java.util.Map configOptions) + protected CloudwatchRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, AWSLogs awsLogs, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, SOURCE_TYPE, configOptions); this.awsLogs = awsLogs; diff --git a/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandlerTest.java b/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandlerTest.java index 5e494cb9d0..758deacb50 100644 --- a/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandlerTest.java +++ b/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandlerTest.java @@ -43,11 +43,6 @@ import com.amazonaws.services.logs.model.GetLogEventsRequest; import com.amazonaws.services.logs.model.GetLogEventsResult; import com.amazonaws.services.logs.model.OutputLogEvent; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.PutObjectRequest; -import com.amazonaws.services.s3.model.PutObjectResult; -import com.amazonaws.services.s3.model.S3Object; -import com.amazonaws.services.s3.model.S3ObjectInputStream; import com.google.common.collect.ImmutableList; import com.google.common.io.ByteStreams; import org.apache.arrow.vector.types.Types; @@ -61,7 +56,14 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.core.sync.RequestBody; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import 
software.amazon.awssdk.services.s3.model.GetObjectResponse; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.ByteArrayInputStream; @@ -77,7 +79,6 @@ import static org.junit.Assert.*; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.nullable; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @RunWith(MockitoJUnitRunner.class) @@ -97,7 +98,7 @@ public class CloudwatchRecordHandlerTest private AWSLogs mockAwsLogs; @Mock - private AmazonS3 mockS3; + private S3Client mockS3; @Mock private SecretsManagerClient mockSecretsManager; @@ -116,31 +117,27 @@ public void setUp() handler = new CloudwatchRecordHandler(mockS3, mockSecretsManager, mockAthena, mockAwsLogs, com.google.common.collect.ImmutableMap.of()); spillReader = new S3BlockSpillReader(mockS3, allocator); - when(mockS3.putObject(any())) + when(mockS3.putObject(any(PutObjectRequest.class), any(RequestBody.class))) .thenAnswer((InvocationOnMock invocationOnMock) -> { - InputStream inputStream = ((PutObjectRequest) invocationOnMock.getArguments()[0]).getInputStream(); + InputStream inputStream = ((RequestBody) invocationOnMock.getArguments()[1]).contentStreamProvider().newStream(); ByteHolder byteHolder = new ByteHolder(); byteHolder.setBytes(ByteStreams.toByteArray(inputStream)); synchronized (mockS3Storage) { mockS3Storage.add(byteHolder); logger.info("puObject: total size " + mockS3Storage.size()); } - return mock(PutObjectResult.class); + return PutObjectResponse.builder().build(); }); - when(mockS3.getObject(nullable(String.class), nullable(String.class))) + when(mockS3.getObject(any(GetObjectRequest.class))) .thenAnswer((InvocationOnMock invocationOnMock) -> { - S3Object mockObject = mock(S3Object.class); ByteHolder byteHolder; synchronized (mockS3Storage) { 
byteHolder = mockS3Storage.get(0); mockS3Storage.remove(0); logger.info("getObject: total size " + mockS3Storage.size()); } - when(mockObject.getObjectContent()).thenReturn( - new S3ObjectInputStream( - new ByteArrayInputStream(byteHolder.getBytes()), null)); - return mockObject; + return new ResponseInputStream<>(GetObjectResponse.builder().build(), new ByteArrayInputStream(byteHolder.getBytes())); }); when(mockAwsLogs.getLogEvents(nullable(GetLogEventsRequest.class))).thenAnswer((InvocationOnMock invocationOnMock) -> { diff --git a/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxRecordHandler.java b/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxRecordHandler.java index 81dcb42ba7..dd7c643f82 100644 --- a/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxRecordHandler.java +++ b/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxRecordHandler.java @@ -24,9 +24,9 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; -import com.amazonaws.services.s3.AmazonS3; import com.google.common.annotations.VisibleForTesting; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -54,7 +54,7 @@ public DataLakeGen2MuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - DataLakeGen2MuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, + DataLakeGen2MuxRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory 
jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2RecordHandler.java b/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2RecordHandler.java index a31bb1ee3a..f80e8bd0c0 100644 --- a/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2RecordHandler.java +++ b/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2RecordHandler.java @@ -28,12 +28,11 @@ import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.commons.lang3.Validate; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -52,12 +51,12 @@ public DataLakeGen2RecordHandler(java.util.Map configOptions) } public DataLakeGen2RecordHandler(DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { - this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AthenaClient.create(), + this(databaseConnectionConfig, S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(), new GenericJdbcConnectionFactory(databaseConnectionConfig, DataLakeGen2MetadataHandler.JDBC_PROPERTIES, new 
DatabaseConnectionInfo(DataLakeGen2Constants.DRIVER_CLASS, DataLakeGen2Constants.DEFAULT_PORT)), new DataLakeGen2QueryStringBuilder(QUOTE_CHARACTER, new DataLakeGen2FederationExpressionParser(QUOTE_CHARACTER)), configOptions); } @VisibleForTesting - DataLakeGen2RecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) + DataLakeGen2RecordHandler(DatabaseConnectionConfig databaseConnectionConfig, S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); this.jdbcSplitQueryBuilder = Validate.notNull(jdbcSplitQueryBuilder, "query builder must not be null"); diff --git a/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxRecordHandlerTest.java b/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxRecordHandlerTest.java index 3d4c0bbb0a..dc2fa02473 100644 --- a/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxRecordHandlerTest.java +++ b/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxRecordHandlerTest.java @@ -28,12 +28,12 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; -import com.amazonaws.services.s3.AmazonS3; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; import 
software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -46,7 +46,7 @@ public class DataLakeGen2MuxRecordHandlerTest private Map recordHandlerMap; private DataLakeGen2RecordHandler dataLakeGen2RecordHandler; private JdbcRecordHandler jdbcRecordHandler; - private AmazonS3 amazonS3; + private S3Client amazonS3; private SecretsManagerClient secretsManager; private AthenaClient athena; private QueryStatusChecker queryStatusChecker; @@ -57,7 +57,7 @@ public void setup() { this.dataLakeGen2RecordHandler = Mockito.mock(DataLakeGen2RecordHandler.class); this.recordHandlerMap = Collections.singletonMap(DataLakeGen2Constants.NAME, this.dataLakeGen2RecordHandler); - this.amazonS3 = Mockito.mock(AmazonS3.class); + this.amazonS3 = Mockito.mock(S3Client.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); diff --git a/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeRecordHandlerTest.java b/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeRecordHandlerTest.java index 0b77403715..912d328fa3 100644 --- a/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeRecordHandlerTest.java +++ b/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeRecordHandlerTest.java @@ -31,7 +31,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.s3.AmazonS3; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.types.Types; import 
org.apache.arrow.vector.types.pojo.Schema; @@ -40,6 +39,7 @@ import org.junit.Test; import org.mockito.Mockito; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -56,7 +56,7 @@ public class DataLakeRecordHandlerTest private Connection connection; private JdbcConnectionFactory jdbcConnectionFactory; private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; - private AmazonS3 amazonS3; + private S3Client amazonS3; private SecretsManagerClient secretsManager; private AthenaClient athena; @@ -65,7 +65,7 @@ public void setup() throws Exception { System.setProperty("aws.region", "us-east-1"); - this.amazonS3 = Mockito.mock(AmazonS3.class); + this.amazonS3 = Mockito.mock(S3Client.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AthenaClient.class); this.connection = Mockito.mock(Connection.class); diff --git a/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400MuxRecordHandler.java b/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400MuxRecordHandler.java index 3f17c52a56..3d4706a208 100644 --- a/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400MuxRecordHandler.java +++ b/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400MuxRecordHandler.java @@ -24,9 +24,9 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; -import com.amazonaws.services.s3.AmazonS3; import com.google.common.annotations.VisibleForTesting; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import 
software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -54,7 +54,7 @@ public Db2As400MuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - Db2As400MuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, + Db2As400MuxRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400RecordHandler.java b/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400RecordHandler.java index 60898b6524..69d0711852 100644 --- a/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400RecordHandler.java +++ b/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400RecordHandler.java @@ -29,12 +29,11 @@ import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.commons.lang3.Validate; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -58,13 +57,13 @@ public Db2As400RecordHandler(java.util.Map configOptions) */ public 
Db2As400RecordHandler(DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { - this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AthenaClient.create(), + this(databaseConnectionConfig, S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(), new GenericJdbcConnectionFactory(databaseConnectionConfig, null, new DatabaseConnectionInfo(Db2As400Constants.DRIVER_CLASS, Db2As400Constants.DEFAULT_PORT)), new Db2As400QueryStringBuilder(QUOTE_CHARACTER), configOptions); } @VisibleForTesting - Db2As400RecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) + Db2As400RecordHandler(DatabaseConnectionConfig databaseConnectionConfig, S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); this.jdbcSplitQueryBuilder = Validate.notNull(jdbcSplitQueryBuilder, "query builder must not be null"); diff --git a/athena-db2-as400/src/test/java/com/amazonaws/athena/connectors/db2as400/Db2As400RecordHandlerTest.java b/athena-db2-as400/src/test/java/com/amazonaws/athena/connectors/db2as400/Db2As400RecordHandlerTest.java index e1bd503827..4ca5b947a8 100644 --- a/athena-db2-as400/src/test/java/com/amazonaws/athena/connectors/db2as400/Db2As400RecordHandlerTest.java +++ b/athena-db2-as400/src/test/java/com/amazonaws/athena/connectors/db2as400/Db2As400RecordHandlerTest.java @@ -31,7 +31,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import 
com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.s3.AmazonS3; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Schema; @@ -40,6 +39,7 @@ import org.junit.Test; import org.mockito.Mockito; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -54,14 +54,14 @@ public class Db2As400RecordHandlerTest { private Connection connection; private JdbcConnectionFactory jdbcConnectionFactory; private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; - private AmazonS3 amazonS3; + private S3Client amazonS3; private SecretsManagerClient secretsManager; private AthenaClient athena; @Before public void setup() throws Exception { System.setProperty("aws.region", "us-east-1"); - this.amazonS3 = Mockito.mock(AmazonS3.class); + this.amazonS3 = Mockito.mock(S3Client.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AthenaClient.class); this.connection = Mockito.mock(Connection.class); diff --git a/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2MuxRecordHandler.java b/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2MuxRecordHandler.java index da988d8f3b..94fbe8c395 100644 --- a/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2MuxRecordHandler.java +++ b/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2MuxRecordHandler.java @@ -24,9 +24,9 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; -import com.amazonaws.services.s3.AmazonS3; import com.google.common.annotations.VisibleForTesting; import 
software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -54,7 +54,7 @@ public Db2MuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - Db2MuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, + Db2MuxRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2RecordHandler.java b/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2RecordHandler.java index 731d03b0e1..8e9941f220 100644 --- a/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2RecordHandler.java +++ b/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2RecordHandler.java @@ -29,12 +29,11 @@ import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.commons.lang3.Validate; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -59,13 +58,13 @@ public Db2RecordHandler(java.util.Map configOptions) */ public 
Db2RecordHandler(DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { - this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AthenaClient.create(), + this(databaseConnectionConfig, S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(), new GenericJdbcConnectionFactory(databaseConnectionConfig, null, new DatabaseConnectionInfo(Db2Constants.DRIVER_CLASS, Db2Constants.DEFAULT_PORT)), new Db2QueryStringBuilder(QUOTE_CHARACTER, new Db2FederationExpressionParser(QUOTE_CHARACTER)), configOptions); } @VisibleForTesting - Db2RecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) + Db2RecordHandler(DatabaseConnectionConfig databaseConnectionConfig, S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); this.jdbcSplitQueryBuilder = Validate.notNull(jdbcSplitQueryBuilder, "query builder must not be null"); diff --git a/athena-db2/src/test/java/com/amazonaws/athena/connectors/db2/Db2RecordHandlerTest.java b/athena-db2/src/test/java/com/amazonaws/athena/connectors/db2/Db2RecordHandlerTest.java index 5daef72df5..b7de058f8d 100644 --- a/athena-db2/src/test/java/com/amazonaws/athena/connectors/db2/Db2RecordHandlerTest.java +++ b/athena-db2/src/test/java/com/amazonaws/athena/connectors/db2/Db2RecordHandlerTest.java @@ -31,7 +31,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import 
com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.s3.AmazonS3; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Schema; @@ -40,6 +39,7 @@ import org.junit.Test; import org.mockito.Mockito; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -55,14 +55,14 @@ public class Db2RecordHandlerTest { private Connection connection; private JdbcConnectionFactory jdbcConnectionFactory; private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; - private AmazonS3 amazonS3; + private S3Client amazonS3; private SecretsManagerClient secretsManager; private AthenaClient athena; @Before public void setup() throws Exception { System.setProperty("aws.region", "us-east-1"); - this.amazonS3 = Mockito.mock(AmazonS3.class); + this.amazonS3 = Mockito.mock(S3Client.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AthenaClient.class); this.connection = Mockito.mock(Connection.class); diff --git a/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandler.java b/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandler.java index 0b103a82b2..4b0459f57e 100644 --- a/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandler.java +++ b/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandler.java @@ -28,8 +28,6 @@ import com.amazonaws.athena.connector.lambda.handlers.RecordHandler; import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest; import com.amazonaws.athena.connectors.docdb.qpt.DocDBQueryPassthrough; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import 
com.mongodb.client.MongoClient; import com.mongodb.client.MongoCollection; import com.mongodb.client.MongoCursor; @@ -41,6 +39,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -79,7 +78,7 @@ public class DocDBRecordHandler public DocDBRecordHandler(java.util.Map configOptions) { this( - AmazonS3ClientBuilder.defaultClient(), + S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(), new DocDBConnectionFactory(), @@ -87,7 +86,7 @@ public DocDBRecordHandler(java.util.Map configOptions) } @VisibleForTesting - protected DocDBRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, DocDBConnectionFactory connectionFactory, java.util.Map configOptions) + protected DocDBRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, DocDBConnectionFactory connectionFactory, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, SOURCE_TYPE, configOptions); this.connectionFactory = connectionFactory; diff --git a/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandlerTest.java b/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandlerTest.java index 93d6e63c30..866bc1ac41 100644 --- a/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandlerTest.java +++ b/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandlerTest.java @@ -40,11 +40,6 @@ import com.amazonaws.athena.connector.lambda.records.RemoteReadRecordsResponse; import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; -import com.amazonaws.services.s3.AmazonS3; -import 
com.amazonaws.services.s3.model.PutObjectRequest; -import com.amazonaws.services.s3.model.PutObjectResult; -import com.amazonaws.services.s3.model.S3Object; -import com.amazonaws.services.s3.model.S3ObjectInputStream; import com.google.common.collect.ImmutableList; import com.google.common.io.ByteStreams; import com.mongodb.client.FindIterable; @@ -72,6 +67,14 @@ import software.amazon.awssdk.services.glue.GlueClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; + import java.io.ByteArrayInputStream; import java.io.InputStream; import java.util.ArrayList; @@ -100,7 +103,7 @@ public class DocDBRecordHandlerTest private DocDBRecordHandler handler; private BlockAllocator allocator; private List mockS3Storage = new ArrayList<>(); - private AmazonS3 amazonS3; + private S3Client amazonS3; private S3BlockSpillReader spillReader; private Schema schemaForRead; private EncryptionKeyFactory keyFactory = new LocalKeyFactory(); @@ -171,7 +174,7 @@ public void setUp() allocator = new BlockAllocatorImpl(); - amazonS3 = mock(AmazonS3.class); + amazonS3 = mock(S3Client.class); mockDatabase = mock(MongoDatabase.class); mockCollection = mock(MongoCollection.class); mockIterable = mock(FindIterable.class); @@ -179,31 +182,27 @@ public void setUp() when(mockClient.getDatabase(eq(DEFAULT_SCHEMA))).thenReturn(mockDatabase); when(mockDatabase.getCollection(eq(TEST_TABLE))).thenReturn(mockCollection); - when(amazonS3.putObject(any())) + when(amazonS3.putObject(any(PutObjectRequest.class), any(RequestBody.class))) 
.thenAnswer((InvocationOnMock invocationOnMock) -> { - InputStream inputStream = ((PutObjectRequest) invocationOnMock.getArguments()[0]).getInputStream(); + InputStream inputStream = ((RequestBody) invocationOnMock.getArguments()[1]).contentStreamProvider().newStream(); ByteHolder byteHolder = new ByteHolder(); byteHolder.setBytes(ByteStreams.toByteArray(inputStream)); synchronized (mockS3Storage) { mockS3Storage.add(byteHolder); logger.info("puObject: total size " + mockS3Storage.size()); } - return mock(PutObjectResult.class); + return PutObjectResponse.builder().build(); }); - when(amazonS3.getObject(nullable(String.class), nullable(String.class))) + when(amazonS3.getObject(any(GetObjectRequest.class))) .thenAnswer((InvocationOnMock invocationOnMock) -> { - S3Object mockObject = mock(S3Object.class); ByteHolder byteHolder; synchronized (mockS3Storage) { byteHolder = mockS3Storage.get(0); mockS3Storage.remove(0); logger.info("getObject: total size " + mockS3Storage.size()); } - when(mockObject.getObjectContent()).thenReturn( - new S3ObjectInputStream( - new ByteArrayInputStream(byteHolder.getBytes()), null)); - return mockObject; + return new ResponseInputStream<>(GetObjectResponse.builder().build(), new ByteArrayInputStream(byteHolder.getBytes())); }); handler = new DocDBRecordHandler(amazonS3, mockSecretsManager, mockAthena, connectionFactory, com.google.common.collect.ImmutableMap.of()); diff --git a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBRecordHandler.java b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBRecordHandler.java index 33b467d2b8..42c38478a0 100644 --- a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBRecordHandler.java +++ b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBRecordHandler.java @@ -35,7 +35,6 @@ import com.amazonaws.athena.connectors.dynamodb.util.DDBPredicateUtils; import 
com.amazonaws.athena.connectors.dynamodb.util.DDBRecordMetadata; import com.amazonaws.athena.connectors.dynamodb.util.DDBTypeUtils; -import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.util.json.Jackson; import com.fasterxml.jackson.core.type.TypeReference; import com.google.common.annotations.VisibleForTesting; @@ -56,6 +55,7 @@ import software.amazon.awssdk.services.dynamodb.model.QueryResponse; import software.amazon.awssdk.services.dynamodb.model.ScanRequest; import software.amazon.awssdk.services.dynamodb.model.ScanResponse; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.IOException; @@ -128,7 +128,7 @@ public ThrottlingInvoker load(String tableName) } @VisibleForTesting - DynamoDBRecordHandler(DynamoDbClient ddbClient, AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, String sourceType, java.util.Map configOptions) + DynamoDBRecordHandler(DynamoDbClient ddbClient, S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, String sourceType, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, sourceType, configOptions); this.ddbClient = ddbClient; diff --git a/athena-dynamodb/src/test/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBRecordHandlerTest.java b/athena-dynamodb/src/test/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBRecordHandlerTest.java index 878542ad5c..9972e3fc0f 100644 --- a/athena-dynamodb/src/test/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBRecordHandlerTest.java +++ b/athena-dynamodb/src/test/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBRecordHandlerTest.java @@ -38,7 +38,6 @@ import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; import com.amazonaws.athena.connectors.dynamodb.util.DDBTypeUtils; -import com.amazonaws.services.s3.AmazonS3; import 
com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.complex.impl.UnionListReader; @@ -66,6 +65,7 @@ import software.amazon.awssdk.services.glue.model.EntityNotFoundException; import software.amazon.awssdk.services.glue.model.StorageDescriptor; import software.amazon.awssdk.services.glue.model.Table; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.time.LocalDate; @@ -136,7 +136,7 @@ public void setup() logger.info("{}: enter", testName.getMethodName()); allocator = new BlockAllocatorImpl(); - handler = new DynamoDBRecordHandler(ddbClient, mock(AmazonS3.class), mock(SecretsManagerClient.class), mock(AthenaClient.class), "source_type", com.google.common.collect.ImmutableMap.of()); + handler = new DynamoDBRecordHandler(ddbClient, mock(S3Client.class), mock(SecretsManagerClient.class), mock(AthenaClient.class), "source_type", com.google.common.collect.ImmutableMap.of()); metadataHandler = new DynamoDBMetadataHandler(new LocalKeyFactory(), secretsManager, athena, "spillBucket", "spillPrefix", ddbClient, glueClient, com.google.common.collect.ImmutableMap.of()); } diff --git a/athena-elasticsearch/pom.xml b/athena-elasticsearch/pom.xml index 7e9748b077..3ef358cfcc 100644 --- a/athena-elasticsearch/pom.xml +++ b/athena-elasticsearch/pom.xml @@ -62,33 +62,6 @@ ${log4j2Version} runtime - - com.amazonaws - aws-java-sdk-core - ${aws-sdk.version} - - - com.fasterxml.jackson.datatype - jackson-datatype-jsr310 - - - com.fasterxml.jackson.dataformat - jackson-dataformat-cbor - - - com.fasterxml.jackson.core - jackson-core - - - com.fasterxml.jackson.core - jackson-databind - - - com.fasterxml.jackson.core - jackson-annotations - - - software.amazon.jsii jsii-runtime diff --git a/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchRecordHandler.java 
b/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchRecordHandler.java index facfe16209..1d90956ad1 100644 --- a/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchRecordHandler.java +++ b/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchRecordHandler.java @@ -27,8 +27,6 @@ import com.amazonaws.athena.connector.lambda.handlers.RecordHandler; import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest; import com.amazonaws.athena.connectors.elasticsearch.qpt.ElasticsearchQueryPassthrough; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import org.apache.arrow.util.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Field; import org.elasticsearch.action.search.ClearScrollRequest; @@ -45,6 +43,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.IOException; @@ -89,7 +88,7 @@ public class ElasticsearchRecordHandler public ElasticsearchRecordHandler(Map configOptions) { - super(AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), + super(S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(), SOURCE_TYPE, configOptions); this.typeUtils = new ElasticsearchTypeUtils(); @@ -100,7 +99,7 @@ public ElasticsearchRecordHandler(Map configOptions) @VisibleForTesting protected ElasticsearchRecordHandler( - AmazonS3 amazonS3, + S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient amazonAthena, AwsRestHighLevelClientFactory clientFactory, diff --git a/athena-elasticsearch/src/test/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchRecordHandlerTest.java 
b/athena-elasticsearch/src/test/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchRecordHandlerTest.java index 99c641d32f..1336badd71 100644 --- a/athena-elasticsearch/src/test/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchRecordHandlerTest.java +++ b/athena-elasticsearch/src/test/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchRecordHandlerTest.java @@ -37,11 +37,6 @@ import com.amazonaws.athena.connector.lambda.records.RecordResponse; import com.amazonaws.athena.connector.lambda.records.RemoteReadRecordsResponse; import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.PutObjectRequest; -import com.amazonaws.services.s3.model.PutObjectResult; -import com.amazonaws.services.s3.model.S3Object; -import com.amazonaws.services.s3.model.S3ObjectInputStream; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; @@ -68,6 +63,14 @@ import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; + import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; @@ -116,7 +119,7 @@ public class ElasticsearchRecordHandlerTest private SearchResponse mockScrollResponse; @Mock - private AmazonS3 amazonS3; + private S3Client amazonS3; @Mock private SecretsManagerClient awsSecretsManager; @@ -124,12 +127,6 @@ public 
class ElasticsearchRecordHandlerTest @Mock private AthenaClient athena; - @Mock - PutObjectResult putObjectResult; - - @Mock - S3Object s3Object; - String[] expectedDocuments = {"[mytext : My favorite Sci-Fi movie is Interstellar.], [mykeyword : I love keywords.], [mylong : {11,12,13}], [myinteger : 666115], [myshort : 1972], [mybyte : 5], [mydouble : 47.5], [myscaled : 7], [myfloat : 5.6], [myhalf : 6.2], [mydatemilli : 2020-05-15T06:49:30], [mydatenano : {2020-05-15T06:50:01.457}], [myboolean : true], [mybinary : U29tZSBiaW5hcnkgYmxvYg==], [mynested : {[l1long : 357345987],[l1date : 2020-05-15T06:57:44.123],[l1nested : {[l2short : {1,2,3,4,5,6,7,8,9,10}],[l2binary : U29tZSBiaW5hcnkgYmxvYg==]}]}], [objlistouter : {}]" ,"[mytext : My favorite TV comedy is Seinfeld.], [mykeyword : I hate key-values.], [mylong : {14,null,16}], [myinteger : 732765666], [myshort : 1971], [mybyte : 7], [mydouble : 27.6], [myscaled : 10], [myfloat : 7.8], [myhalf : 7.3], [mydatemilli : null], [mydatenano : {2020-05-15T06:49:30.001}], [myboolean : false], [mybinary : U29tZSBiaW5hcnkgYmxvYg==], [mynested : {[l1long : 7322775555],[l1date : 2020-05-15T01:57:44.777],[l1nested : {[l2short : {11,12,13,14,15,16,null,18,19,20}],[l2binary : U29tZSBiaW5hcnkgYmxvYg==]}]}], [objlistouter : {{[objlistinner : {{[title : somebook],[hi : hi]}}],[test2 : title]}}]"}; @@ -276,31 +273,27 @@ public void setUp() allocator = new BlockAllocatorImpl(); - when(amazonS3.putObject(any())) + when(amazonS3.putObject(any(PutObjectRequest.class), any(RequestBody.class))) .thenAnswer((InvocationOnMock invocationOnMock) -> { - InputStream inputStream = ((PutObjectRequest) invocationOnMock.getArguments()[0]).getInputStream(); + InputStream inputStream = ((RequestBody) invocationOnMock.getArguments()[1]).contentStreamProvider().newStream(); ByteHolder byteHolder = new ByteHolder(); byteHolder.setBytes(ByteStreams.toByteArray(inputStream)); synchronized (mockS3Storage) { mockS3Storage.add(byteHolder); logger.info("puObject: 
total size " + mockS3Storage.size()); } - return mock(PutObjectResult.class); + return PutObjectResponse.builder().build(); }); - when(amazonS3.getObject(nullable(String.class), nullable(String.class))) + when(amazonS3.getObject(any(GetObjectRequest.class))) .thenAnswer((InvocationOnMock invocationOnMock) -> { - S3Object mockObject = mock(S3Object.class); ByteHolder byteHolder; synchronized (mockS3Storage) { byteHolder = mockS3Storage.get(0); mockS3Storage.remove(0); logger.info("getObject: total size " + mockS3Storage.size()); } - when(mockObject.getObjectContent()).thenReturn( - new S3ObjectInputStream( - new ByteArrayInputStream(byteHolder.getBytes()), null)); - return mockObject; + return new ResponseInputStream<>(GetObjectResponse.builder().build(), new ByteArrayInputStream(byteHolder.getBytes())); }); spillReader = new S3BlockSpillReader(amazonS3, allocator); diff --git a/athena-example/src/main/java/com/amazonaws/athena/connectors/example/ExampleRecordHandler.java b/athena-example/src/main/java/com/amazonaws/athena/connectors/example/ExampleRecordHandler.java index 87f3064d92..402420e0bb 100644 --- a/athena-example/src/main/java/com/amazonaws/athena/connectors/example/ExampleRecordHandler.java +++ b/athena-example/src/main/java/com/amazonaws/athena/connectors/example/ExampleRecordHandler.java @@ -34,17 +34,17 @@ import com.amazonaws.athena.connector.lambda.domain.predicate.ConstraintProjector; import com.amazonaws.athena.connector.lambda.handlers.RecordHandler; import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.s3.model.S3Object; import org.apache.arrow.util.VisibleForTesting; import org.apache.arrow.vector.FieldVector; import org.apache.arrow.vector.holders.NullableIntHolder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import 
software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; - import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; +import software.amazon.awssdk.services.s3.model.NoSuchKeyException; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.BufferedReader; @@ -77,15 +77,15 @@ public class ExampleRecordHandler */ private static final String SOURCE_TYPE = "example"; - private AmazonS3 amazonS3; + private S3Client amazonS3; public ExampleRecordHandler(java.util.Map configOptions) { - this(AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AthenaClient.create(), configOptions); + this(S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(), configOptions); } @VisibleForTesting - protected ExampleRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient amazonAthena, java.util.Map configOptions) + protected ExampleRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient amazonAthena, java.util.Map configOptions) { super(amazonS3, secretsManager, amazonAthena, SOURCE_TYPE, configOptions); this.amazonS3 = amazonS3; @@ -230,10 +230,13 @@ protected void readWithConstraint(BlockSpiller spiller, ReadRecordsRequest recor private BufferedReader openS3File(String bucket, String key) { logger.info("openS3File: opening file " + bucket + ":" + key); - if (amazonS3.doesObjectExist(bucket, key)) { - S3Object obj = amazonS3.getObject(bucket, key); + try { + ResponseInputStream responseStream = amazonS3.getObject(GetObjectRequest.builder().bucket(bucket).key(key).build()); logger.info("openS3File: opened file " + bucket + ":" + key); - return new BufferedReader(new InputStreamReader(obj.getObjectContent())); + 
return new BufferedReader(new InputStreamReader(responseStream)); + } + catch (NoSuchKeyException e) { + logger.error("openS3File: failed to open file " + bucket + ":" + key, e); } return null; } diff --git a/athena-example/src/test/java/com/amazonaws/athena/connectors/example/ExampleRecordHandlerTest.java b/athena-example/src/test/java/com/amazonaws/athena/connectors/example/ExampleRecordHandlerTest.java index c69dc6b3df..de2b30524b 100644 --- a/athena-example/src/test/java/com/amazonaws/athena/connectors/example/ExampleRecordHandlerTest.java +++ b/athena-example/src/test/java/com/amazonaws/athena/connectors/example/ExampleRecordHandlerTest.java @@ -33,9 +33,6 @@ import com.amazonaws.athena.connector.lambda.records.ReadRecordsResponse; import com.amazonaws.athena.connector.lambda.records.RecordResponse; import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.S3Object; -import com.amazonaws.services.s3.model.S3ObjectInputStream; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.After; @@ -48,6 +45,10 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.ByteArrayInputStream; @@ -59,6 +60,7 @@ import static com.amazonaws.athena.connector.lambda.domain.predicate.Constraints.DEFAULT_NO_LIMIT; import static org.junit.Assert.*; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.nullable; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -72,7 
+74,7 @@ public class ExampleRecordHandlerTest System.getenv("publishing").equalsIgnoreCase("true"); private BlockAllocatorImpl allocator; private Schema schemaForRead; - private AmazonS3 amazonS3; + private S3Client amazonS3; private SecretsManagerClient awsSecretsManager; private AthenaClient athena; private S3BlockSpillReader spillReader; @@ -105,23 +107,18 @@ public void setUp() allocator = new BlockAllocatorImpl(); - amazonS3 = mock(AmazonS3.class); + amazonS3 = mock(S3Client.class); awsSecretsManager = mock(SecretsManagerClient.class); athena = mock(AthenaClient.class); - when(amazonS3.doesObjectExist(nullable(String.class), nullable(String.class))).thenReturn(true); - when(amazonS3.getObject(nullable(String.class), nullable(String.class))) + when(amazonS3.getObject(any(GetObjectRequest.class))) .thenAnswer(new Answer() { @Override public Object answer(InvocationOnMock invocationOnMock) throws Throwable { - S3Object mockObject = mock(S3Object.class); - when(mockObject.getObjectContent()).thenReturn( - new S3ObjectInputStream( - new ByteArrayInputStream(getFakeObject()), null)); - return mockObject; + return new ResponseInputStream<>(GetObjectResponse.builder().build(), new ByteArrayInputStream(getFakeObject())); } }); diff --git a/athena-federation-sdk/pom.xml b/athena-federation-sdk/pom.xml index 43fb9f3388..c532d70ef0 100644 --- a/athena-federation-sdk/pom.xml +++ b/athena-federation-sdk/pom.xml @@ -80,6 +80,66 @@ + + software.amazon.awssdk + apache-client + ${aws-sdk-v2.version} + + + software.amazon.awssdk + athena + ${aws-sdk-v2.version} + + + software.amazon.awssdk + netty-nio-client + + + + + software.amazon.awssdk + glue + ${aws-sdk-v2.version} + + + software.amazon.awssdk + netty-nio-client + + + + + software.amazon.awssdk + kms + ${aws-sdk-v2.version} + + + software.amazon.awssdk + netty-nio-client + + + + + software.amazon.awssdk + lambda + ${aws-sdk-v2.version} + + + software.amazon.awssdk + netty-nio-client + + + + + software.amazon.awssdk + s3 + 
${aws-sdk-v2.version} + + + software.amazon.awssdk + netty-nio-client + + + software.amazon.awssdk secretsmanager @@ -121,29 +181,6 @@ com.fasterxml.jackson.core jackson-annotations - - - - software.amazon.awssdk - glue - ${aws-sdk-v2.version} - - - software.amazon.awssdk - netty-nio-client - - - - - software.amazon.awssdk - apache-client - ${aws-sdk-v2.version} - - - software.amazon.awssdk - athena - ${aws-sdk-v2.version} - software.amazon.awssdk netty-nio-client @@ -192,40 +229,6 @@ aws-lambda-java-core 1.2.3 - - software.amazon.awssdk - lambda - ${aws-sdk-v2.version} - - - software.amazon.awssdk - netty-nio-client - - - - - com.amazonaws - aws-java-sdk-s3 - ${aws-sdk.version} - - - - com.amazonaws - aws-java-sdk-kms - - - - - software.amazon.awssdk - kms - ${aws-sdk-v2.version} - - - software.amazon.awssdk - netty-nio-client - - - com.google.guava guava diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/data/BlockUtils.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/data/BlockUtils.java index dfac5b00d6..268dad7ddb 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/data/BlockUtils.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/data/BlockUtils.java @@ -77,6 +77,7 @@ import java.math.BigDecimal; import java.math.RoundingMode; +import java.time.Instant; import java.time.LocalDate; import java.time.LocalDateTime; import java.time.ZoneId; @@ -273,6 +274,9 @@ else if (value instanceof LocalDateTime) { pos, ((LocalDateTime) value).atZone(UTC_ZONE_ID).toInstant().toEpochMilli()); } + else if (value instanceof Instant) { + ((DateMilliVector) vector).setSafe(pos, ((Instant) value).toEpochMilli()); + } else { ((DateMilliVector) vector).setSafe(pos, (long) value); } diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/data/S3BlockSpillReader.java 
b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/data/S3BlockSpillReader.java index 48806b99dc..6415484b41 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/data/S3BlockSpillReader.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/data/S3BlockSpillReader.java @@ -25,12 +25,14 @@ import com.amazonaws.athena.connector.lambda.security.BlockCrypto; import com.amazonaws.athena.connector.lambda.security.EncryptionKey; import com.amazonaws.athena.connector.lambda.security.NoOpBlockCrypto; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.S3Object; import com.google.common.io.ByteStreams; import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; import java.io.IOException; @@ -40,10 +42,10 @@ public class S3BlockSpillReader { private static final Logger logger = LoggerFactory.getLogger(S3BlockSpillReader.class); - private final AmazonS3 amazonS3; + private final S3Client amazonS3; private final BlockAllocator allocator; - public S3BlockSpillReader(AmazonS3 amazonS3, BlockAllocator allocator) + public S3BlockSpillReader(S3Client amazonS3, BlockAllocator allocator) { this.amazonS3 = requireNonNull(amazonS3, "amazonS3 was null"); this.allocator = requireNonNull(allocator, "allocator was null"); @@ -59,13 +61,16 @@ public S3BlockSpillReader(AmazonS3 amazonS3, BlockAllocator allocator) */ public Block read(S3SpillLocation spillLocation, EncryptionKey key, Schema schema) { - S3Object fullObject = null; + ResponseInputStream responseStream = null; try { logger.debug("read: Started reading block from S3"); - fullObject = 
amazonS3.getObject(spillLocation.getBucket(), spillLocation.getKey()); + responseStream = amazonS3.getObject(GetObjectRequest.builder() + .bucket(spillLocation.getBucket()) + .key(spillLocation.getKey()) + .build()); logger.debug("read: Completed reading block from S3"); BlockCrypto blockCrypto = (key != null) ? new AesGcmBlockCrypto(allocator) : new NoOpBlockCrypto(allocator); - Block block = blockCrypto.decrypt(key, ByteStreams.toByteArray(fullObject.getObjectContent()), schema); + Block block = blockCrypto.decrypt(key, ByteStreams.toByteArray(responseStream), schema); logger.debug("read: Completed decrypting block of size."); return block; } @@ -73,12 +78,12 @@ public Block read(S3SpillLocation spillLocation, EncryptionKey key, Schema schem throw new RuntimeException(ex); } finally { - if (fullObject != null) { + if (responseStream != null) { try { - fullObject.close(); + responseStream.close(); } catch (IOException ex) { - logger.warn("read: Exception while closing S3 object", ex); + logger.warn("read: Exception while closing S3 response stream", ex); } } } @@ -93,24 +98,27 @@ public Block read(S3SpillLocation spillLocation, EncryptionKey key, Schema schem */ public byte[] read(S3SpillLocation spillLocation, EncryptionKey key) { - S3Object fullObject = null; + ResponseInputStream responseStream = null; try { logger.debug("read: Started reading block from S3"); - fullObject = amazonS3.getObject(spillLocation.getBucket(), spillLocation.getKey()); + responseStream = amazonS3.getObject(GetObjectRequest.builder() + .bucket(spillLocation.getBucket()) + .key(spillLocation.getKey()) + .build()); logger.debug("read: Completed reading block from S3"); BlockCrypto blockCrypto = (key != null) ? 
new AesGcmBlockCrypto(allocator) : new NoOpBlockCrypto(allocator); - return blockCrypto.decrypt(key, ByteStreams.toByteArray(fullObject.getObjectContent())); + return blockCrypto.decrypt(key, ByteStreams.toByteArray(responseStream)); } catch (IOException ex) { throw new RuntimeException(ex); } finally { - if (fullObject != null) { + if (responseStream != null) { try { - fullObject.close(); + responseStream.close(); } catch (IOException ex) { - logger.warn("read: Exception while closing S3 object", ex); + logger.warn("read: Exception while closing S3 response stream", ex); } } } diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/data/S3BlockSpiller.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/data/S3BlockSpiller.java index 2604b5e228..de879feafd 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/data/S3BlockSpiller.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/data/S3BlockSpiller.java @@ -27,10 +27,6 @@ import com.amazonaws.athena.connector.lambda.security.BlockCrypto; import com.amazonaws.athena.connector.lambda.security.EncryptionKey; import com.amazonaws.athena.connector.lambda.security.NoOpBlockCrypto; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.ObjectMetadata; -import com.amazonaws.services.s3.model.PutObjectRequest; -import com.amazonaws.services.s3.model.S3Object; import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.io.ByteStreams; @@ -38,10 +34,16 @@ import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import 
software.amazon.awssdk.services.s3.model.GetObjectResponse; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; -import java.io.ByteArrayInputStream; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.concurrent.ExecutorService; @@ -76,7 +78,7 @@ public class S3BlockSpiller private static final String SPILL_PUT_REQUEST_HEADERS_ENV = "spill_put_request_headers"; //Used to write to S3 - private final AmazonS3 amazonS3; + private final S3Client amazonS3; //Used to optionally encrypt Blocks. private final BlockCrypto blockCrypto; //Used to create new blocks. @@ -125,7 +127,7 @@ public class S3BlockSpiller * @param constraintEvaluator The ConstraintEvaluator that should be used to constrain writes. */ public S3BlockSpiller( - AmazonS3 amazonS3, + S3Client amazonS3, SpillConfig spillConfig, BlockAllocator allocator, Schema schema, @@ -146,7 +148,7 @@ public S3BlockSpiller( * @param maxRowsPerCall The max number of rows to allow callers to write in one call. */ public S3BlockSpiller( - AmazonS3 amazonS3, + S3Client amazonS3, SpillConfig spillConfig, BlockAllocator allocator, Schema schema, @@ -318,29 +320,24 @@ public void close() /** * Grabs the request headers from env and sets them on the request */ - private void setRequestHeadersFromEnv(PutObjectRequest request) + private Map getRequestHeadersFromEnv() { String headersFromEnvStr = configOptions.get(SPILL_PUT_REQUEST_HEADERS_ENV); if (headersFromEnvStr == null || headersFromEnvStr.isEmpty()) { - return; + return Collections.emptyMap(); } try { ObjectMapper mapper = new ObjectMapper(); TypeReference> typeRef = new TypeReference>() {}; Map headers = mapper.readValue(headersFromEnvStr, typeRef); - for (Map.Entry entry : headers.entrySet()) { - String oldValue = request.putCustomRequestHeader(entry.getKey(), entry.getValue()); - if (oldValue != null) { - logger.warn("Key: %s has been overwritten with: %s. 
Old value: %s", - entry.getKey(), entry.getValue(), oldValue); - } - } + return headers; } catch (com.fasterxml.jackson.core.JsonProcessingException e) { String message = String.format("Invalid value for environment variable: %s : %s", SPILL_PUT_REQUEST_HEADERS_ENV, headersFromEnvStr); logger.error(message, e); } + return Collections.emptyMap(); } /** @@ -361,15 +358,13 @@ protected SpillLocation write(Block block) // Set the contentLength otherwise the s3 client will buffer again since it // only sees the InputStream wrapper. - ObjectMetadata objMeta = new ObjectMetadata(); - objMeta.setContentLength(bytes.length); - PutObjectRequest request = new PutObjectRequest( - spillLocation.getBucket(), - spillLocation.getKey(), - new ByteArrayInputStream(bytes), - objMeta); - setRequestHeadersFromEnv(request); - amazonS3.putObject(request); + PutObjectRequest request = PutObjectRequest.builder() + .bucket(spillLocation.getBucket()) + .key(spillLocation.getKey()) + .contentLength((long) bytes.length) + .metadata(getRequestHeadersFromEnv()) + .build(); + amazonS3.putObject(request, RequestBody.fromBytes(bytes)); logger.info("write: Completed spilling block of size {} bytes", bytes.length); return spillLocation; @@ -393,9 +388,12 @@ protected Block read(S3SpillLocation spillLocation, EncryptionKey key, Schema sc { try { logger.debug("write: Started reading block from S3"); - S3Object fullObject = amazonS3.getObject(spillLocation.getBucket(), spillLocation.getKey()); + ResponseInputStream responseStream = amazonS3.getObject(GetObjectRequest.builder() + .bucket(spillLocation.getBucket()) + .key(spillLocation.getKey()) + .build()); logger.debug("write: Completed reading block from S3"); - Block block = blockCrypto.decrypt(key, ByteStreams.toByteArray(fullObject.getObjectContent()), schema); + Block block = blockCrypto.decrypt(key, ByteStreams.toByteArray(responseStream), schema); logger.debug("write: Completed decrypting block of size."); return block; } diff --git 
a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/domain/spill/SpillLocationVerifier.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/domain/spill/SpillLocationVerifier.java index f15040812a..3dbb3e62ef 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/domain/spill/SpillLocationVerifier.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/domain/spill/SpillLocationVerifier.java @@ -20,11 +20,11 @@ * #L% */ -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.AmazonS3Exception; import com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.S3Exception; import java.util.Set; import java.util.stream.Collectors; @@ -39,14 +39,14 @@ public class SpillLocationVerifier private enum BucketState {UNCHECKED, VALID, INVALID} - private final AmazonS3 amazons3; + private final S3Client amazons3; private String bucket; private BucketState state; /** * @param amazons3 The S3 object for the account. 
*/ - public SpillLocationVerifier(AmazonS3 amazons3) + public SpillLocationVerifier(S3Client amazons3) { this.amazons3 = amazons3; this.bucket = null; @@ -85,7 +85,7 @@ public void checkBucketAuthZ(String spillBucket) void updateBucketState() { try { - Set buckets = amazons3.listBuckets().stream().map(b -> b.getName()).collect(Collectors.toSet()); + Set buckets = amazons3.listBuckets().buckets().stream().map(b -> b.name()).collect(Collectors.toSet()); if (!buckets.contains(bucket)) { state = BucketState.INVALID; @@ -96,7 +96,7 @@ void updateBucketState() logger.info("The state of bucket {} has been updated to {} from {}", bucket, state, BucketState.UNCHECKED); } - catch (AmazonS3Exception ex) { + catch (S3Exception ex) { throw new RuntimeException("Error while checking bucket ownership for " + bucket, ex); } } diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/MetadataHandler.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/MetadataHandler.java index 88c00b461f..0810ba64b1 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/MetadataHandler.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/MetadataHandler.java @@ -60,7 +60,6 @@ import com.amazonaws.athena.connector.lambda.serde.VersionedObjectMapperFactory; import com.amazonaws.services.lambda.runtime.Context; import com.amazonaws.services.lambda.runtime.RequestStreamHandler; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Field; @@ -69,6 +68,7 @@ import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.kms.KmsClient; +import software.amazon.awssdk.services.s3.S3Client; import 
software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.IOException; @@ -151,7 +151,7 @@ public MetadataHandler(String sourceType, java.util.Map configOp this.secretsManager = new CachableSecretsManager(SecretsManagerClient.create()); this.athena = AthenaClient.create(); - this.verifier = new SpillLocationVerifier(AmazonS3ClientBuilder.standard().build()); + this.verifier = new SpillLocationVerifier(S3Client.create()); this.athenaInvoker = ThrottlingInvoker.newDefaultBuilder(ATHENA_EXCEPTION_FILTER, configOptions).build(); } @@ -174,7 +174,7 @@ public MetadataHandler( this.sourceType = sourceType; this.spillBucket = spillBucket; this.spillPrefix = spillPrefix; - this.verifier = new SpillLocationVerifier(AmazonS3ClientBuilder.standard().build()); + this.verifier = new SpillLocationVerifier(S3Client.create()); this.athenaInvoker = ThrottlingInvoker.newDefaultBuilder(ATHENA_EXCEPTION_FILTER, configOptions).build(); } diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/RecordHandler.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/RecordHandler.java index 1ac7a85645..ac3e563005 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/RecordHandler.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/RecordHandler.java @@ -42,12 +42,11 @@ import com.amazonaws.athena.connector.lambda.serde.VersionedObjectMapperFactory; import com.amazonaws.services.lambda.runtime.Context; import com.amazonaws.services.lambda.runtime.RequestStreamHandler; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.fasterxml.jackson.databind.ObjectMapper; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import 
software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.IOException; @@ -69,7 +68,7 @@ public abstract class RecordHandler private static final String MAX_BLOCK_SIZE_BYTES = "MAX_BLOCK_SIZE_BYTES"; private static final int NUM_SPILL_THREADS = 2; protected final java.util.Map configOptions; - private final AmazonS3 amazonS3; + private final S3Client amazonS3; private final String sourceType; private final CachableSecretsManager secretsManager; private final AthenaClient athena; @@ -81,7 +80,7 @@ public abstract class RecordHandler public RecordHandler(String sourceType, java.util.Map configOptions) { this.sourceType = sourceType; - this.amazonS3 = AmazonS3ClientBuilder.defaultClient(); + this.amazonS3 = S3Client.create(); this.secretsManager = new CachableSecretsManager(SecretsManagerClient.create()); this.athena = AthenaClient.create(); this.configOptions = configOptions; @@ -91,7 +90,7 @@ public RecordHandler(String sourceType, java.util.Map configOpti /** * @param sourceType Used to aid in logging diagnostic info when raising a support case. 
*/ - public RecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, String sourceType, java.util.Map configOptions) + public RecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, String sourceType, java.util.Map configOptions) { this.sourceType = sourceType; this.amazonS3 = amazonS3; diff --git a/athena-federation-sdk/src/test/java/com/amazonaws/athena/connector/lambda/data/S3BlockSpillerTest.java b/athena-federation-sdk/src/test/java/com/amazonaws/athena/connector/lambda/data/S3BlockSpillerTest.java index 0c9de56318..0abc45c3ec 100644 --- a/athena-federation-sdk/src/test/java/com/amazonaws/athena/connector/lambda/data/S3BlockSpillerTest.java +++ b/athena-federation-sdk/src/test/java/com/amazonaws/athena/connector/lambda/data/S3BlockSpillerTest.java @@ -25,11 +25,6 @@ import com.amazonaws.athena.connector.lambda.domain.spill.SpillLocation; import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.PutObjectRequest; -import com.amazonaws.services.s3.model.PutObjectResult; -import com.amazonaws.services.s3.model.S3Object; -import com.amazonaws.services.s3.model.S3ObjectInputStream; import com.google.common.io.ByteStreams; import org.apache.arrow.vector.types.pojo.ArrowType; import org.apache.arrow.vector.types.pojo.Schema; @@ -44,6 +39,13 @@ import org.mockito.stubbing.Answer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import 
software.amazon.awssdk.services.s3.model.PutObjectResponse; import java.io.ByteArrayInputStream; import java.io.IOException; @@ -51,8 +53,6 @@ import static org.junit.Assert.*; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.nullable; -import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.reset; import static org.mockito.Mockito.times; @@ -71,7 +71,7 @@ public class S3BlockSpillerTest private String splitId = "splitId"; @Mock - private AmazonS3 mockS3; + private S3Client mockS3; private S3BlockSpiller blockWriter; private EncryptionKeyFactory keyFactory = new LocalKeyFactory(); @@ -130,18 +130,20 @@ public void spillTest() final ByteHolder byteHolder = new ByteHolder(); - ArgumentCaptor argument = ArgumentCaptor.forClass(PutObjectRequest.class); + ArgumentCaptor requestArgument = ArgumentCaptor.forClass(PutObjectRequest.class); + ArgumentCaptor bodyArgument = ArgumentCaptor.forClass(RequestBody.class); - when(mockS3.putObject(any())) + when(mockS3.putObject(any(PutObjectRequest.class), any(RequestBody.class))) .thenAnswer(new Answer() { @Override public Object answer(InvocationOnMock invocationOnMock) throws Throwable { - InputStream inputStream = ((PutObjectRequest) invocationOnMock.getArguments()[0]).getInputStream(); + PutObjectResponse response = PutObjectResponse.builder().build(); + InputStream inputStream = ((RequestBody) invocationOnMock.getArguments()[1]).contentStreamProvider().newStream(); byteHolder.setBytes(ByteStreams.toByteArray(inputStream)); - return mock(PutObjectResult.class); + return response; } }); @@ -151,9 +153,9 @@ public Object answer(InvocationOnMock invocationOnMock) assertEquals(bucket, ((S3SpillLocation) blockLocation).getBucket()); assertEquals(prefix + "/" + requestId + "/" + splitId + ".0", ((S3SpillLocation) blockLocation).getKey()); } - verify(mockS3, times(1)).putObject(argument.capture()); - 
assertEquals(argument.getValue().getBucketName(), bucket); - assertEquals(argument.getValue().getKey(), prefix + "/" + requestId + "/" + splitId + ".0"); + verify(mockS3, times(1)).putObject(requestArgument.capture(), bodyArgument.capture()); + assertEquals(requestArgument.getValue().bucket(), bucket); + assertEquals(requestArgument.getValue().key(), prefix + "/" + requestId + "/" + splitId + ".0"); SpillLocation blockLocation2 = blockWriter.write(expected); @@ -162,25 +164,23 @@ public Object answer(InvocationOnMock invocationOnMock) assertEquals(prefix + "/" + requestId + "/" + splitId + ".1", ((S3SpillLocation) blockLocation2).getKey()); } - verify(mockS3, times(2)).putObject(argument.capture()); - assertEquals(argument.getValue().getBucketName(), bucket); - assertEquals(argument.getValue().getKey(), prefix + "/" + requestId + "/" + splitId + ".1"); + verify(mockS3, times(2)).putObject(requestArgument.capture(), bodyArgument.capture()); + assertEquals(requestArgument.getValue().bucket(), bucket); + assertEquals(requestArgument.getValue().key(), prefix + "/" + requestId + "/" + splitId + ".1"); verifyNoMoreInteractions(mockS3); reset(mockS3); logger.info("spillTest: Starting read test."); - when(mockS3.getObject(eq(bucket), eq(prefix + "/" + requestId + "/" + splitId + ".1"))) + when(mockS3.getObject(any(GetObjectRequest.class))) .thenAnswer(new Answer() { @Override public Object answer(InvocationOnMock invocationOnMock) throws Throwable { - S3Object mockObject = mock(S3Object.class); - when(mockObject.getObjectContent()).thenReturn(new S3ObjectInputStream(new ByteArrayInputStream(byteHolder.getBytes()), null)); - return mockObject; + return new ResponseInputStream<>(GetObjectResponse.builder().build(), new ByteArrayInputStream(byteHolder.getBytes())); } }); @@ -189,7 +189,7 @@ public Object answer(InvocationOnMock invocationOnMock) assertEquals(expected, block); verify(mockS3, times(1)) - .getObject(eq(bucket), eq(prefix + "/" + requestId + "/" + splitId + 
".1")); + .getObject(any(GetObjectRequest.class)); verifyNoMoreInteractions(mockS3); diff --git a/athena-federation-sdk/src/test/java/com/amazonaws/athena/connector/lambda/domain/spill/SpillLocationVerifierTest.java b/athena-federation-sdk/src/test/java/com/amazonaws/athena/connector/lambda/domain/spill/SpillLocationVerifierTest.java index 9c44a7a84d..88ca65cd5b 100644 --- a/athena-federation-sdk/src/test/java/com/amazonaws/athena/connector/lambda/domain/spill/SpillLocationVerifierTest.java +++ b/athena-federation-sdk/src/test/java/com/amazonaws/athena/connector/lambda/domain/spill/SpillLocationVerifierTest.java @@ -20,8 +20,6 @@ * #L% */ -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.Bucket; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -29,6 +27,10 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.Bucket; +import software.amazon.awssdk.services.s3.model.ListBucketsResponse; + import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -57,7 +59,7 @@ public void setup() bucketNames = Arrays.asList("bucket1", "bucket2", "bucket3"); List buckets = createBuckets(bucketNames); - AmazonS3 mockS3 = createMockS3(buckets); + S3Client mockS3 = createMockS3(buckets); spyVerifier = spy(new SpillLocationVerifier(mockS3)); logger.info("setUpBefore - exit"); @@ -137,19 +139,19 @@ public void checkBucketAuthZFail() logger.info("checkBucketAuthZFail - exit"); } - private AmazonS3 createMockS3(List buckets) + private S3Client createMockS3(List buckets) { - AmazonS3 s3mock = mock(AmazonS3.class); - when(s3mock.listBuckets()).thenReturn(buckets); + S3Client s3mock = mock(S3Client.class); + ListBucketsResponse response = ListBucketsResponse.builder().buckets(buckets).build(); + when(s3mock.listBuckets()).thenReturn(response); return s3mock; } private List createBuckets(List names) { - List 
buckets = new ArrayList(); + List buckets = new ArrayList<>(); for (String name : names) { - Bucket bucket = mock(Bucket.class); - when(bucket.getName()).thenReturn(name); + Bucket bucket = Bucket.builder().name(name).build(); buckets.add(bucket); } diff --git a/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsRecordHandler.java b/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsRecordHandler.java index a9307fda32..4c86fe7e13 100644 --- a/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsRecordHandler.java +++ b/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsRecordHandler.java @@ -28,8 +28,6 @@ import com.amazonaws.athena.connector.lambda.domain.TableName; import com.amazonaws.athena.connector.lambda.handlers.RecordHandler; import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.arrow.dataset.file.FileFormat; @@ -50,6 +48,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.nio.charset.StandardCharsets; @@ -75,7 +74,7 @@ public class GcsRecordHandler public GcsRecordHandler(BufferAllocator allocator, java.util.Map configOptions) { - this(AmazonS3ClientBuilder.defaultClient(), + this(S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(), configOptions); this.allocator = allocator; @@ -89,7 +88,7 @@ public GcsRecordHandler(BufferAllocator allocator, java.util.Map * @param amazonAthena An instance of AmazonAthena */ @VisibleForTesting - protected GcsRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient amazonAthena, 
java.util.Map configOptions) + protected GcsRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient amazonAthena, java.util.Map configOptions) { super(amazonS3, secretsManager, amazonAthena, SOURCE_TYPE, configOptions); this.invoker = ThrottlingInvoker.newDefaultBuilder(EXCEPTION_FILTER, configOptions).build(); diff --git a/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsCompositeHandlerTest.java b/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsCompositeHandlerTest.java index 5a6d3e0fc8..bc3d54c7dc 100644 --- a/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsCompositeHandlerTest.java +++ b/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsCompositeHandlerTest.java @@ -19,8 +19,6 @@ */ package com.amazonaws.athena.connectors.gcs; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.google.auth.oauth2.GoogleCredentials; import com.google.auth.oauth2.ServiceAccountCredentials; import org.junit.jupiter.api.AfterAll; @@ -28,6 +26,7 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestInstance; import org.mockito.Mockito; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; @@ -59,10 +58,8 @@ public void init() { mockedServiceAccountCredentials.when(() -> ServiceAccountCredentials.fromStream(Mockito.any())).thenReturn(serviceAccountCredentials); credentials = Mockito.mock(GoogleCredentials.class); mockedGoogleCredentials.when(() -> GoogleCredentials.fromStream(Mockito.any())).thenReturn(credentials); - AmazonS3ClientBuilder mockedAmazonS3Builder = Mockito.mock(AmazonS3ClientBuilder.class); - AmazonS3 mockedAmazonS3 = Mockito.mock(AmazonS3.class); - 
when(mockedAmazonS3Builder.build()).thenReturn(mockedAmazonS3); - mockedS3Builder.when(AmazonS3ClientBuilder::standard).thenReturn(mockedAmazonS3Builder); + S3Client mockedAmazonS3 = Mockito.mock(S3Client.class); + when(S3Client.create()).thenReturn(mockedAmazonS3); } @AfterAll diff --git a/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsRecordHandlerTest.java b/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsRecordHandlerTest.java index 6f88aa4e3c..3e340142b7 100644 --- a/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsRecordHandlerTest.java +++ b/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GcsRecordHandlerTest.java @@ -34,8 +34,6 @@ import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory; import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.google.auth.oauth2.GoogleCredentials; import org.apache.arrow.memory.BufferAllocator; import org.apache.arrow.memory.RootAllocator; @@ -49,6 +47,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.File; @@ -105,7 +104,7 @@ public void initCommonMockedStatic() LOGGER.info("Starting init."); federatedIdentity = Mockito.mock(FederatedIdentity.class); BlockAllocator allocator = new BlockAllocatorImpl(); - AmazonS3 amazonS3 = mock(AmazonS3.class); + S3Client amazonS3 = mock(S3Client.class); // Create Spill config // This will be enough for a single block @@ -123,7 +122,7 @@ public void initCommonMockedStatic() .withSpillLocation(s3SpillLocation) .build(); // To mock AmazonS3 via AmazonS3ClientBuilder - 
mockedS3Builder.when(AmazonS3ClientBuilder::defaultClient).thenReturn(amazonS3); + mockedS3Builder.when(S3Client::create).thenReturn(amazonS3); // To mock SecretsManagerClient via SecretsManagerClient mockedSecretManagerBuilder.when(SecretsManagerClient::create).thenReturn(secretsManager); // To mock AmazonAthena via AmazonAthenaClientBuilder diff --git a/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GenericGcsTest.java b/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GenericGcsTest.java index 885e5ca4d8..7d6fbef4f4 100644 --- a/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GenericGcsTest.java +++ b/athena-gcs/src/test/java/com/amazonaws/athena/connectors/gcs/GenericGcsTest.java @@ -19,19 +19,19 @@ */ package com.amazonaws.athena.connectors.gcs; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.google.auth.oauth2.GoogleCredentials; import com.google.auth.oauth2.ServiceAccountCredentials; import org.mockito.MockedStatic; import org.mockito.Mockito; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.lang.reflect.Field; public class GenericGcsTest { - protected MockedStatic mockedS3Builder; + protected MockedStatic mockedS3Builder; protected MockedStatic mockedSecretManagerBuilder; protected MockedStatic mockedAthenaClientBuilder; protected MockedStatic mockedGoogleCredentials; @@ -41,7 +41,7 @@ public class GenericGcsTest protected void initCommonMockedStatic() { - mockedS3Builder = Mockito.mockStatic(AmazonS3ClientBuilder.class); + mockedS3Builder = Mockito.mockStatic(S3Client.class); mockedSecretManagerBuilder = Mockito.mockStatic(SecretsManagerClient.class); mockedAthenaClientBuilder = Mockito.mockStatic(AthenaClient.class); mockedGoogleCredentials = Mockito.mockStatic(GoogleCredentials.class); diff --git 
a/athena-google-bigquery/src/main/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryRecordHandler.java b/athena-google-bigquery/src/main/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryRecordHandler.java index a0d82f7284..dc0bf9dce6 100644 --- a/athena-google-bigquery/src/main/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryRecordHandler.java +++ b/athena-google-bigquery/src/main/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryRecordHandler.java @@ -29,8 +29,6 @@ import com.amazonaws.athena.connector.lambda.handlers.RecordHandler; import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest; import com.amazonaws.athena.connectors.google.bigquery.qpt.BigQueryQueryPassthrough; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.google.api.gax.rpc.ServerStream; import com.google.cloud.bigquery.BigQuery; import com.google.cloud.bigquery.BigQueryException; @@ -59,6 +57,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.IOException; @@ -91,13 +90,13 @@ public class BigQueryRecordHandler BigQueryRecordHandler(java.util.Map configOptions, BufferAllocator allocator) { - this(AmazonS3ClientBuilder.defaultClient(), + this(S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(), configOptions, allocator); } @VisibleForTesting - public BigQueryRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, java.util.Map configOptions, BufferAllocator allocator) + public BigQueryRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, java.util.Map configOptions, BufferAllocator allocator) { super(amazonS3, secretsManager, athena, BigQueryConstants.SOURCE_TYPE, 
configOptions); this.invoker = ThrottlingInvoker.newDefaultBuilder(EXCEPTION_FILTER, configOptions).build(); diff --git a/athena-google-bigquery/src/test/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryRecordHandlerTest.java b/athena-google-bigquery/src/test/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryRecordHandlerTest.java index 948ee7d677..371b939508 100644 --- a/athena-google-bigquery/src/test/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryRecordHandlerTest.java +++ b/athena-google-bigquery/src/test/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryRecordHandlerTest.java @@ -35,7 +35,6 @@ import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory; import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; -import com.amazonaws.services.s3.AmazonS3; import com.google.api.gax.rpc.ServerStream; import com.google.api.gax.rpc.ServerStreamingCallable; import com.google.cloud.bigquery.BigQuery; @@ -78,6 +77,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.nio.charset.StandardCharsets; @@ -120,7 +120,7 @@ public class BigQueryRecordHandlerTest @Mock private ArrowSchema arrowSchema; private BigQueryRecordHandler bigQueryRecordHandler; - private AmazonS3 amazonS3; + private S3Client amazonS3; private S3BlockSpiller spillWriter; private S3BlockSpillReader spillReader; private Schema schemaForRead; @@ -200,7 +200,7 @@ public void init() mockedStatic.when(() -> BigQueryUtils.getBigQueryClient(any(Map.class))).thenReturn(bigQuery); federatedIdentity = Mockito.mock(FederatedIdentity.class); allocator = new BlockAllocatorImpl(); - amazonS3 = mock(AmazonS3.class); + amazonS3 = mock(S3Client.class); //Create Spill 
config spillConfig = SpillConfig.newBuilder() diff --git a/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseKerberosUtils.java b/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseKerberosUtils.java index f8e3282ebe..56f0ddcfa8 100644 --- a/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseKerberosUtils.java +++ b/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseKerberosUtils.java @@ -19,17 +19,15 @@ */ package com.amazonaws.athena.connectors.hbase; -import com.amazonaws.auth.AWSCredentials; -import com.amazonaws.auth.AWSStaticCredentialsProvider; -import com.amazonaws.auth.DefaultAWSCredentialsProviderChain; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.s3.model.GetObjectRequest; -import com.amazonaws.services.s3.model.ObjectListing; -import com.amazonaws.services.s3.model.S3Object; -import com.amazonaws.services.s3.model.S3ObjectSummary; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; +import software.amazon.awssdk.services.s3.model.ListObjectsRequest; +import software.amazon.awssdk.services.s3.model.ListObjectsResponse; +import software.amazon.awssdk.services.s3.model.S3Object; import java.io.BufferedInputStream; import java.io.File; @@ -68,20 +66,24 @@ public static Path copyConfigFilesFromS3ToTempFolder(java.util.Map responseStream = s3Client.getObject(GetObjectRequest.builder() + .bucket(s3Bucket[0]) + .key(s3Object.key()) + .build()); + InputStream inputStream = new BufferedInputStream(responseStream); + String key = s3Object.key(); String fName = key.substring(key.indexOf('/') + 1); if (!fName.isEmpty()) { File file = new File(tempDir + 
File.separator + fName); diff --git a/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseRecordHandler.java b/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseRecordHandler.java index 8af36b5e54..ad51a3fc35 100644 --- a/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseRecordHandler.java +++ b/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseRecordHandler.java @@ -31,8 +31,6 @@ import com.amazonaws.athena.connectors.hbase.connection.HBaseConnection; import com.amazonaws.athena.connectors.hbase.connection.HbaseConnectionFactory; import com.amazonaws.athena.connectors.hbase.qpt.HbaseQueryPassthrough; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import org.apache.arrow.util.VisibleForTesting; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.ArrowType; @@ -52,6 +50,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.IOException; @@ -81,7 +80,7 @@ public class HbaseRecordHandler //Used to denote the 'type' of this connector for diagnostic purposes. 
private static final String SOURCE_TYPE = "hbase"; - private final AmazonS3 amazonS3; + private final S3Client amazonS3; private final HbaseConnectionFactory connectionFactory; private final HbaseQueryPassthrough queryPassthrough = new HbaseQueryPassthrough(); @@ -89,7 +88,7 @@ public class HbaseRecordHandler public HbaseRecordHandler(java.util.Map configOptions) { this( - AmazonS3ClientBuilder.defaultClient(), + S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(), new HbaseConnectionFactory(), @@ -97,7 +96,7 @@ public HbaseRecordHandler(java.util.Map configOptions) } @VisibleForTesting - protected HbaseRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, HbaseConnectionFactory connectionFactory, java.util.Map configOptions) + protected HbaseRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, HbaseConnectionFactory connectionFactory, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, SOURCE_TYPE, configOptions); this.amazonS3 = amazonS3; diff --git a/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/HbaseRecordHandlerTest.java b/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/HbaseRecordHandlerTest.java index 8d3ebd1b45..017608c74d 100644 --- a/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/HbaseRecordHandlerTest.java +++ b/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/HbaseRecordHandlerTest.java @@ -43,11 +43,6 @@ import com.amazonaws.athena.connectors.hbase.connection.HBaseConnection; import com.amazonaws.athena.connectors.hbase.connection.HbaseConnectionFactory; import com.amazonaws.athena.connectors.hbase.connection.ResultProcessor; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.PutObjectRequest; -import com.amazonaws.services.s3.model.PutObjectResult; -import com.amazonaws.services.s3.model.S3Object; -import 
com.amazonaws.services.s3.model.S3ObjectInputStream; import com.google.common.collect.ImmutableList; import com.google.common.io.ByteStreams; import org.apache.arrow.vector.types.Types; @@ -69,6 +64,13 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.ByteArrayInputStream; @@ -102,7 +104,7 @@ public class HbaseRecordHandlerTest private HbaseRecordHandler handler; private BlockAllocator allocator; private List mockS3Storage = new ArrayList<>(); - private AmazonS3 amazonS3; + private S3Client amazonS3; private S3BlockSpillReader spillReader; private Schema schemaForRead; private EncryptionKeyFactory keyFactory = new LocalKeyFactory(); @@ -135,33 +137,29 @@ public void setUp() allocator = new BlockAllocatorImpl(); - amazonS3 = mock(AmazonS3.class); + amazonS3 = mock(S3Client.class); - when(amazonS3.putObject(any())) + when(amazonS3.putObject(any(PutObjectRequest.class), any(RequestBody.class))) .thenAnswer((InvocationOnMock invocationOnMock) -> { - InputStream inputStream = ((PutObjectRequest) invocationOnMock.getArguments()[0]).getInputStream(); + InputStream inputStream = ((RequestBody) invocationOnMock.getArguments()[1]).contentStreamProvider().newStream(); ByteHolder byteHolder = new ByteHolder(); byteHolder.setBytes(ByteStreams.toByteArray(inputStream)); synchronized (mockS3Storage) { mockS3Storage.add(byteHolder); logger.info("puObject: total size " + mockS3Storage.size()); } - 
return mock(PutObjectResult.class); + return PutObjectResponse.builder().build(); }); - when(amazonS3.getObject(nullable(String.class), nullable(String.class))) + when(amazonS3.getObject(any(GetObjectRequest.class))) .thenAnswer((InvocationOnMock invocationOnMock) -> { - S3Object mockObject = mock(S3Object.class); ByteHolder byteHolder; synchronized (mockS3Storage) { byteHolder = mockS3Storage.get(0); mockS3Storage.remove(0); logger.info("getObject: total size " + mockS3Storage.size()); } - when(mockObject.getObjectContent()).thenReturn( - new S3ObjectInputStream( - new ByteArrayInputStream(byteHolder.getBytes()), null)); - return mockObject; + return new ResponseInputStream<>(GetObjectResponse.builder().build(), new ByteArrayInputStream(byteHolder.getBytes())); }); schemaForRead = TestUtils.makeSchema().addStringField(HbaseSchemaUtils.ROW_COLUMN_NAME).build(); diff --git a/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxRecordHandler.java b/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxRecordHandler.java index 9709e4adde..aa676b7b99 100644 --- a/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxRecordHandler.java +++ b/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxRecordHandler.java @@ -25,9 +25,9 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; -import com.amazonaws.services.s3.AmazonS3; import com.google.common.annotations.VisibleForTesting; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -57,7 +57,7 @@ public HiveMuxRecordHandler(java.util.Map configOptions) 
} @VisibleForTesting - HiveMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, + HiveMuxRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveRecordHandler.java b/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveRecordHandler.java index 5d5ab5035a..1450634a1e 100644 --- a/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveRecordHandler.java +++ b/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveRecordHandler.java @@ -28,12 +28,11 @@ import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.commons.lang3.Validate; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -60,11 +59,11 @@ public HiveRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, java } public HiveRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { - 
this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AthenaClient.create(), + this(databaseConnectionConfig, S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(), jdbcConnectionFactory, new HiveQueryStringBuilder(HIVE_QUOTE_CHARACTER, new HiveFederationExpressionParser(HIVE_QUOTE_CHARACTER)), configOptions); } @VisibleForTesting - HiveRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) + HiveRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); this.jdbcSplitQueryBuilder = Validate.notNull(jdbcSplitQueryBuilder, "query builder must not be null"); diff --git a/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxRecordHandlerTest.java b/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxRecordHandlerTest.java index 39074a4f36..32dba90175 100644 --- a/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxRecordHandlerTest.java +++ b/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveMuxRecordHandlerTest.java @@ -29,7 +29,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; -import com.amazonaws.services.s3.AmazonS3; import org.apache.arrow.vector.types.pojo.Schema; 
import org.junit.Before; import org.junit.BeforeClass; @@ -37,6 +36,7 @@ import org.mockito.Mockito; import org.testng.Assert; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -50,7 +50,7 @@ public class HiveMuxRecordHandlerTest private Map recordHandlerMap; private HiveRecordHandler hiveRecordHandler; private JdbcRecordHandler jdbcRecordHandler; - private AmazonS3 amazonS3; + private S3Client amazonS3; private SecretsManagerClient secretsManager; private AthenaClient athena; private QueryStatusChecker queryStatusChecker; @@ -64,7 +64,7 @@ public void setup() { this.hiveRecordHandler = Mockito.mock(HiveRecordHandler.class); this.recordHandlerMap = Collections.singletonMap("recordHive", this.hiveRecordHandler); - this.amazonS3 = Mockito.mock(AmazonS3.class); + this.amazonS3 = Mockito.mock(S3Client.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); diff --git a/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveRecordHandlerTest.java b/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveRecordHandlerTest.java index 5fc3438e2f..c45cfaf7c4 100644 --- a/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveRecordHandlerTest.java +++ b/athena-hortonworks-hive/src/test/java/com/amazonaws/athena/connectors/hortonworks/HiveRecordHandlerTest.java @@ -46,10 +46,10 @@ import com.amazonaws.athena.connector.lambda.domain.predicate.Range; import com.amazonaws.athena.connector.lambda.domain.predicate.SortedRangeSet; import com.amazonaws.athena.connector.lambda.domain.predicate.ValueSet; -import com.amazonaws.services.s3.AmazonS3; import com.google.common.collect.ImmutableList; 
import com.google.common.collect.ImmutableMap; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; @@ -64,7 +64,7 @@ public class HiveRecordHandlerTest private Connection connection; private JdbcConnectionFactory jdbcConnectionFactory; private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; - private AmazonS3 amazonS3; + private S3Client amazonS3; private SecretsManagerClient secretsManager; private AthenaClient athena; @@ -72,7 +72,7 @@ public class HiveRecordHandlerTest public void setup() throws Exception { - this.amazonS3 = Mockito.mock(AmazonS3.class); + this.amazonS3 = Mockito.mock(S3Client.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AthenaClient.class); Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}").build()); diff --git a/athena-jdbc/pom.xml b/athena-jdbc/pom.xml index 8ea9371ea6..a5dadefca8 100644 --- a/athena-jdbc/pom.xml +++ b/athena-jdbc/pom.xml @@ -9,33 +9,6 @@ athena-jdbc 2022.47.1 - - com.amazonaws - aws-java-sdk-core - ${aws-sdk.version} - - - com.fasterxml.jackson.datatype - jackson-datatype-jsr310 - - - com.fasterxml.jackson.dataformat - jackson-dataformat-cbor - - - com.fasterxml.jackson.core - jackson-core - - - com.fasterxml.jackson.core - jackson-databind - - - com.fasterxml.jackson.core - jackson-annotations - - - software.amazon.jsii jsii-runtime diff --git a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcRecordHandler.java 
b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcRecordHandler.java index 2b791d3454..e2cb4f227c 100644 --- a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcRecordHandler.java +++ b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcRecordHandler.java @@ -30,11 +30,11 @@ import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; -import com.amazonaws.services.s3.AmazonS3; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.commons.lang3.Validate; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -61,7 +61,7 @@ public MultiplexingJdbcRecordHandler(JdbcRecordHandlerFactory jdbcRecordHandlerF @VisibleForTesting protected MultiplexingJdbcRecordHandler( - AmazonS3 amazonS3, + S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, diff --git a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcRecordHandler.java b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcRecordHandler.java index 044488e256..9b82c5428e 100644 --- a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcRecordHandler.java +++ b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcRecordHandler.java @@ -54,7 +54,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.athena.connectors.jdbc.connection.RdsSecretsCredentialProvider; import com.amazonaws.athena.connectors.jdbc.qpt.JdbcQueryPassthrough; -import 
com.amazonaws.services.s3.AmazonS3; import org.apache.arrow.util.VisibleForTesting; import org.apache.arrow.vector.FieldVector; import org.apache.arrow.vector.holders.NullableBigIntHolder; @@ -75,6 +74,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Array; @@ -111,7 +111,7 @@ protected JdbcRecordHandler(String sourceType, java.util.Map con } protected JdbcRecordHandler( - AmazonS3 amazonS3, + S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, DatabaseConnectionConfig databaseConnectionConfig, diff --git a/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcRecordHandlerTest.java b/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcRecordHandlerTest.java index 1177f10375..60e229d6f7 100644 --- a/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcRecordHandlerTest.java +++ b/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcRecordHandlerTest.java @@ -28,12 +28,12 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; -import com.amazonaws.services.s3.AmazonS3; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -46,7 +46,7 @@ public class MultiplexingJdbcRecordHandlerTest private Map recordHandlerMap; private JdbcRecordHandler fakeJdbcRecordHandler; 
private JdbcRecordHandler jdbcRecordHandler; - private AmazonS3 amazonS3; + private S3Client amazonS3; private SecretsManagerClient secretsManager; private AthenaClient athena; private QueryStatusChecker queryStatusChecker; @@ -57,7 +57,7 @@ public void setup() { this.fakeJdbcRecordHandler = Mockito.mock(JdbcRecordHandler.class); this.recordHandlerMap = Collections.singletonMap("fakedatabase", this.fakeJdbcRecordHandler); - this.amazonS3 = Mockito.mock(AmazonS3.class); + this.amazonS3 = Mockito.mock(S3Client.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); diff --git a/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcRecordHandlerTest.java b/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcRecordHandlerTest.java index 17a0cb55e3..cfbeba5602 100644 --- a/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcRecordHandlerTest.java +++ b/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcRecordHandlerTest.java @@ -39,17 +39,18 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.PutObjectRequest; -import com.amazonaws.services.s3.model.PutObjectResult; import org.apache.arrow.vector.holders.NullableFloat8Holder; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; -import org.mockito.stubbing.Answer; +import org.mockito.invocation.InvocationOnMock; +import software.amazon.awssdk.core.sync.RequestBody; import software.amazon.awssdk.services.athena.AthenaClient; +import 
software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; @@ -75,7 +76,7 @@ public class JdbcRecordHandlerTest private JdbcRecordHandler jdbcRecordHandler; private Connection connection; private JdbcConnectionFactory jdbcConnectionFactory; - private AmazonS3 amazonS3; + private S3Client amazonS3; private SecretsManagerClient secretsManager; private AthenaClient athena; private QueryStatusChecker queryStatusChecker; @@ -89,7 +90,7 @@ public void setup() this.connection = Mockito.mock(Connection.class, Mockito.RETURNS_DEEP_STUBS); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); - this.amazonS3 = Mockito.mock(AmazonS3.class); + this.amazonS3 = Mockito.mock(S3Client.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); @@ -143,15 +144,16 @@ public void readWithConstraint() BlockSpiller s3Spiller = new S3BlockSpiller(this.amazonS3, spillConfig, allocator, fieldSchema, constraintEvaluator, com.google.common.collect.ImmutableMap.of()); ReadRecordsRequest readRecordsRequest = new ReadRecordsRequest(this.federatedIdentity, "testCatalog", "testQueryId", inputTableName, fieldSchema, splitBuilder.build(), constraints, 1024, 1024); - Mockito.when(amazonS3.putObject(any())).thenAnswer((Answer) invocation -> { - ByteArrayInputStream byteArrayInputStream = (ByteArrayInputStream) ((PutObjectRequest) invocation.getArguments()[0]).getInputStream(); - 
int n = byteArrayInputStream.available(); - byte[] bytes = new byte[n]; - byteArrayInputStream.read(bytes, 0, n); - String data = new String(bytes, StandardCharsets.UTF_8); - Assert.assertTrue(data.contains("testVal1") || data.contains("testVal2") || data.contains("testPartitionValue")); - return new PutObjectResult(); - }); + Mockito.when(amazonS3.putObject(any(PutObjectRequest.class), any(RequestBody.class))) + .thenAnswer((InvocationOnMock invocationOnMock) -> { + ByteArrayInputStream inputStream = (ByteArrayInputStream) ((RequestBody) invocationOnMock.getArguments()[1]).contentStreamProvider().newStream(); + int n = inputStream.available(); + byte[] bytes = new byte[n]; + inputStream.read(bytes, 0, n); + String data = new String(bytes, StandardCharsets.UTF_8); + Assert.assertTrue(data.contains("testVal1") || data.contains("testVal2") || data.contains("testPartitionValue")); + return PutObjectResponse.builder().build(); + }); this.jdbcRecordHandler.readWithConstraint(s3Spiller, readRecordsRequest, queryStatusChecker); } diff --git a/athena-kafka/src/main/java/com/amazonaws/athena/connectors/kafka/KafkaRecordHandler.java b/athena-kafka/src/main/java/com/amazonaws/athena/connectors/kafka/KafkaRecordHandler.java index 74d8679563..dccd950901 100644 --- a/athena-kafka/src/main/java/com/amazonaws/athena/connectors/kafka/KafkaRecordHandler.java +++ b/athena-kafka/src/main/java/com/amazonaws/athena/connectors/kafka/KafkaRecordHandler.java @@ -27,8 +27,6 @@ import com.amazonaws.athena.connectors.kafka.dto.KafkaField; import com.amazonaws.athena.connectors.kafka.dto.SplitParameters; import com.amazonaws.athena.connectors.kafka.dto.TopicResultSet; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.google.common.annotations.VisibleForTesting; import com.google.protobuf.Descriptors; import com.google.protobuf.DynamicMessage; @@ -42,6 +40,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import 
software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.time.Duration; @@ -60,14 +59,14 @@ public class KafkaRecordHandler KafkaRecordHandler(java.util.Map configOptions) { this( - AmazonS3ClientBuilder.defaultClient(), + S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(), configOptions); } @VisibleForTesting - public KafkaRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, java.util.Map configOptions) + public KafkaRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, KafkaConstants.KAFKA_SOURCE, configOptions); } diff --git a/athena-kafka/src/main/java/com/amazonaws/athena/connectors/kafka/KafkaUtils.java b/athena-kafka/src/main/java/com/amazonaws/athena/connectors/kafka/KafkaUtils.java index 7ff8296da6..0746aace06 100644 --- a/athena-kafka/src/main/java/com/amazonaws/athena/connectors/kafka/KafkaUtils.java +++ b/athena-kafka/src/main/java/com/amazonaws/athena/connectors/kafka/KafkaUtils.java @@ -24,15 +24,6 @@ import com.amazonaws.athena.connectors.kafka.dto.TopicResultSet; import com.amazonaws.athena.connectors.kafka.serde.KafkaCsvDeserializer; import com.amazonaws.athena.connectors.kafka.serde.KafkaJsonDeserializer; -import com.amazonaws.auth.AWSCredentials; -import com.amazonaws.auth.AWSStaticCredentialsProvider; -import com.amazonaws.auth.DefaultAWSCredentialsProviderChain; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.s3.model.GetObjectRequest; -import com.amazonaws.services.s3.model.ObjectListing; -import com.amazonaws.services.s3.model.S3Object; -import com.amazonaws.services.s3.model.S3ObjectSummary; import com.fasterxml.jackson.core.type.TypeReference; import 
com.fasterxml.jackson.databind.ObjectMapper; import com.google.protobuf.DynamicMessage; @@ -47,6 +38,13 @@ import org.apache.kafka.common.serialization.StringDeserializer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; +import software.amazon.awssdk.services.s3.model.ListObjectsRequest; +import software.amazon.awssdk.services.s3.model.ListObjectsResponse; +import software.amazon.awssdk.services.s3.model.S3Object; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; @@ -326,20 +324,24 @@ protected static Path copyCertificatesFromS3ToTempFolder(java.util.Map responseStream = s3Client.getObject(GetObjectRequest.builder() + .bucket(s3Bucket[0]) + .key(objectSummary.key()) + .build()); + InputStream inputStream = new BufferedInputStream(responseStream); + String key = objectSummary.key(); String fName = key.substring(key.indexOf('/') + 1); if (!fName.isEmpty()) { File file = new File(tempDir + File.separator + fName); diff --git a/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaRecordHandlerTest.java b/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaRecordHandlerTest.java index 929f8ff14b..d172b52f63 100644 --- a/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaRecordHandlerTest.java +++ b/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaRecordHandlerTest.java @@ -32,7 +32,6 @@ import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; import 
com.amazonaws.athena.connectors.kafka.dto.*; -import com.amazonaws.services.s3.AmazonS3; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.protobuf.Descriptors; @@ -62,6 +61,7 @@ import software.amazon.awssdk.services.glue.model.GetSchemaResponse; import software.amazon.awssdk.services.glue.model.GetSchemaVersionRequest; import software.amazon.awssdk.services.glue.model.GetSchemaVersionResponse; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Collections; @@ -86,7 +86,7 @@ public class KafkaRecordHandlerTest { GlueClient awsGlue; @Mock - AmazonS3 amazonS3; + S3Client amazonS3; @Mock SecretsManagerClient awsSecretsManager; diff --git a/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaUtilsTest.java b/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaUtilsTest.java index 178a303023..7baacf6180 100644 --- a/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaUtilsTest.java +++ b/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaUtilsTest.java @@ -24,12 +24,6 @@ import com.amazonaws.auth.AWSStaticCredentialsProvider; import com.amazonaws.auth.BasicAWSCredentials; import com.amazonaws.auth.DefaultAWSCredentialsProviderChain; -import com.amazonaws.services.s3.AmazonS3Client; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.s3.model.ListObjectsRequest; -import com.amazonaws.services.s3.model.ObjectListing; -import com.amazonaws.services.s3.model.S3Object; -import com.amazonaws.services.s3.model.S3ObjectSummary; import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.arrow.vector.types.Types; @@ -48,6 +42,13 @@ import org.mockito.junit.MockitoJUnitRunner; import org.mockito.MockedConstruction; import 
org.mockito.MockedStatic; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; +import software.amazon.awssdk.services.s3.model.ListObjectsRequest; +import software.amazon.awssdk.services.s3.model.ListObjectsResponse; +import software.amazon.awssdk.services.s3.model.S3Object; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; @@ -89,13 +90,7 @@ public class KafkaUtilsTest { BasicAWSCredentials credentials; @Mock - AmazonS3Client amazonS3Client; - - @Mock - AmazonS3ClientBuilder clientBuilder; - - @Mock - ObjectListing oList; + S3Client amazonS3Client; final java.util.Map configOptions = com.google.common.collect.ImmutableMap.of( @@ -105,9 +100,8 @@ public class KafkaUtilsTest { "certificates_s3_reference", "s3://kafka-connector-test-bucket/kafkafiles/", "secrets_manager_secret", "Kafka_afq"); - private MockedConstruction mockedObjectMapper; private MockedConstruction mockedDefaultCredentials; - private MockedStatic mockedS3ClientBuilder; + private MockedStatic mockedS3ClientBuilder; private MockedStatic mockedSecretsManagerClient; @@ -132,30 +126,22 @@ public void init() throws Exception { Mockito.when(secretValueResponse.secretString()).thenReturn(creds); Mockito.when(awsSecretsManager.getSecretValue(Mockito.isA(GetSecretValueRequest.class))).thenReturn(secretValueResponse); - mockedObjectMapper = Mockito.mockConstruction(ObjectMapper.class, - (mock, context) -> { - Mockito.doReturn(map).when(mock).readValue(Mockito.eq(creds), nullable(TypeReference.class)); - }); mockedDefaultCredentials = Mockito.mockConstruction(DefaultAWSCredentialsProviderChain.class, (mock, context) -> { 
Mockito.when(mock.getCredentials()).thenReturn(credentials); }); - mockedS3ClientBuilder = Mockito.mockStatic(AmazonS3ClientBuilder.class); - mockedS3ClientBuilder.when(()-> AmazonS3ClientBuilder.standard()).thenReturn(clientBuilder); - - Mockito.doReturn(clientBuilder).when(clientBuilder).withCredentials(any()); - Mockito.when(clientBuilder.build()).thenReturn(amazonS3Client); - Mockito.when(amazonS3Client.listObjects(any(), any())).thenReturn(oList); - S3Object s3Obj = new S3Object(); - s3Obj.setObjectContent(new ByteArrayInputStream("largeContentFile".getBytes())); - Mockito.when(amazonS3Client.getObject(any())).thenReturn(s3Obj); - S3ObjectSummary s3 = new S3ObjectSummary(); - s3.setKey("test/key"); - Mockito.when(oList.getObjectSummaries()).thenReturn(com.google.common.collect.ImmutableList.of(s3)); + mockedS3ClientBuilder = Mockito.mockStatic(S3Client.class); + mockedS3ClientBuilder.when(()-> S3Client.create()).thenReturn(amazonS3Client); + + S3Object s3 = S3Object.builder().key("test/key").build(); + Mockito.when(amazonS3Client.listObjects(any(ListObjectsRequest.class))).thenReturn(ListObjectsResponse.builder() + .contents(s3) + .build()); + Mockito.when(amazonS3Client.getObject(any(GetObjectRequest.class))) + .thenReturn(new ResponseInputStream<>(GetObjectResponse.builder().build(), new ByteArrayInputStream("largeContentFile".getBytes()))); } @After public void tearDown() { - mockedObjectMapper.close(); mockedDefaultCredentials.close(); mockedS3ClientBuilder.close(); mockedSecretsManagerClient.close(); diff --git a/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/AmazonMskRecordHandler.java b/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/AmazonMskRecordHandler.java index 080d72ea44..eed652e94f 100644 --- a/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/AmazonMskRecordHandler.java +++ b/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/AmazonMskRecordHandler.java @@ -27,8 +27,6 @@ import 
com.amazonaws.athena.connectors.msk.dto.MSKField; import com.amazonaws.athena.connectors.msk.dto.SplitParameters; import com.amazonaws.athena.connectors.msk.dto.TopicResultSet; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.google.common.annotations.VisibleForTesting; import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.clients.consumer.ConsumerRecord; @@ -38,6 +36,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.time.Duration; @@ -53,14 +52,14 @@ public class AmazonMskRecordHandler AmazonMskRecordHandler(java.util.Map configOptions) { this( - AmazonS3ClientBuilder.defaultClient(), + S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(), configOptions); } @VisibleForTesting - public AmazonMskRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, java.util.Map configOptions) + public AmazonMskRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, AmazonMskConstants.MSK_SOURCE, configOptions); } diff --git a/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/AmazonMskUtils.java b/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/AmazonMskUtils.java index 9a12c3bf06..9a9db0bf5c 100644 --- a/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/AmazonMskUtils.java +++ b/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/AmazonMskUtils.java @@ -24,15 +24,6 @@ import com.amazonaws.athena.connectors.msk.dto.TopicResultSet; import com.amazonaws.athena.connectors.msk.serde.MskCsvDeserializer; import com.amazonaws.athena.connectors.msk.serde.MskJsonDeserializer; -import 
com.amazonaws.auth.AWSCredentials; -import com.amazonaws.auth.AWSStaticCredentialsProvider; -import com.amazonaws.auth.DefaultAWSCredentialsProviderChain; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.s3.model.GetObjectRequest; -import com.amazonaws.services.s3.model.ObjectListing; -import com.amazonaws.services.s3.model.S3Object; -import com.amazonaws.services.s3.model.S3ObjectSummary; import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.arrow.vector.types.Types; @@ -45,6 +36,13 @@ import org.apache.kafka.common.serialization.StringDeserializer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; +import software.amazon.awssdk.services.s3.model.ListObjectsRequest; +import software.amazon.awssdk.services.s3.model.ListObjectsResponse; +import software.amazon.awssdk.services.s3.model.S3Object; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; @@ -305,20 +303,24 @@ protected static Path copyCertificatesFromS3ToTempFolder(java.util.Map responseStream = s3Client.getObject(GetObjectRequest.builder() + .bucket(s3Bucket[0]) + .key(objectSummary.key()) + .build()); + InputStream inputStream = new BufferedInputStream(responseStream); + String key = objectSummary.key(); String fName = key.substring(key.indexOf('/') + 1); if (!fName.isEmpty()) { File file = new File(tempDir + File.separator + fName); diff --git 
a/athena-msk/src/test/java/com/amazonaws/athena/connectors/msk/AmazonMskRecordHandlerTest.java b/athena-msk/src/test/java/com/amazonaws/athena/connectors/msk/AmazonMskRecordHandlerTest.java index 7fcfc6a607..e1d2cf31a1 100644 --- a/athena-msk/src/test/java/com/amazonaws/athena/connectors/msk/AmazonMskRecordHandlerTest.java +++ b/athena-msk/src/test/java/com/amazonaws/athena/connectors/msk/AmazonMskRecordHandlerTest.java @@ -32,7 +32,6 @@ import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; import com.amazonaws.athena.connectors.msk.dto.*; -import com.amazonaws.services.s3.AmazonS3; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.arrow.vector.types.pojo.Field; @@ -52,6 +51,7 @@ import org.mockito.MockitoAnnotations; import org.mockito.junit.MockitoJUnitRunner; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Collections; @@ -68,7 +68,7 @@ public class AmazonMskRecordHandlerTest { private static final ObjectMapper objectMapper = new ObjectMapper(); @Mock - AmazonS3 amazonS3; + S3Client amazonS3; @Mock SecretsManagerClient awsSecretsManager; diff --git a/athena-msk/src/test/java/com/amazonaws/athena/connectors/msk/AmazonMskUtilsTest.java b/athena-msk/src/test/java/com/amazonaws/athena/connectors/msk/AmazonMskUtilsTest.java index 1888bddbe1..36db23e1cc 100644 --- a/athena-msk/src/test/java/com/amazonaws/athena/connectors/msk/AmazonMskUtilsTest.java +++ b/athena-msk/src/test/java/com/amazonaws/athena/connectors/msk/AmazonMskUtilsTest.java @@ -24,12 +24,6 @@ import com.amazonaws.auth.AWSStaticCredentialsProvider; import com.amazonaws.auth.BasicAWSCredentials; import com.amazonaws.auth.DefaultAWSCredentialsProviderChain; -import 
com.amazonaws.services.s3.AmazonS3Client; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.s3.model.ObjectListing; -import com.amazonaws.services.s3.model.S3Object; -import com.amazonaws.services.s3.model.S3ObjectSummary; -import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.ArrowType; @@ -46,6 +40,13 @@ import org.mockito.MockedStatic; import org.mockito.Mockito; import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; +import software.amazon.awssdk.services.s3.model.ListObjectsRequest; +import software.amazon.awssdk.services.s3.model.ListObjectsResponse; +import software.amazon.awssdk.services.s3.model.S3Object; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; @@ -58,7 +59,6 @@ import static org.junit.Assert.*; import static java.util.Arrays.asList; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.nullable; @RunWith(MockitoJUnitRunner.class) public class AmazonMskUtilsTest { @@ -87,13 +87,7 @@ public class AmazonMskUtilsTest { BasicAWSCredentials credentials; @Mock - AmazonS3Client amazonS3Client; - - @Mock - AmazonS3ClientBuilder clientBuilder; - - @Mock - ObjectListing oList; + S3Client amazonS3Client; final java.util.Map configOptions = com.google.common.collect.ImmutableMap.of( "glue_registry_arn", "arn:aws:glue:us-west-2:123456789101:registry/Athena-Kafka", @@ -101,9 +95,8 @@ public class AmazonMskUtilsTest { 
"kafka_endpoint", "12.207.18.179:9092", "certificates_s3_reference", "s3://kafka-connector-test-bucket/kafkafiles/", "secrets_manager_secret", "Kafka_afq"); - private MockedConstruction mockedObjectMapper; private MockedConstruction mockedDefaultCredentials; - private MockedStatic mockedS3ClientBuilder; + private MockedStatic mockedS3ClientBuilder; private MockedStatic mockedSecretsManagerClient; @Before @@ -113,6 +106,8 @@ public void init() throws Exception { System.setProperty("aws.secretKey", "vamsajdsjkl"); mockedSecretsManagerClient = Mockito.mockStatic(SecretsManagerClient.class); mockedSecretsManagerClient.when(()-> SecretsManagerClient.create()).thenReturn(awsSecretsManager); + mockedS3ClientBuilder = Mockito.mockStatic(S3Client.class); + mockedS3ClientBuilder.when(()-> S3Client.create()).thenReturn(amazonS3Client); String creds = "{\"username\":\"admin\",\"password\":\"test\",\"keystore_password\":\"keypass\",\"truststore_password\":\"trustpass\",\"ssl_key_password\":\"sslpass\"}"; @@ -126,30 +121,20 @@ public void init() throws Exception { Mockito.when(secretValueResponse.secretString()).thenReturn(creds); Mockito.when(awsSecretsManager.getSecretValue(Mockito.isA(GetSecretValueRequest.class))).thenReturn(secretValueResponse); - mockedObjectMapper = Mockito.mockConstruction(ObjectMapper.class, - (mock, context) -> { - Mockito.doReturn(map).when(mock).readValue(Mockito.eq(creds), nullable(TypeReference.class)); - }); mockedDefaultCredentials = Mockito.mockConstruction(DefaultAWSCredentialsProviderChain.class, (mock, context) -> { Mockito.when(mock.getCredentials()).thenReturn(credentials); }); - mockedS3ClientBuilder = Mockito.mockStatic(AmazonS3ClientBuilder.class); - mockedS3ClientBuilder.when(()-> AmazonS3ClientBuilder.standard()).thenReturn(clientBuilder); - Mockito.doReturn(clientBuilder).when(clientBuilder).withCredentials(any()); - Mockito.when(clientBuilder.build()).thenReturn(amazonS3Client); - Mockito.when(amazonS3Client.listObjects(any(), 
any())).thenReturn(oList); - S3Object s3Obj = new S3Object(); - s3Obj.setObjectContent(new ByteArrayInputStream("largeContentFile".getBytes())); - Mockito.when(amazonS3Client.getObject(any())).thenReturn(s3Obj); - S3ObjectSummary s3 = new S3ObjectSummary(); - s3.setKey("test/key"); - Mockito.when(oList.getObjectSummaries()).thenReturn(com.google.common.collect.ImmutableList.of(s3)); + S3Object s3 = S3Object.builder().key("test/key").build(); + Mockito.when(amazonS3Client.listObjects(any(ListObjectsRequest.class))).thenReturn(ListObjectsResponse.builder() + .contents(s3) + .build()); + Mockito.when(amazonS3Client.getObject(any(GetObjectRequest.class))) + .thenReturn(new ResponseInputStream<>(GetObjectResponse.builder().build(), new ByteArrayInputStream("largeContentFile".getBytes()))); } @After public void tearDown() { - mockedObjectMapper.close(); mockedDefaultCredentials.close(); mockedS3ClientBuilder.close(); mockedSecretsManagerClient.close(); diff --git a/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlMuxRecordHandler.java b/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlMuxRecordHandler.java index 659262750e..a159921eed 100644 --- a/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlMuxRecordHandler.java +++ b/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlMuxRecordHandler.java @@ -25,9 +25,9 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; -import com.amazonaws.services.s3.AmazonS3; import com.google.common.annotations.VisibleForTesting; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -57,7 +57,7 @@ public 
MySqlMuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - MySqlMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, + MySqlMuxRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlRecordHandler.java b/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlRecordHandler.java index a7cb397c97..8acf177306 100644 --- a/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlRecordHandler.java +++ b/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlRecordHandler.java @@ -29,14 +29,13 @@ import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.commons.lang3.Validate; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -77,12 +76,12 @@ public MySqlRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, jav public MySqlRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map 
configOptions) { - this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AthenaClient.create(), + this(databaseConnectionConfig, S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(), jdbcConnectionFactory, new MySqlQueryStringBuilder(MYSQL_QUOTE_CHARACTER, new MySqlFederationExpressionParser(MYSQL_QUOTE_CHARACTER)), configOptions); } @VisibleForTesting - MySqlRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, final AmazonS3 amazonS3, final SecretsManagerClient secretsManager, + MySqlRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, final S3Client amazonS3, final SecretsManagerClient secretsManager, final AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); diff --git a/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlMuxJdbcRecordHandlerTest.java b/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlMuxJdbcRecordHandlerTest.java index fdb6f56ec7..ea9c543c0b 100644 --- a/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlMuxJdbcRecordHandlerTest.java +++ b/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlMuxJdbcRecordHandlerTest.java @@ -28,12 +28,12 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; -import com.amazonaws.services.s3.AmazonS3; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import 
software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -46,7 +46,7 @@ public class MySqlMuxJdbcRecordHandlerTest private Map recordHandlerMap; private MySqlRecordHandler mySqlRecordHandler; private JdbcRecordHandler jdbcRecordHandler; - private AmazonS3 amazonS3; + private S3Client amazonS3; private SecretsManagerClient secretsManager; private AthenaClient athena; private QueryStatusChecker queryStatusChecker; @@ -57,7 +57,7 @@ public void setup() { this.mySqlRecordHandler = Mockito.mock(MySqlRecordHandler.class); this.recordHandlerMap = Collections.singletonMap("mysql", this.mySqlRecordHandler); - this.amazonS3 = Mockito.mock(AmazonS3.class); + this.amazonS3 = Mockito.mock(S3Client.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); diff --git a/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlRecordHandlerTest.java b/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlRecordHandlerTest.java index e4a40ff0b5..157c08228b 100644 --- a/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlRecordHandlerTest.java +++ b/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/MySqlRecordHandlerTest.java @@ -36,7 +36,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.s3.AmazonS3; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.types.Types; @@ -47,6 +46,7 @@ import org.junit.Test; import org.mockito.Mockito; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import 
software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -66,7 +66,7 @@ public class MySqlRecordHandlerTest private Connection connection; private JdbcConnectionFactory jdbcConnectionFactory; private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; - private AmazonS3 amazonS3; + private S3Client amazonS3; private SecretsManagerClient secretsManager; private AthenaClient athena; @@ -74,7 +74,7 @@ public class MySqlRecordHandlerTest public void setup() throws Exception { - this.amazonS3 = Mockito.mock(AmazonS3.class); + this.amazonS3 = Mockito.mock(S3Client.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AthenaClient.class); this.connection = Mockito.mock(Connection.class); diff --git a/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/NeptuneRecordHandler.java b/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/NeptuneRecordHandler.java index a61278292a..2456b3aa27 100644 --- a/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/NeptuneRecordHandler.java +++ b/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/NeptuneRecordHandler.java @@ -26,12 +26,11 @@ import com.amazonaws.athena.connectors.neptune.Enums.GraphType; import com.amazonaws.athena.connectors.neptune.propertygraph.PropertyGraphHandler; import com.amazonaws.athena.connectors.neptune.rdf.RDFHandler; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import org.apache.arrow.util.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; /** @@ -63,7 +62,7 @@ public class NeptuneRecordHandler extends RecordHandler public NeptuneRecordHandler(java.util.Map configOptions) { this( - 
AmazonS3ClientBuilder.defaultClient(), + S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(), NeptuneConnection.createConnection(configOptions), @@ -72,7 +71,7 @@ public NeptuneRecordHandler(java.util.Map configOptions) @VisibleForTesting protected NeptuneRecordHandler( - AmazonS3 amazonS3, + S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient amazonAthena, NeptuneConnection neptuneConnection, diff --git a/athena-neptune/src/test/java/com/amazonaws/athena/connectors/neptune/NeptuneRecordHandlerTest.java b/athena-neptune/src/test/java/com/amazonaws/athena/connectors/neptune/NeptuneRecordHandlerTest.java index 13ff521aaf..bde646b1d3 100644 --- a/athena-neptune/src/test/java/com/amazonaws/athena/connectors/neptune/NeptuneRecordHandlerTest.java +++ b/athena-neptune/src/test/java/com/amazonaws/athena/connectors/neptune/NeptuneRecordHandlerTest.java @@ -46,11 +46,6 @@ import com.amazonaws.athena.connector.lambda.records.RemoteReadRecordsResponse; import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.PutObjectRequest; -import com.amazonaws.services.s3.model.PutObjectResult; -import com.amazonaws.services.s3.model.S3Object; -import com.amazonaws.services.s3.model.S3ObjectInputStream; import com.google.common.io.ByteStreams; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Schema; @@ -76,6 +71,13 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; +import 
software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.ByteArrayInputStream; @@ -97,7 +99,7 @@ public class NeptuneRecordHandlerTest extends TestBase { private Schema schemaPGVertexForRead; private Schema schemaPGEdgeForRead; private Schema schemaPGQueryForRead; - private AmazonS3 amazonS3; + private S3Client amazonS3; private SecretsManagerClient awsSecretsManager; private AthenaClient athena; private S3BlockSpillReader spillReader; @@ -164,34 +166,32 @@ public void setUp() { .build(); allocator = new BlockAllocatorImpl(); - amazonS3 = mock(AmazonS3.class); + amazonS3 = mock(S3Client.class); awsSecretsManager = mock(SecretsManagerClient.class); athena = mock(AthenaClient.class); - when(amazonS3.putObject(any())) + when(amazonS3.putObject(any(PutObjectRequest.class), any(RequestBody.class))) .thenAnswer((InvocationOnMock invocationOnMock) -> { - InputStream inputStream = ((PutObjectRequest) invocationOnMock.getArguments()[0]).getInputStream(); + InputStream inputStream = ((RequestBody) invocationOnMock.getArguments()[1]).contentStreamProvider().newStream(); ByteHolder byteHolder = new ByteHolder(); byteHolder.setBytes(ByteStreams.toByteArray(inputStream)); synchronized (mockS3Storage) { mockS3Storage.add(byteHolder); logger.info("puObject: total size " + mockS3Storage.size()); } - return mock(PutObjectResult.class); + return PutObjectResponse.builder().build(); }); - when(amazonS3.getObject(nullable(String.class), nullable(String.class))).thenAnswer((InvocationOnMock invocationOnMock) -> { - S3Object mockObject = mock(S3Object.class); - ByteHolder byteHolder; - synchronized (mockS3Storage) { - byteHolder = mockS3Storage.get(0); - mockS3Storage.remove(0); - logger.info("getObject: total size " + mockS3Storage.size()); - } - when(mockObject.getObjectContent()).thenReturn( - new S3ObjectInputStream(new 
ByteArrayInputStream(byteHolder.getBytes()), null)); - return mockObject; - }); + when(amazonS3.getObject(any(GetObjectRequest.class))) + .thenAnswer((InvocationOnMock invocationOnMock) -> { + ByteHolder byteHolder; + synchronized (mockS3Storage) { + byteHolder = mockS3Storage.get(0); + mockS3Storage.remove(0); + logger.info("getObject: total size " + mockS3Storage.size()); + } + return new ResponseInputStream<>(GetObjectResponse.builder().build(), new ByteArrayInputStream(byteHolder.getBytes())); + }); handler = new NeptuneRecordHandler(amazonS3, awsSecretsManager, athena, neptuneConnection, com.google.common.collect.ImmutableMap.of()); spillReader = new S3BlockSpillReader(amazonS3, allocator); diff --git a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMuxRecordHandler.java b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMuxRecordHandler.java index 0b6d2f00b2..9a1f0a09ae 100644 --- a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMuxRecordHandler.java +++ b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMuxRecordHandler.java @@ -25,9 +25,9 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; -import com.amazonaws.services.s3.AmazonS3; import com.google.common.annotations.VisibleForTesting; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -57,7 +57,7 @@ public OracleMuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - OracleMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, + OracleMuxRecordHandler(S3Client amazonS3, 
SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleRecordHandler.java b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleRecordHandler.java index 9d4f94bc30..c312b87f5b 100644 --- a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleRecordHandler.java +++ b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleRecordHandler.java @@ -28,14 +28,13 @@ import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.commons.lang3.Validate; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -75,12 +74,12 @@ public OracleRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, ja public OracleRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { - this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AthenaClient.create(), + this(databaseConnectionConfig, S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(), 
jdbcConnectionFactory, new OracleQueryStringBuilder(ORACLE_QUOTE_CHARACTER, new OracleFederationExpressionParser(ORACLE_QUOTE_CHARACTER)), configOptions); } @VisibleForTesting - OracleRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, final AmazonS3 amazonS3, final SecretsManagerClient secretsManager, + OracleRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, final S3Client amazonS3, final SecretsManagerClient secretsManager, final AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); diff --git a/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMuxJdbcMetadataHandlerTest.java b/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMuxJdbcMetadataHandlerTest.java index 67907d9b31..537cc0c969 100644 --- a/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMuxJdbcMetadataHandlerTest.java +++ b/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMuxJdbcMetadataHandlerTest.java @@ -32,8 +32,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; -import com.amazonaws.athena.connectors.oracle.OracleMetadataHandler; -import com.amazonaws.athena.connectors.oracle.OracleMuxMetadataHandler; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; diff --git a/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMuxJdbcRecordHandlerTest.java b/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMuxJdbcRecordHandlerTest.java index 485c1f0ba6..1ec10050b3 100644 --- 
a/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMuxJdbcRecordHandlerTest.java +++ b/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMuxJdbcRecordHandlerTest.java @@ -30,12 +30,12 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.oracle.OracleMuxRecordHandler; import com.amazonaws.athena.connectors.oracle.OracleRecordHandler; -import com.amazonaws.services.s3.AmazonS3; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -48,7 +48,7 @@ public class OracleMuxJdbcRecordHandlerTest private Map recordHandlerMap; private OracleRecordHandler oracleRecordHandler; private JdbcRecordHandler jdbcRecordHandler; - private AmazonS3 amazonS3; + private S3Client amazonS3; private SecretsManagerClient secretsManager; private AthenaClient athena; private QueryStatusChecker queryStatusChecker; @@ -59,7 +59,7 @@ public void setup() { this.oracleRecordHandler = Mockito.mock(OracleRecordHandler.class); this.recordHandlerMap = Collections.singletonMap("oracle", this.oracleRecordHandler); - this.amazonS3 = Mockito.mock(AmazonS3.class); + this.amazonS3 = Mockito.mock(S3Client.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); diff --git a/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleRecordHandlerTest.java b/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleRecordHandlerTest.java index 4d0a887602..2e2f026297 100644 --- 
a/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleRecordHandlerTest.java +++ b/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleRecordHandlerTest.java @@ -32,7 +32,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.s3.AmazonS3; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.types.Types; @@ -42,6 +41,7 @@ import org.junit.Test; import org.mockito.Mockito; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -58,7 +58,7 @@ public class OracleRecordHandlerTest private Connection connection; private JdbcConnectionFactory jdbcConnectionFactory; private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; - private AmazonS3 amazonS3; + private S3Client amazonS3; private SecretsManagerClient secretsManager; private AthenaClient athena; @@ -69,7 +69,7 @@ public class OracleRecordHandlerTest public void setup() throws Exception { - this.amazonS3 = Mockito.mock(AmazonS3.class); + this.amazonS3 = Mockito.mock(S3Client.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AthenaClient.class); this.connection = Mockito.mock(Connection.class); diff --git a/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxRecordHandler.java b/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxRecordHandler.java index b6adea0a75..8b98b0813f 100644 --- a/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxRecordHandler.java +++ 
b/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxRecordHandler.java @@ -25,9 +25,9 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; -import com.amazonaws.services.s3.AmazonS3; import com.google.common.annotations.VisibleForTesting; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -57,7 +57,7 @@ public PostGreSqlMuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - PostGreSqlMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, + PostGreSqlMuxRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlRecordHandler.java b/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlRecordHandler.java index d054ff3d06..0c89828a66 100644 --- a/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlRecordHandler.java +++ b/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlRecordHandler.java @@ -29,14 +29,13 @@ import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import 
com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.commons.lang3.Validate; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -69,12 +68,12 @@ public PostGreSqlRecordHandler(java.util.Map configOptions) public PostGreSqlRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { - this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AthenaClient.create(), + this(databaseConnectionConfig, S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(), new GenericJdbcConnectionFactory(databaseConnectionConfig, PostGreSqlMetadataHandler.JDBC_PROPERTIES, new DatabaseConnectionInfo(POSTGRESQL_DRIVER_CLASS, POSTGRESQL_DEFAULT_PORT)), new PostGreSqlQueryStringBuilder(POSTGRES_QUOTE_CHARACTER, new PostgreSqlFederationExpressionParser(POSTGRES_QUOTE_CHARACTER)), configOptions); } @VisibleForTesting - protected PostGreSqlRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, SecretsManagerClient secretsManager, + protected PostGreSqlRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); diff --git 
a/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxJdbcRecordHandlerTest.java b/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxJdbcRecordHandlerTest.java index dd7a6a7736..eadd042db0 100644 --- a/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxJdbcRecordHandlerTest.java +++ b/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlMuxJdbcRecordHandlerTest.java @@ -28,12 +28,12 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; -import com.amazonaws.services.s3.AmazonS3; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -46,7 +46,7 @@ public class PostGreSqlMuxJdbcRecordHandlerTest private Map recordHandlerMap; private PostGreSqlRecordHandler postGreSqlRecordHandler; private JdbcRecordHandler jdbcRecordHandler; - private AmazonS3 amazonS3; + private S3Client amazonS3; private SecretsManagerClient secretsManager; private AthenaClient athena; private QueryStatusChecker queryStatusChecker; @@ -57,7 +57,7 @@ public void setup() { this.postGreSqlRecordHandler = Mockito.mock(PostGreSqlRecordHandler.class); this.recordHandlerMap = Collections.singletonMap("postgres", this.postGreSqlRecordHandler); - this.amazonS3 = Mockito.mock(AmazonS3.class); + this.amazonS3 = Mockito.mock(S3Client.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = 
Mockito.mock(QueryStatusChecker.class); diff --git a/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlRecordHandlerTest.java b/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlRecordHandlerTest.java index 96b879d41e..123093f31f 100644 --- a/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlRecordHandlerTest.java +++ b/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlRecordHandlerTest.java @@ -33,7 +33,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.s3.AmazonS3; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.types.Types; @@ -46,6 +45,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.math.BigDecimal; @@ -68,7 +68,7 @@ public class PostGreSqlRecordHandlerTest extends TestBase private Connection connection; private JdbcConnectionFactory jdbcConnectionFactory; private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; - private AmazonS3 amazonS3; + private S3Client amazonS3; private SecretsManagerClient secretsManager; private AthenaClient athena; @@ -76,7 +76,7 @@ public class PostGreSqlRecordHandlerTest extends TestBase public void setup() throws Exception { - this.amazonS3 = Mockito.mock(AmazonS3.class); + this.amazonS3 = Mockito.mock(S3Client.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AthenaClient.class); this.connection = Mockito.mock(Connection.class); diff --git 
a/athena-redis/pom.xml b/athena-redis/pom.xml index b4faced6d0..fa13974351 100644 --- a/athena-redis/pom.xml +++ b/athena-redis/pom.xml @@ -9,33 +9,6 @@ athena-redis 2022.47.1 - - com.amazonaws - aws-java-sdk-core - ${aws-sdk.version} - - - com.fasterxml.jackson.datatype - jackson-datatype-jsr310 - - - com.fasterxml.jackson.dataformat - jackson-dataformat-cbor - - - com.fasterxml.jackson.core - jackson-core - - - com.fasterxml.jackson.core - jackson-databind - - - com.fasterxml.jackson.core - jackson-annotations - - - software.amazon.jsii jsii-runtime diff --git a/athena-redis/src/main/java/com/amazonaws/athena/connectors/redis/RedisRecordHandler.java b/athena-redis/src/main/java/com/amazonaws/athena/connectors/redis/RedisRecordHandler.java index 837e2decb1..5981aface2 100644 --- a/athena-redis/src/main/java/com/amazonaws/athena/connectors/redis/RedisRecordHandler.java +++ b/athena-redis/src/main/java/com/amazonaws/athena/connectors/redis/RedisRecordHandler.java @@ -29,8 +29,6 @@ import com.amazonaws.athena.connectors.redis.lettuce.RedisConnectionFactory; import com.amazonaws.athena.connectors.redis.lettuce.RedisConnectionWrapper; import com.amazonaws.athena.connectors.redis.qpt.RedisQueryPassthrough; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import io.lettuce.core.KeyScanCursor; import io.lettuce.core.ScanArgs; import io.lettuce.core.ScanCursor; @@ -42,6 +40,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.HashMap; @@ -86,14 +85,14 @@ public class RedisRecordHandler private static final int SCAN_COUNT_SIZE = 100; private final RedisConnectionFactory redisConnectionFactory; - private final AmazonS3 amazonS3; + private final S3Client amazonS3; private final RedisQueryPassthrough 
queryPassthrough = new RedisQueryPassthrough(); public RedisRecordHandler(java.util.Map configOptions) { this( - AmazonS3ClientBuilder.standard().build(), + S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(), new RedisConnectionFactory(), @@ -101,7 +100,7 @@ public RedisRecordHandler(java.util.Map configOptions) } @VisibleForTesting - protected RedisRecordHandler(AmazonS3 amazonS3, + protected RedisRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, RedisConnectionFactory redisConnectionFactory, diff --git a/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/RedisRecordHandlerTest.java b/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/RedisRecordHandlerTest.java index b0c774c423..d330af3ca3 100644 --- a/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/RedisRecordHandlerTest.java +++ b/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/RedisRecordHandlerTest.java @@ -40,11 +40,6 @@ import com.amazonaws.athena.connectors.redis.lettuce.RedisConnectionWrapper; import com.amazonaws.athena.connectors.redis.util.MockKeyScanCursor; import com.amazonaws.athena.connectors.redis.util.MockScoredValueScanCursor; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.PutObjectRequest; -import com.amazonaws.services.s3.model.PutObjectResult; -import com.amazonaws.services.s3.model.S3Object; -import com.amazonaws.services.s3.model.S3ObjectInputStream; import com.google.common.collect.ImmutableList; import com.google.common.io.ByteStreams; import io.lettuce.core.ScanArgs; @@ -66,6 +61,13 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; 
+import software.amazon.awssdk.services.s3.model.GetObjectResponse; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; @@ -105,7 +107,7 @@ public class RedisRecordHandlerTest private RedisRecordHandler handler; private BlockAllocator allocator; private List mockS3Storage = new ArrayList<>(); - private AmazonS3 amazonS3; + private S3Client amazonS3; private S3BlockSpillReader spillReader; private EncryptionKeyFactory keyFactory = new LocalKeyFactory(); @@ -137,33 +139,29 @@ public void setUp() allocator = new BlockAllocatorImpl(); - amazonS3 = mock(AmazonS3.class); + amazonS3 = mock(S3Client.class); - Mockito.lenient().when(amazonS3.putObject(any())) + Mockito.lenient().when(amazonS3.putObject(any(PutObjectRequest.class), any(RequestBody.class))) .thenAnswer((InvocationOnMock invocationOnMock) -> { - InputStream inputStream = ((PutObjectRequest) invocationOnMock.getArguments()[0]).getInputStream(); + InputStream inputStream = ((RequestBody) invocationOnMock.getArguments()[1]).contentStreamProvider().newStream(); ByteHolder byteHolder = new ByteHolder(); byteHolder.setBytes(ByteStreams.toByteArray(inputStream)); synchronized (mockS3Storage) { mockS3Storage.add(byteHolder); logger.info("puObject: total size " + mockS3Storage.size()); } - return mock(PutObjectResult.class); + return PutObjectResponse.builder().build(); }); - Mockito.lenient().when(amazonS3.getObject(nullable(String.class), nullable(String.class))) + Mockito.lenient().when(amazonS3.getObject(any(GetObjectRequest.class))) .thenAnswer((InvocationOnMock invocationOnMock) -> { - S3Object mockObject = mock(S3Object.class); ByteHolder byteHolder; synchronized (mockS3Storage) { byteHolder 
= mockS3Storage.get(0); mockS3Storage.remove(0); logger.info("getObject: total size " + mockS3Storage.size()); } - when(mockObject.getObjectContent()).thenReturn( - new S3ObjectInputStream( - new ByteArrayInputStream(byteHolder.getBytes()), null)); - return mockObject; + return new ResponseInputStream<>(GetObjectResponse.builder().build(), new ByteArrayInputStream(byteHolder.getBytes())); }); when(mockSecretsManager.getSecretValue(nullable(GetSecretValueRequest.class))) diff --git a/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxRecordHandler.java b/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxRecordHandler.java index 38b1b26a24..2fe7b8fa3e 100644 --- a/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxRecordHandler.java +++ b/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxRecordHandler.java @@ -25,9 +25,9 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; -import com.amazonaws.services.s3.AmazonS3; import com.google.common.annotations.VisibleForTesting; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -58,7 +58,7 @@ public RedshiftMuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - RedshiftMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, + RedshiftMuxRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { 
super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftRecordHandler.java b/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftRecordHandler.java index cdb9db0954..8684595097 100644 --- a/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftRecordHandler.java +++ b/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftRecordHandler.java @@ -30,12 +30,11 @@ import com.amazonaws.athena.connectors.postgresql.PostGreSqlQueryStringBuilder; import com.amazonaws.athena.connectors.postgresql.PostGreSqlRecordHandler; import com.amazonaws.athena.connectors.postgresql.PostgreSqlFederationExpressionParser; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import org.apache.arrow.util.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import static com.amazonaws.athena.connectors.postgresql.PostGreSqlConstants.POSTGRES_QUOTE_CHARACTER; @@ -60,12 +59,12 @@ public RedshiftRecordHandler(java.util.Map configOptions) public RedshiftRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { - super(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AthenaClient.create(), + super(databaseConnectionConfig, S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(), new GenericJdbcConnectionFactory(databaseConnectionConfig, PostGreSqlMetadataHandler.JDBC_PROPERTIES, new DatabaseConnectionInfo(REDSHIFT_DRIVER_CLASS, REDSHIFT_DEFAULT_PORT)), new PostGreSqlQueryStringBuilder(POSTGRES_QUOTE_CHARACTER, 
new PostgreSqlFederationExpressionParser(POSTGRES_QUOTE_CHARACTER)), configOptions); } @VisibleForTesting - RedshiftRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) + RedshiftRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(databaseConnectionConfig, amazonS3, secretsManager, athena, jdbcConnectionFactory, jdbcSplitQueryBuilder, configOptions); } diff --git a/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxJdbcRecordHandlerTest.java b/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxJdbcRecordHandlerTest.java index 4e0ff391cf..2a4abf05bc 100644 --- a/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxJdbcRecordHandlerTest.java +++ b/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftMuxJdbcRecordHandlerTest.java @@ -28,12 +28,12 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; -import com.amazonaws.services.s3.AmazonS3; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -46,7 +46,7 @@ public class RedshiftMuxJdbcRecordHandlerTest private Map 
recordHandlerMap; private RedshiftRecordHandler redshiftRecordHandler; private JdbcRecordHandler jdbcRecordHandler; - private AmazonS3 amazonS3; + private S3Client amazonS3; private SecretsManagerClient secretsManager; private AthenaClient athena; private QueryStatusChecker queryStatusChecker; @@ -57,7 +57,7 @@ public void setup() { this.redshiftRecordHandler = Mockito.mock(RedshiftRecordHandler.class); this.recordHandlerMap = Collections.singletonMap("redshift", this.redshiftRecordHandler); - this.amazonS3 = Mockito.mock(AmazonS3.class); + this.amazonS3 = Mockito.mock(S3Client.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); diff --git a/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftRecordHandlerTest.java b/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftRecordHandlerTest.java index d9e2508b22..c9242f17a8 100644 --- a/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftRecordHandlerTest.java +++ b/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftRecordHandlerTest.java @@ -36,7 +36,6 @@ import com.amazonaws.athena.connectors.postgresql.PostGreSqlMetadataHandler; import com.amazonaws.athena.connectors.postgresql.PostGreSqlQueryStringBuilder; import com.amazonaws.athena.connectors.postgresql.PostgreSqlFederationExpressionParser; -import com.amazonaws.services.s3.AmazonS3; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.types.Types; @@ -49,6 +48,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.math.BigDecimal; @@ -71,7 +71,7 @@ 
public class RedshiftRecordHandlerTest private Connection connection; private JdbcConnectionFactory jdbcConnectionFactory; private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; - private AmazonS3 amazonS3; + private S3Client amazonS3; private SecretsManagerClient secretsManager; private AthenaClient athena; @@ -79,7 +79,7 @@ public class RedshiftRecordHandlerTest public void setup() throws Exception { - this.amazonS3 = Mockito.mock(AmazonS3.class); + this.amazonS3 = Mockito.mock(S3Client.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AthenaClient.class); this.connection = Mockito.mock(Connection.class); diff --git a/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxRecordHandler.java b/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxRecordHandler.java index a61d340fc7..2414854794 100644 --- a/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxRecordHandler.java +++ b/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxRecordHandler.java @@ -25,9 +25,9 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; -import com.amazonaws.services.s3.AmazonS3; import com.google.common.annotations.VisibleForTesting; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -55,7 +55,7 @@ public SaphanaMuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - SaphanaMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, + SaphanaMuxRecordHandler(S3Client amazonS3, SecretsManagerClient 
secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaRecordHandler.java b/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaRecordHandler.java index 750bdf2434..67f7a93e6a 100644 --- a/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaRecordHandler.java +++ b/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaRecordHandler.java @@ -31,8 +31,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.types.pojo.Schema; @@ -40,6 +38,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -66,7 +65,7 @@ public SaphanaRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, j SaphanaConstants.SAPHANA_DEFAULT_PORT)), configOptions); } @VisibleForTesting - SaphanaRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) + 
SaphanaRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); this.jdbcSplitQueryBuilder = Validate.notNull(jdbcSplitQueryBuilder, "query builder must not be null"); @@ -74,7 +73,7 @@ public SaphanaRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, j public SaphanaRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, GenericJdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { - this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), + this(databaseConnectionConfig, S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(), jdbcConnectionFactory, new SaphanaQueryStringBuilder(SAPHANA_QUOTE_CHARACTER, new SaphanaFederationExpressionParser(SAPHANA_QUOTE_CHARACTER)), configOptions); } diff --git a/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxJdbcRecordHandlerTest.java b/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxJdbcRecordHandlerTest.java index 4acddbca38..5f17964b44 100644 --- a/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxJdbcRecordHandlerTest.java +++ b/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaMuxJdbcRecordHandlerTest.java @@ -28,12 +28,12 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; -import com.amazonaws.services.s3.AmazonS3; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.Test; import 
org.mockito.Mockito; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -46,7 +46,7 @@ public class SaphanaMuxJdbcRecordHandlerTest private Map recordHandlerMap; private SaphanaRecordHandler saphanaRecordHandler; private JdbcRecordHandler jdbcRecordHandler; - private AmazonS3 amazonS3; + private S3Client amazonS3; private SecretsManagerClient secretsManager; private AthenaClient athena; private QueryStatusChecker queryStatusChecker; @@ -57,7 +57,7 @@ public void setup() { this.saphanaRecordHandler = Mockito.mock(SaphanaRecordHandler.class); this.recordHandlerMap = Collections.singletonMap("saphana", this.saphanaRecordHandler); - this.amazonS3 = Mockito.mock(AmazonS3.class); + this.amazonS3 = Mockito.mock(S3Client.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); diff --git a/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaRecordHandlerTest.java b/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaRecordHandlerTest.java index 80934a3f8d..c48ced9e6c 100644 --- a/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaRecordHandlerTest.java +++ b/athena-saphana/src/test/java/com/amazonaws/athena/connectors/saphana/SaphanaRecordHandlerTest.java @@ -32,7 +32,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.s3.AmazonS3; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.types.Types; @@ -42,6 +41,7 @@ import 
org.junit.Test; import org.mockito.Mockito; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -58,7 +58,7 @@ public class SaphanaRecordHandlerTest private Connection connection; private JdbcConnectionFactory jdbcConnectionFactory; private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; - private AmazonS3 amazonS3; + private S3Client amazonS3; private SecretsManagerClient secretsManager; private AthenaClient athena; @@ -66,7 +66,7 @@ public class SaphanaRecordHandlerTest public void setup() throws Exception { - this.amazonS3 = Mockito.mock(AmazonS3.class); + this.amazonS3 = Mockito.mock(S3Client.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AthenaClient.class); this.connection = Mockito.mock(Connection.class); diff --git a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxRecordHandler.java b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxRecordHandler.java index 3874591f69..2fb0812375 100644 --- a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxRecordHandler.java +++ b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxRecordHandler.java @@ -25,9 +25,9 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; -import com.amazonaws.services.s3.AmazonS3; import com.google.common.annotations.VisibleForTesting; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -55,7 +55,7 @@ public 
SnowflakeMuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - SnowflakeMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, + SnowflakeMuxRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeRecordHandler.java b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeRecordHandler.java index 57acce2f23..28ac13ff21 100644 --- a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeRecordHandler.java +++ b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeRecordHandler.java @@ -30,12 +30,11 @@ import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.commons.lang3.Validate; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -66,11 +65,11 @@ public SnowflakeRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, } public SnowflakeRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, GenericJdbcConnectionFactory jdbcConnectionFactory, 
java.util.Map configOptions) { - this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AthenaClient.create(), + this(databaseConnectionConfig, S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(), jdbcConnectionFactory, new SnowflakeQueryStringBuilder(SNOWFLAKE_QUOTE_CHARACTER, new SnowflakeFederationExpressionParser(SNOWFLAKE_QUOTE_CHARACTER)), configOptions); } @VisibleForTesting - SnowflakeRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, final AmazonS3 amazonS3, final SecretsManagerClient secretsManager, + SnowflakeRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, final S3Client amazonS3, final SecretsManagerClient secretsManager, final AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); diff --git a/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandlerTest.java b/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandlerTest.java index 3acd762712..6a219a3b1f 100644 --- a/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandlerTest.java +++ b/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandlerTest.java @@ -29,16 +29,16 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; -import software.amazon.awssdk.services.athena.AthenaClient; -import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; -import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; -import 
software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; import org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; +import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; import java.sql.*; import java.util.*; diff --git a/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxJdbcRecordHandlerTest.java b/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxJdbcRecordHandlerTest.java index 2e7c0b70cb..367fde0afc 100644 --- a/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxJdbcRecordHandlerTest.java +++ b/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMuxJdbcRecordHandlerTest.java @@ -30,12 +30,12 @@ import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.snowflake.SnowflakeMuxRecordHandler; import com.amazonaws.athena.connectors.snowflake.SnowflakeRecordHandler; -import com.amazonaws.services.s3.AmazonS3; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -50,7 +50,7 @@ public class SnowflakeMuxJdbcRecordHandlerTest private Map recordHandlerMap; private SnowflakeRecordHandler snowflakeRecordHandler; private JdbcRecordHandler jdbcRecordHandler; - private AmazonS3 amazonS3; + private 
S3Client amazonS3; private SecretsManagerClient secretsManager; private AthenaClient athena; private QueryStatusChecker queryStatusChecker; @@ -61,7 +61,7 @@ public void setup() { this.snowflakeRecordHandler = Mockito.mock(SnowflakeRecordHandler.class); this.recordHandlerMap = Collections.singletonMap("snowflake", this.snowflakeRecordHandler); - this.amazonS3 = Mockito.mock(AmazonS3.class); + this.amazonS3 = Mockito.mock(S3Client.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); diff --git a/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeRecordHandlerTest.java b/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeRecordHandlerTest.java index e7f4813d34..56531dcdac 100644 --- a/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeRecordHandlerTest.java +++ b/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeRecordHandlerTest.java @@ -33,7 +33,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.s3.AmazonS3; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.types.Types; @@ -43,6 +42,7 @@ import org.junit.Test; import org.mockito.Mockito; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -59,7 +59,7 @@ public class SnowflakeRecordHandlerTest private Connection connection; private JdbcConnectionFactory jdbcConnectionFactory; private JdbcSplitQueryBuilder 
jdbcSplitQueryBuilder; - private AmazonS3 amazonS3; + private S3Client amazonS3; private SecretsManagerClient secretsManager; private AthenaClient athena; @@ -67,7 +67,7 @@ public class SnowflakeRecordHandlerTest public void setup() throws Exception { - this.amazonS3 = Mockito.mock(AmazonS3.class); + this.amazonS3 = Mockito.mock(S3Client.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AthenaClient.class); this.connection = Mockito.mock(Connection.class); diff --git a/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxRecordHandler.java b/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxRecordHandler.java index 7282b19ac4..e9d5009639 100644 --- a/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxRecordHandler.java +++ b/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxRecordHandler.java @@ -25,9 +25,9 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; -import com.amazonaws.services.s3.AmazonS3; import com.google.common.annotations.VisibleForTesting; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -55,7 +55,7 @@ public SqlServerMuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - SqlServerMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, + SqlServerMuxRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, 
Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerRecordHandler.java b/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerRecordHandler.java index 6bdd298a57..073f5ad946 100644 --- a/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerRecordHandler.java +++ b/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerRecordHandler.java @@ -29,12 +29,11 @@ import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.commons.lang3.Validate; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -61,12 +60,12 @@ public SqlServerRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, public SqlServerRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { - this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), + this(databaseConnectionConfig, S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(), jdbcConnectionFactory, new SqlServerQueryStringBuilder(SQLSERVER_QUOTE_CHARACTER, new SqlServerFederationExpressionParser(SQLSERVER_QUOTE_CHARACTER)), configOptions); } @VisibleForTesting - 
SqlServerRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, final AmazonS3 amazonS3, final SecretsManagerClient secretsManager, + SqlServerRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, final S3Client amazonS3, final SecretsManagerClient secretsManager, final AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); diff --git a/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxRecordHandlerTest.java b/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxRecordHandlerTest.java index e5074306b3..e6faa255d9 100644 --- a/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxRecordHandlerTest.java +++ b/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerMuxRecordHandlerTest.java @@ -28,12 +28,12 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; -import com.amazonaws.services.s3.AmazonS3; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -46,7 +46,7 @@ public class SqlServerMuxRecordHandlerTest private Map recordHandlerMap; private SqlServerRecordHandler sqlServerRecordHandler; private JdbcRecordHandler jdbcRecordHandler; - private AmazonS3 amazonS3; + private S3Client amazonS3; private SecretsManagerClient secretsManager; private AthenaClient athena; 
private QueryStatusChecker queryStatusChecker; @@ -57,7 +57,7 @@ public void setup() { this.sqlServerRecordHandler = Mockito.mock(SqlServerRecordHandler.class); this.recordHandlerMap = Collections.singletonMap(SqlServerConstants.NAME, this.sqlServerRecordHandler); - this.amazonS3 = Mockito.mock(AmazonS3.class); + this.amazonS3 = Mockito.mock(S3Client.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); diff --git a/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerRecordHandlerTest.java b/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerRecordHandlerTest.java index 58cc7a8dc6..c6f8f659dd 100644 --- a/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerRecordHandlerTest.java +++ b/athena-sqlserver/src/test/java/com/amazonaws/athena/connectors/sqlserver/SqlServerRecordHandlerTest.java @@ -32,7 +32,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.s3.AmazonS3; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Schema; @@ -41,6 +40,7 @@ import org.junit.Test; import org.mockito.Mockito; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -57,7 +57,7 @@ public class SqlServerRecordHandlerTest private Connection connection; private JdbcConnectionFactory jdbcConnectionFactory; private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; - private AmazonS3 amazonS3; + private S3Client amazonS3; 
private SecretsManagerClient secretsManager; private AthenaClient athena; @@ -66,7 +66,7 @@ public void setup() throws Exception { System.setProperty("aws.region", "us-east-1"); - this.amazonS3 = Mockito.mock(AmazonS3.class); + this.amazonS3 = Mockito.mock(S3Client.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AthenaClient.class); this.connection = Mockito.mock(Connection.class); diff --git a/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseMuxRecordHandler.java b/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseMuxRecordHandler.java index 38e47cbd02..6fabc20bf7 100644 --- a/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseMuxRecordHandler.java +++ b/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseMuxRecordHandler.java @@ -24,9 +24,9 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; -import com.amazonaws.services.s3.AmazonS3; import com.google.common.annotations.VisibleForTesting; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -54,7 +54,7 @@ public SynapseMuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - SynapseMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, + SynapseMuxRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, 
jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseRecordHandler.java b/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseRecordHandler.java index 11e5d74148..a7a6aed815 100644 --- a/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseRecordHandler.java +++ b/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseRecordHandler.java @@ -33,8 +33,6 @@ import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.ArrowType; import org.apache.arrow.vector.types.pojo.Field; @@ -43,6 +41,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -64,14 +63,14 @@ public SynapseRecordHandler(java.util.Map configOptions) } public SynapseRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { - this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), + this(databaseConnectionConfig, S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(), new SynapseJdbcConnectionFactory(databaseConnectionConfig, SynapseMetadataHandler.JDBC_PROPERTIES, new DatabaseConnectionInfo(SynapseConstants.DRIVER_CLASS, SynapseConstants.DEFAULT_PORT)), new SynapseQueryStringBuilder(QUOTE_CHARACTER, new SynapseFederationExpressionParser(QUOTE_CHARACTER)), 
configOptions); } @VisibleForTesting - SynapseRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, final AmazonS3 amazonS3, final SecretsManagerClient secretsManager, + SynapseRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, final S3Client amazonS3, final SecretsManagerClient secretsManager, final AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); diff --git a/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseMetadataHandlerTest.java b/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseMetadataHandlerTest.java index 4138897089..7e90ae436f 100644 --- a/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseMetadataHandlerTest.java +++ b/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseMetadataHandlerTest.java @@ -50,7 +50,6 @@ import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; - import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; diff --git a/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseMuxRecordHandlerTest.java b/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseMuxRecordHandlerTest.java index 369d2d7dd2..3ed375cae4 100644 --- a/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseMuxRecordHandlerTest.java +++ b/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseMuxRecordHandlerTest.java @@ -28,12 +28,12 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import 
com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; -import com.amazonaws.services.s3.AmazonS3; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -46,7 +46,7 @@ public class SynapseMuxRecordHandlerTest private Map recordHandlerMap; private SynapseRecordHandler synapseRecordHandler; private JdbcRecordHandler jdbcRecordHandler; - private AmazonS3 amazonS3; + private S3Client amazonS3; private SecretsManagerClient secretsManager; private AthenaClient athena; private QueryStatusChecker queryStatusChecker; @@ -57,7 +57,7 @@ public void setup() { this.synapseRecordHandler = Mockito.mock(SynapseRecordHandler.class); this.recordHandlerMap = Collections.singletonMap(SynapseConstants.NAME, this.synapseRecordHandler); - this.amazonS3 = Mockito.mock(AmazonS3.class); + this.amazonS3 = Mockito.mock(S3Client.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); diff --git a/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseRecordHandlerTest.java b/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseRecordHandlerTest.java index aaa61dea9a..b0108974cc 100644 --- a/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseRecordHandlerTest.java +++ b/athena-synapse/src/test/java/com/amazonaws/athena/connectors/synapse/SynapseRecordHandlerTest.java @@ -31,7 +31,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import 
com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.s3.AmazonS3; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Schema; @@ -40,6 +39,7 @@ import org.junit.Test; import org.mockito.Mockito; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -58,7 +58,7 @@ public class SynapseRecordHandlerTest private Connection connection; private JdbcConnectionFactory jdbcConnectionFactory; private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; - private AmazonS3 amazonS3; + private S3Client amazonS3; private SecretsManagerClient secretsManager; private AthenaClient athena; @@ -66,7 +66,7 @@ public class SynapseRecordHandlerTest public void setup() throws Exception { - this.amazonS3 = Mockito.mock(AmazonS3.class); + this.amazonS3 = Mockito.mock(S3Client.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AthenaClient.class); this.connection = Mockito.mock(Connection.class); diff --git a/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataMuxRecordHandler.java b/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataMuxRecordHandler.java index 2f1a9f2954..5667ddae62 100644 --- a/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataMuxRecordHandler.java +++ b/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataMuxRecordHandler.java @@ -25,9 +25,9 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import 
com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory; -import com.amazonaws.services.s3.AmazonS3; import com.google.common.annotations.VisibleForTesting; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.Map; @@ -55,7 +55,7 @@ public TeradataMuxRecordHandler(java.util.Map configOptions) } @VisibleForTesting - TeradataMuxRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, + TeradataMuxRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions); diff --git a/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataRecordHandler.java b/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataRecordHandler.java index 74b83ae3fd..52382322a6 100644 --- a/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataRecordHandler.java +++ b/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataRecordHandler.java @@ -29,12 +29,11 @@ import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.google.common.annotations.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Schema; import org.apache.commons.lang3.Validate; import software.amazon.awssdk.services.athena.AthenaClient; 
+import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -58,12 +57,12 @@ public TeradataRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, public TeradataRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions) { - this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AthenaClient.create(), + this(databaseConnectionConfig, S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(), jdbcConnectionFactory, new TeradataQueryStringBuilder(TERADATA_QUOTE_CHARACTER, new TeradataFederationExpressionParser(TERADATA_QUOTE_CHARACTER)), configOptions); } @VisibleForTesting - TeradataRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, final AmazonS3 amazonS3, final SecretsManagerClient secretsManager, + TeradataRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, final S3Client amazonS3, final SecretsManagerClient secretsManager, final AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions); diff --git a/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataMuxJdbcRecordHandlerTest.java b/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataMuxJdbcRecordHandlerTest.java index 8ee13facf2..0c768ba3db 100644 --- a/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataMuxJdbcRecordHandlerTest.java +++ b/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataMuxJdbcRecordHandlerTest.java @@ -28,12 +28,12 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import 
com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; -import com.amazonaws.services.s3.AmazonS3; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -46,7 +46,7 @@ public class TeradataMuxJdbcRecordHandlerTest private Map recordHandlerMap; private TeradataRecordHandler teradataRecordHandler; private JdbcRecordHandler jdbcRecordHandler; - private AmazonS3 amazonS3; + private S3Client amazonS3; private SecretsManagerClient secretsManager; private AthenaClient athena; private QueryStatusChecker queryStatusChecker; @@ -57,7 +57,7 @@ public void setup() { this.teradataRecordHandler = Mockito.mock(TeradataRecordHandler.class); this.recordHandlerMap = Collections.singletonMap("teradata", this.teradataRecordHandler); - this.amazonS3 = Mockito.mock(AmazonS3.class); + this.amazonS3 = Mockito.mock(S3Client.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AthenaClient.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); diff --git a/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataRecordHandlerTest.java b/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataRecordHandlerTest.java index 09dd2f4c75..4a306592df 100644 --- a/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataRecordHandlerTest.java +++ b/athena-teradata/src/test/java/com/amazonaws/athena/connectors/teradata/TeradataRecordHandlerTest.java @@ -32,7 +32,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import 
com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; -import com.amazonaws.services.s3.AmazonS3; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.types.Types; @@ -42,6 +41,7 @@ import org.junit.Test; import org.mockito.Mockito; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.sql.Connection; @@ -58,7 +58,7 @@ public class TeradataRecordHandlerTest private Connection connection; private JdbcConnectionFactory jdbcConnectionFactory; private JdbcSplitQueryBuilder jdbcSplitQueryBuilder; - private AmazonS3 amazonS3; + private S3Client amazonS3; private SecretsManagerClient secretsManager; private AthenaClient athena; @@ -66,7 +66,7 @@ public class TeradataRecordHandlerTest public void setup() throws Exception { - this.amazonS3 = Mockito.mock(AmazonS3.class); + this.amazonS3 = Mockito.mock(S3Client.class); this.secretsManager = Mockito.mock(SecretsManagerClient.class); this.athena = Mockito.mock(AthenaClient.class); this.connection = Mockito.mock(Connection.class); diff --git a/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandler.java b/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandler.java index 9975a8c33f..a8cc2be021 100644 --- a/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandler.java +++ b/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandler.java @@ -40,8 +40,6 @@ import com.amazonaws.athena.connectors.timestream.qpt.TimestreamQueryPassthrough; import com.amazonaws.athena.connectors.timestream.query.QueryFactory; import 
com.amazonaws.athena.connectors.timestream.query.SelectQueryBuilder; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.amazonaws.services.timestreamquery.AmazonTimestreamQuery; import com.amazonaws.services.timestreamquery.model.Datum; import com.amazonaws.services.timestreamquery.model.QueryRequest; @@ -59,6 +57,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.time.Instant; @@ -92,7 +91,7 @@ public class TimestreamRecordHandler public TimestreamRecordHandler(java.util.Map configOptions) { this( - AmazonS3ClientBuilder.defaultClient(), + S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(), TimestreamClientBuilder.buildQueryClient(SOURCE_TYPE), @@ -100,7 +99,7 @@ public TimestreamRecordHandler(java.util.Map configOptions) } @VisibleForTesting - protected TimestreamRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, AmazonTimestreamQuery tsQuery, java.util.Map configOptions) + protected TimestreamRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, AmazonTimestreamQuery tsQuery, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, SOURCE_TYPE, configOptions); this.tsQuery = tsQuery; diff --git a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandlerTest.java b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandlerTest.java index 9804555422..d7ad28e816 100644 --- a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandlerTest.java +++ b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandlerTest.java @@ -40,11 +40,6 
@@ import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory; import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.PutObjectRequest; -import com.amazonaws.services.s3.model.PutObjectResult; -import com.amazonaws.services.s3.model.S3Object; -import com.amazonaws.services.s3.model.S3ObjectInputStream; import com.amazonaws.services.timestreamquery.AmazonTimestreamQuery; import com.amazonaws.services.timestreamquery.model.QueryRequest; import com.amazonaws.services.timestreamquery.model.QueryResult; @@ -64,7 +59,14 @@ import org.mockito.stubbing.Answer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.core.sync.RequestBody; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.ByteArrayInputStream; @@ -100,7 +102,7 @@ public class TimestreamRecordHandlerTest private TimestreamRecordHandler handler; private BlockAllocator allocator; private List mockS3Storage = new ArrayList<>(); - private AmazonS3 amazonS3; + private S3Client amazonS3; private S3BlockSpillReader spillReader; private Schema schemaForRead; private EncryptionKeyFactory keyFactory = new LocalKeyFactory(); @@ -144,31 +146,29 @@ public void setUp() allocator = new BlockAllocatorImpl(); - amazonS3 = mock(AmazonS3.class); + amazonS3 = mock(S3Client.class); - when(amazonS3.putObject(any())) + 
when(amazonS3.putObject(any(PutObjectRequest.class), any(RequestBody.class))) .thenAnswer((InvocationOnMock invocationOnMock) -> { - InputStream inputStream = ((PutObjectRequest) invocationOnMock.getArguments()[0]).getInputStream(); + InputStream inputStream = ((RequestBody) invocationOnMock.getArguments()[1]).contentStreamProvider().newStream(); ByteHolder byteHolder = new ByteHolder(); byteHolder.setBytes(ByteStreams.toByteArray(inputStream)); synchronized (mockS3Storage) { mockS3Storage.add(byteHolder); + logger.info("puObject: total size " + mockS3Storage.size()); } - return mock(PutObjectResult.class); + return PutObjectResponse.builder().build(); }); - when(amazonS3.getObject(nullable(String.class), nullable(String.class))) + when(amazonS3.getObject(any(GetObjectRequest.class))) .thenAnswer((InvocationOnMock invocationOnMock) -> { - S3Object mockObject = mock(S3Object.class); ByteHolder byteHolder; synchronized (mockS3Storage) { byteHolder = mockS3Storage.get(0); mockS3Storage.remove(0); + logger.info("getObject: total size " + mockS3Storage.size()); } - when(mockObject.getObjectContent()).thenReturn( - new S3ObjectInputStream( - new ByteArrayInputStream(byteHolder.getBytes()), null)); - return mockObject; + return new ResponseInputStream<>(GetObjectResponse.builder().build(), new ByteArrayInputStream(byteHolder.getBytes())); }); schemaForRead = SchemaBuilder.newBuilder() diff --git a/athena-tpcds/src/main/java/com/amazonaws/athena/connectors/tpcds/TPCDSRecordHandler.java b/athena-tpcds/src/main/java/com/amazonaws/athena/connectors/tpcds/TPCDSRecordHandler.java index df8b16529a..33c7fce626 100644 --- a/athena-tpcds/src/main/java/com/amazonaws/athena/connectors/tpcds/TPCDSRecordHandler.java +++ b/athena-tpcds/src/main/java/com/amazonaws/athena/connectors/tpcds/TPCDSRecordHandler.java @@ -26,8 +26,6 @@ import com.amazonaws.athena.connector.lambda.domain.TableName; import com.amazonaws.athena.connector.lambda.handlers.RecordHandler; import 
com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.teradata.tpcds.Results; import com.teradata.tpcds.Session; import com.teradata.tpcds.Table; @@ -39,6 +37,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.IOException; @@ -76,11 +75,11 @@ public class TPCDSRecordHandler public TPCDSRecordHandler(java.util.Map configOptions) { - super(AmazonS3ClientBuilder.defaultClient(), SecretsManagerClient.create(), AthenaClient.create(), SOURCE_TYPE, configOptions); + super(S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(), SOURCE_TYPE, configOptions); } @VisibleForTesting - protected TPCDSRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, java.util.Map configOptions) + protected TPCDSRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, SOURCE_TYPE, configOptions); } diff --git a/athena-tpcds/src/test/java/com/amazonaws/athena/connectors/tpcds/TPCDSRecordHandlerTest.java b/athena-tpcds/src/test/java/com/amazonaws/athena/connectors/tpcds/TPCDSRecordHandlerTest.java index e26bf2458d..a13b453c55 100644 --- a/athena-tpcds/src/test/java/com/amazonaws/athena/connectors/tpcds/TPCDSRecordHandlerTest.java +++ b/athena-tpcds/src/test/java/com/amazonaws/athena/connectors/tpcds/TPCDSRecordHandlerTest.java @@ -41,11 +41,6 @@ import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory; import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; -import com.amazonaws.services.s3.AmazonS3; 
-import com.amazonaws.services.s3.model.PutObjectRequest; -import com.amazonaws.services.s3.model.PutObjectResult; -import com.amazonaws.services.s3.model.S3Object; -import com.amazonaws.services.s3.model.S3ObjectInputStream; import com.google.common.collect.ImmutableMap; import com.google.common.io.ByteStreams; import com.teradata.tpcds.Table; @@ -61,7 +56,14 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.core.sync.RequestBody; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.ByteArrayInputStream; @@ -98,7 +100,7 @@ public class TPCDSRecordHandlerTest private Schema schemaForRead; @Mock - private AmazonS3 mockS3; + private S3Client mockS3; @Mock private SecretsManagerClient mockSecretsManager; @@ -127,30 +129,28 @@ public void setUp() handler = new TPCDSRecordHandler(mockS3, mockSecretsManager, mockAthena, com.google.common.collect.ImmutableMap.of()); spillReader = new S3BlockSpillReader(mockS3, allocator); - when(mockS3.putObject(any())) + when(mockS3.putObject(any(PutObjectRequest.class), any(RequestBody.class))) .thenAnswer((InvocationOnMock invocationOnMock) -> { + InputStream inputStream = ((RequestBody) invocationOnMock.getArguments()[1]).contentStreamProvider().newStream(); + ByteHolder byteHolder = new ByteHolder(); + byteHolder.setBytes(ByteStreams.toByteArray(inputStream)); synchronized (mockS3Storage) { - InputStream inputStream = ((PutObjectRequest) 
invocationOnMock.getArguments()[0]).getInputStream(); - ByteHolder byteHolder = new ByteHolder(); - byteHolder.setBytes(ByteStreams.toByteArray(inputStream)); mockS3Storage.add(byteHolder); - return mock(PutObjectResult.class); + logger.info("puObject: total size " + mockS3Storage.size()); } + return PutObjectResponse.builder().build(); }); - when(mockS3.getObject(nullable(String.class), nullable(String.class))) - .thenAnswer((InvocationOnMock invocationOnMock) -> - { + when(mockS3.getObject(any(GetObjectRequest.class))) + .thenAnswer((InvocationOnMock invocationOnMock) -> { + ByteHolder byteHolder; synchronized (mockS3Storage) { - S3Object mockObject = mock(S3Object.class); - ByteHolder byteHolder = mockS3Storage.get(0); + byteHolder = mockS3Storage.get(0); mockS3Storage.remove(0); - when(mockObject.getObjectContent()).thenReturn( - new S3ObjectInputStream( - new ByteArrayInputStream(byteHolder.getBytes()), null)); - return mockObject; + logger.info("getObject: total size " + mockS3Storage.size()); } + return new ResponseInputStream<>(GetObjectResponse.builder().build(), new ByteArrayInputStream(byteHolder.getBytes())); }); } diff --git a/athena-vertica/pom.xml b/athena-vertica/pom.xml index b5b086e449..745c4e5782 100644 --- a/athena-vertica/pom.xml +++ b/athena-vertica/pom.xml @@ -32,6 +32,11 @@ jcl-over-slf4j ${slf4j-log4j.version} + + org.apache.arrow + arrow-dataset + ${apache.arrow.version} + org.apache.logging.log4j log4j-slf4j2-impl diff --git a/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaConstants.java b/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaConstants.java index dbcf85e8a5..72bdacf8c2 100644 --- a/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaConstants.java +++ b/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaConstants.java @@ -24,6 +24,9 @@ public final class VerticaConstants public static final String VERTICA_NAME = "vertica"; 
public static final String VERTICA_DRIVER_CLASS = "com.vertica.jdbc.Driver"; public static final int VERTICA_DEFAULT_PORT = 5433; + public static final String VERTICA_SPLIT_QUERY_ID = "query_id"; + public static final String VERTICA_SPLIT_EXPORT_BUCKET = "exportBucket"; + public static final String VERTICA_SPLIT_OBJECT_KEY = "s3ObjectKey"; private VerticaConstants() {} } diff --git a/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaMetadataHandler.java b/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaMetadataHandler.java index ee40632659..4e691900aa 100644 --- a/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaMetadataHandler.java +++ b/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaMetadataHandler.java @@ -48,11 +48,6 @@ import com.amazonaws.athena.connectors.jdbc.qpt.JdbcQueryPassthrough; import com.amazonaws.athena.connectors.vertica.query.QueryFactory; import com.amazonaws.athena.connectors.vertica.query.VerticaExportQueryBuilder; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.s3.model.ListObjectsRequest; -import com.amazonaws.services.s3.model.ObjectListing; -import com.amazonaws.services.s3.model.S3ObjectSummary; import com.google.common.collect.ImmutableMap; import org.apache.arrow.util.VisibleForTesting; import org.apache.arrow.vector.complex.reader.FieldReader; @@ -62,6 +57,11 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.stringtemplate.v4.ST; +import software.amazon.awssdk.regions.providers.DefaultAwsRegionProviderChain; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.ListObjectsRequest; +import software.amazon.awssdk.services.s3.model.ListObjectsResponse; +import software.amazon.awssdk.services.s3.model.S3Object; import java.sql.Connection; import java.sql.DatabaseMetaData; @@ -80,6 
+80,9 @@ import static com.amazonaws.athena.connectors.vertica.VerticaConstants.VERTICA_DEFAULT_PORT; import static com.amazonaws.athena.connectors.vertica.VerticaConstants.VERTICA_DRIVER_CLASS; import static com.amazonaws.athena.connectors.vertica.VerticaConstants.VERTICA_NAME; +import static com.amazonaws.athena.connectors.vertica.VerticaConstants.VERTICA_SPLIT_EXPORT_BUCKET; +import static com.amazonaws.athena.connectors.vertica.VerticaConstants.VERTICA_SPLIT_OBJECT_KEY; +import static com.amazonaws.athena.connectors.vertica.VerticaConstants.VERTICA_SPLIT_QUERY_ID; import static com.amazonaws.athena.connectors.vertica.VerticaSchemaUtils.convertToArrowType; @@ -100,7 +103,7 @@ public class VerticaMetadataHandler private static final String[] TABLE_TYPES = {"TABLE"}; private final QueryFactory queryFactory = new QueryFactory(); private final VerticaSchemaUtils verticaSchemaUtils; - private AmazonS3 amazonS3; + private S3Client amazonS3; private final JdbcQueryPassthrough queryPassthrough = new JdbcQueryPassthrough(); @@ -117,11 +120,11 @@ public VerticaMetadataHandler(DatabaseConnectionConfig databaseConnectionConfig, public VerticaMetadataHandler(DatabaseConnectionConfig databaseConnectionConfig, JdbcConnectionFactory jdbcConnectionFactory, Map configOptions) { super(databaseConnectionConfig, jdbcConnectionFactory, configOptions); - amazonS3 = AmazonS3ClientBuilder.defaultClient(); + amazonS3 = S3Client.create(); verticaSchemaUtils = new VerticaSchemaUtils(); } @VisibleForTesting - public VerticaMetadataHandler(DatabaseConnectionConfig databaseConnectionConfig, JdbcConnectionFactory jdbcConnectionFactory, Map configOptions, AmazonS3 amazonS3, VerticaSchemaUtils verticaSchemaUtils) + public VerticaMetadataHandler(DatabaseConnectionConfig databaseConnectionConfig, JdbcConnectionFactory jdbcConnectionFactory, Map configOptions, S3Client amazonS3, VerticaSchemaUtils verticaSchemaUtils) { super(databaseConnectionConfig, jdbcConnectionFactory, configOptions); 
this.amazonS3 = amazonS3; @@ -298,8 +301,8 @@ public void getPartitions(BlockWriter blockWriter, GetTableLayoutRequest request } logger.info("Vertica Export Statement: {}", preparedSQLStmt); - // Build the Set AWS Region SQL - String awsRegionSql = queryBuilder.buildSetAwsRegionSql(amazonS3.getRegion().toString()); + // Build the Set AWS Region SQL - Assumes using the default region provider chain + String awsRegionSql = queryBuilder.buildSetAwsRegionSql(DefaultAwsRegionProviderChain.builder().build().getRegion().toString()); // write the prepared SQL statement to the partition column created in enhancePartitionSchema blockWriter.writeRows((Block block, int rowNum) ->{ @@ -374,16 +377,16 @@ public GetSplitsResponse doGetSplits(BlockAllocator allocator, GetSplitsRequest * For each generated S3 object, create a split and add data to the split. */ Split split; - List s3ObjectSummaries = getlistExportedObjects(exportBucket, queryId); + List s3ObjectsList = getlistExportedObjects(exportBucket, queryId); - if(!s3ObjectSummaries.isEmpty()) + if(!s3ObjectsList.isEmpty()) { - for (S3ObjectSummary objectSummary : s3ObjectSummaries) + for (S3Object s3Object : s3ObjectsList) { split = Split.newBuilder(makeSpillLocation(request), makeEncryptionKey()) - .add("query_id", queryID) - .add("exportBucket", exportBucket) - .add("s3ObjectKey", objectSummary.getKey()) + .add(VERTICA_SPLIT_QUERY_ID, queryID) + .add(VERTICA_SPLIT_EXPORT_BUCKET, exportBucket) + .add(VERTICA_SPLIT_OBJECT_KEY, s3Object.key()) .build(); splits.add(split); @@ -395,9 +398,9 @@ public GetSplitsResponse doGetSplits(BlockAllocator allocator, GetSplitsRequest //No records were exported by Vertica for the issued query, creating a "empty" split logger.info("No records were exported by Vertica"); split = Split.newBuilder(makeSpillLocation(request), makeEncryptionKey()) - .add("query_id", queryID) - .add("exportBucket", exportBucket) - .add("s3ObjectKey", EMPTY_STRING) + .add(VERTICA_SPLIT_QUERY_ID, queryID) + 
.add(VERTICA_SPLIT_EXPORT_BUCKET, exportBucket) + .add(VERTICA_SPLIT_OBJECT_KEY, EMPTY_STRING) .build(); splits.add(split); return new GetSplitsResponse(catalogName,split); @@ -428,17 +431,20 @@ private void executeQueriesOnVertica(Connection connection, String sqlStatement, /* * Get the list of all the exported S3 objects */ - private List getlistExportedObjects(String s3ExportBucket, String queryId){ - ObjectListing objectListing; + private List getlistExportedObjects(String s3ExportBucket, String queryId){ + ListObjectsResponse listObjectsResponse; try { - objectListing = amazonS3.listObjects(new ListObjectsRequest().withBucketName(s3ExportBucket).withPrefix(queryId)); + listObjectsResponse = amazonS3.listObjects(ListObjectsRequest.builder() + .bucket(s3ExportBucket) + .prefix(queryId) + .build()); } catch (SdkClientException e) { throw new RuntimeException("Exception listing the exported objects : " + e.getMessage(), e); } - return objectListing.getObjectSummaries(); + return listObjectsResponse.contents(); } private void testAccess(Connection conn, TableName table) { diff --git a/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaRecordHandler.java b/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaRecordHandler.java index 795d02e402..29bec641d6 100644 --- a/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaRecordHandler.java +++ b/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaRecordHandler.java @@ -32,26 +32,34 @@ import com.amazonaws.athena.connector.lambda.domain.Split; import com.amazonaws.athena.connector.lambda.handlers.RecordHandler; import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.s3.model.*; -import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.arrow.dataset.file.FileFormat; 
+import org.apache.arrow.dataset.file.FileSystemDatasetFactory; +import org.apache.arrow.dataset.jni.NativeMemoryPool; +import org.apache.arrow.dataset.scanner.ScanOptions; +import org.apache.arrow.dataset.scanner.Scanner; +import org.apache.arrow.dataset.source.Dataset; +import org.apache.arrow.dataset.source.DatasetFactory; +import org.apache.arrow.memory.BufferAllocator; +import org.apache.arrow.memory.RootAllocator; import org.apache.arrow.util.VisibleForTesting; +import org.apache.arrow.vector.VectorSchemaRoot; import org.apache.arrow.vector.holders.*; +import org.apache.arrow.vector.ipc.ArrowReader; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; -import java.io.BufferedReader; +import static com.amazonaws.athena.connectors.vertica.VerticaConstants.VERTICA_SPLIT_EXPORT_BUCKET; +import static com.amazonaws.athena.connectors.vertica.VerticaConstants.VERTICA_SPLIT_OBJECT_KEY; +import static com.amazonaws.athena.connectors.vertica.VerticaConstants.VERTICA_SPLIT_QUERY_ID; + import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; import java.math.BigDecimal; -import java.nio.charset.StandardCharsets; import java.time.LocalDate; import java.time.LocalDateTime; import java.util.HashMap; @@ -61,22 +69,18 @@ public class VerticaRecordHandler extends RecordHandler { private static final Logger logger = LoggerFactory.getLogger(VerticaRecordHandler.class); private static final String SOURCE_TYPE = "vertica"; - private static final String VERTICA_QUOTE_CHARACTER = "\""; - private static final String QUERY = "select * from S3Object s"; - private AmazonS3 amazonS3; public 
VerticaRecordHandler(java.util.Map configOptions) { - this(AmazonS3ClientBuilder.defaultClient(), + this(S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(), configOptions); } @VisibleForTesting - protected VerticaRecordHandler(AmazonS3 amazonS3, SecretsManagerClient secretsManager, AthenaClient amazonAthena, java.util.Map configOptions) + protected VerticaRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient amazonAthena, java.util.Map configOptions) { super(amazonS3, secretsManager, amazonAthena, SOURCE_TYPE, configOptions); - this.amazonS3 = amazonS3; } /** @@ -100,9 +104,9 @@ protected void readWithConstraint(BlockSpiller spiller, ReadRecordsRequest recor Schema schemaName = recordsRequest.getSchema(); Split split = recordsRequest.getSplit(); - String id = split.getProperty("query_id"); - String exportBucket = split.getProperty("exportBucket"); - String s3ObjectKey = split.getProperty("s3ObjectKey"); + String id = split.getProperty(VERTICA_SPLIT_QUERY_ID); + String exportBucket = split.getProperty(VERTICA_SPLIT_EXPORT_BUCKET); + String s3ObjectKey = split.getProperty(VERTICA_SPLIT_OBJECT_KEY); if(!s3ObjectKey.isEmpty()) { //get column name and type from the Schema @@ -127,25 +131,25 @@ protected void readWithConstraint(BlockSpiller spiller, ReadRecordsRequest recor } GeneratedRowWriter rowWriter = builder.build(); - /* - Using S3 Select to read the S3 Parquet file generated in the split - */ - //Creating the read Request - SelectObjectContentRequest request = generateBaseParquetRequest(exportBucket, s3ObjectKey); - try (SelectObjectContentResult result = amazonS3.selectObjectContent(request)) { - InputStream resultInputStream = result.getPayload().getRecordsInputStream(); - BufferedReader streamReader = new BufferedReader(new InputStreamReader(resultInputStream, StandardCharsets.UTF_8)); - String inputStr; - while ((inputStr = streamReader.readLine()) != null) { - HashMap map = new HashMap<>(); - //we are reading 
the parquet files, but serializing the output it as JSON as SDK provides a Parquet InputSerialization, but only a JSON or CSV OutputSerializatio - ObjectMapper objectMapper = new ObjectMapper(); - map = objectMapper.readValue(inputStr, HashMap.class); - rowContext.setNameValue(map); - - //Passing the RowContext to BlockWriter; - spiller.writeRows((Block block, int rowNum) -> rowWriter.writeRow(block, rowNum, rowContext) ? 1 : 0); + /* + Using Arrow Dataset to read the S3 Parquet file generated in the split + */ + try (ArrowReader reader = constructArrowReader(constructS3Uri(exportBucket, s3ObjectKey))) + { + while (reader.loadNextBatch()) { + VectorSchemaRoot root = reader.getVectorSchemaRoot(); + for (int row = 0; row < root.getRowCount(); row++) { + HashMap map = new HashMap<>(); + for (Field field : root.getSchema().getFields()) { + map.put(field.getName(), root.getVector(field).getObject(row)); + } + rowContext.setNameValue(map); + + //Passing the RowContext to BlockWriter; + spiller.writeRows((Block block, int rowNum) -> rowWriter.writeRow(block, rowNum, rowContext) ? 
1 : 0); + } } + reader.close(); } catch (Exception e) { throw new RuntimeException("Error in connecting to S3 and selecting the object content for object : " + s3ObjectKey, e); } @@ -329,28 +333,24 @@ public HashMap getNameValue() { } } - - /* - Method to create the Parquet read request - */ - private static SelectObjectContentRequest generateBaseParquetRequest(String bucket, String key) + @VisibleForTesting + protected ArrowReader constructArrowReader(String uri) { - SelectObjectContentRequest request = new SelectObjectContentRequest(); - request.setBucketName(bucket); - request.setKey(key); - request.setExpression(VerticaRecordHandler.QUERY); - request.setExpressionType(ExpressionType.SQL); - - InputSerialization inputSerialization = new InputSerialization(); - inputSerialization.setParquet(new ParquetInput()); - inputSerialization.setCompressionType(CompressionType.NONE); - request.setInputSerialization(inputSerialization); - - OutputSerialization outputSerialization = new OutputSerialization(); - outputSerialization.setJson(new JSONOutput()); - request.setOutputSerialization(outputSerialization); + BufferAllocator allocator = new RootAllocator(); + DatasetFactory datasetFactory = new FileSystemDatasetFactory( + allocator, + NativeMemoryPool.getDefault(), + FileFormat.PARQUET, + uri); + Dataset dataset = datasetFactory.finish(); + ScanOptions options = new ScanOptions(/*batchSize*/ 32768); + Scanner scanner = dataset.newScan(options); + return scanner.scanBatches(); + } - return request; + private static String constructS3Uri(String bucket, String key) + { + return "s3://" + bucket + "/" + key; } } diff --git a/athena-vertica/src/test/java/com/amazonaws/athena/connectors/vertica/VerticaMetadataHandlerTest.java b/athena-vertica/src/test/java/com/amazonaws/athena/connectors/vertica/VerticaMetadataHandlerTest.java index ae833032d7..48091b59e0 100644 --- a/athena-vertica/src/test/java/com/amazonaws/athena/connectors/vertica/VerticaMetadataHandlerTest.java +++ 
b/athena-vertica/src/test/java/com/amazonaws/athena/connectors/vertica/VerticaMetadataHandlerTest.java @@ -47,11 +47,6 @@ import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; import com.amazonaws.athena.connectors.vertica.query.QueryFactory; import com.amazonaws.athena.connectors.vertica.query.VerticaExportQueryBuilder; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.ListObjectsRequest; -import com.amazonaws.services.s3.model.ObjectListing; -import com.amazonaws.services.s3.model.Region; -import com.amazonaws.services.s3.model.S3ObjectSummary; import com.google.common.collect.ImmutableList; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.After; @@ -66,6 +61,10 @@ import org.slf4j.LoggerFactory; import org.stringtemplate.v4.ST; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.ListObjectsRequest; +import software.amazon.awssdk.services.s3.model.ListObjectsResponse; +import software.amazon.awssdk.services.s3.model.S3Object; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest; import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse; @@ -105,7 +104,7 @@ public class VerticaMetadataHandlerTest extends TestBase private Connection connection; private SecretsManagerClient secretsManager; private AthenaClient athena; - private AmazonS3 amazonS3; + private S3Client amazonS3; private FederatedIdentity federatedIdentity; private BlockAllocatorImpl allocator; private DatabaseMetaData databaseMetaData; @@ -117,11 +116,7 @@ public class VerticaMetadataHandlerTest extends TestBase private QueryStatusChecker queryStatusChecker; private VerticaMetadataHandler verticaMetadataHandlerMocked; @Mock - private AmazonS3 s3clientMock; - @Mock - private ListObjectsRequest 
listObjectsRequest; - @Mock - private ObjectListing objectListing; + private S3Client s3clientMock; private DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog", VERTICA_NAME, "vertica://jdbc:vertica:thin:username/password@//127.0.0.1:1521/vrt"); @@ -144,11 +139,10 @@ public void setUp() throws Exception this.schemaBuilder = Mockito.mock(SchemaBuilder.class); this.blockWriter = Mockito.mock(BlockWriter.class); this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class); - this.amazonS3 = Mockito.mock(AmazonS3.class); + this.amazonS3 = Mockito.mock(S3Client.class); Mockito.lenient().when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}").build()); Mockito.when(connection.getMetaData()).thenReturn(databaseMetaData); - Mockito.when(amazonS3.getRegion()).thenReturn(Region.US_West_2); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class, Mockito.RETURNS_DEEP_STUBS); this.connection = Mockito.mock(Connection.class, Mockito.RETURNS_DEEP_STUBS); @@ -344,21 +338,13 @@ public void doGetSplits() throws Exception BlockUtils.setValue(partitions.getFieldVector("awsRegionSql"), i, "us-west-2"); } - List s3ObjectSummariesList = new ArrayList<>(); - S3ObjectSummary s3ObjectSummary = new S3ObjectSummary(); - s3ObjectSummary.setBucketName("s3ExportBucket"); - s3ObjectSummary.setKey("testKey"); - s3ObjectSummariesList.add(s3ObjectSummary); - ListObjectsRequest listObjectsRequestObj = new ListObjectsRequest(); - listObjectsRequestObj.setBucketName("s3ExportBucket"); - listObjectsRequestObj.setPrefix("queryId"); - + List objectList = new ArrayList<>(); + S3Object obj = S3Object.builder().key("testKey").build(); + objectList.add(obj); + ListObjectsResponse listObjectsResponse = ListObjectsResponse.builder().contents(objectList).build(); 
Mockito.when(verticaMetadataHandlerMocked.getS3ExportBucket()).thenReturn("testS3Bucket"); - Mockito.lenient().when(listObjectsRequest.withBucketName(nullable(String.class))).thenReturn(listObjectsRequestObj); - Mockito.lenient().when(listObjectsRequest.withPrefix(nullable(String.class))).thenReturn(listObjectsRequestObj); - Mockito.when(amazonS3.listObjects(nullable(ListObjectsRequest.class))).thenReturn(objectListing); - Mockito.when(objectListing.getObjectSummaries()).thenReturn(s3ObjectSummariesList); + Mockito.when(amazonS3.listObjects(nullable(ListObjectsRequest.class))).thenReturn(listObjectsResponse); GetSplitsRequest originalReq = new GetSplitsRequest(this.federatedIdentity, "queryId", "catalog_name", new TableName("schema", "table_name"), diff --git a/athena-vertica/src/test/java/com/amazonaws/athena/connectors/vertica/VerticaRecordHandlerTest.java b/athena-vertica/src/test/java/com/amazonaws/athena/connectors/vertica/VerticaRecordHandlerTest.java new file mode 100644 index 0000000000..b6ec304ad3 --- /dev/null +++ b/athena-vertica/src/test/java/com/amazonaws/athena/connectors/vertica/VerticaRecordHandlerTest.java @@ -0,0 +1,349 @@ +/*- + * #%L + * athena-gcs + * %% + * Copyright (C) 2019 - 2022 Amazon Web Services + * %% + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * #L% + */ +package com.amazonaws.athena.connectors.vertica; + +import java.io.ByteArrayInputStream; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; + +import org.apache.arrow.memory.BufferAllocator; +import org.apache.arrow.memory.RootAllocator; +import org.apache.arrow.vector.BigIntVector; +import org.apache.arrow.vector.VarCharVector; +import org.apache.arrow.vector.VectorSchemaRoot; +import org.apache.arrow.vector.ipc.ArrowReader; +import org.apache.arrow.vector.types.Types; +import org.apache.arrow.vector.types.pojo.Field; +import org.apache.arrow.vector.types.pojo.Schema; +import org.apache.arrow.vector.util.Text; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TestName; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.junit.MockitoJUnitRunner; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.amazonaws.athena.connector.lambda.data.Block; +import com.amazonaws.athena.connector.lambda.data.BlockAllocator; +import com.amazonaws.athena.connector.lambda.data.BlockAllocatorImpl; +import com.amazonaws.athena.connector.lambda.data.BlockUtils; +import com.amazonaws.athena.connector.lambda.data.S3BlockSpillReader; +import com.amazonaws.athena.connector.lambda.data.SchemaBuilder; +import com.amazonaws.athena.connector.lambda.domain.Split; +import com.amazonaws.athena.connector.lambda.domain.predicate.Constraints; +import com.amazonaws.athena.connector.lambda.domain.predicate.Range; +import com.amazonaws.athena.connector.lambda.domain.predicate.SortedRangeSet; +import com.amazonaws.athena.connector.lambda.domain.predicate.ValueSet; +import com.amazonaws.athena.connector.lambda.domain.spill.S3SpillLocation; +import 
com.amazonaws.athena.connector.lambda.domain.spill.SpillLocation; +import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest; +import com.amazonaws.athena.connector.lambda.records.ReadRecordsResponse; +import com.amazonaws.athena.connector.lambda.records.RecordResponse; +import com.amazonaws.athena.connector.lambda.records.RemoteReadRecordsResponse; +import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory; +import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; +import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; +import com.google.common.collect.ImmutableList; +import com.google.common.io.ByteStreams; + +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; + +import static com.amazonaws.athena.connector.lambda.domain.predicate.Constraints.DEFAULT_NO_LIMIT; +import static com.amazonaws.athena.connectors.vertica.VerticaConstants.VERTICA_SPLIT_EXPORT_BUCKET; +import static com.amazonaws.athena.connectors.vertica.VerticaConstants.VERTICA_SPLIT_OBJECT_KEY; +import static com.amazonaws.athena.connectors.vertica.VerticaConstants.VERTICA_SPLIT_QUERY_ID; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; + 
+@RunWith(MockitoJUnitRunner.class) + +public class VerticaRecordHandlerTest + extends TestBase +{ + private static final Logger logger = LoggerFactory.getLogger(VerticaRecordHandlerTest.class); + + private VerticaRecordHandler handler; + private BlockAllocator allocator; + private List mockS3Storage = new ArrayList<>(); + private S3BlockSpillReader spillReader; + private FederatedIdentity identity = new FederatedIdentity("arn", "account", Collections.emptyMap(), Collections.emptyList()); + private EncryptionKeyFactory keyFactory = new LocalKeyFactory(); + + private static final BufferAllocator bufferAllocator = new RootAllocator(); + + @Rule + public TestName testName = new TestName(); + + @Mock + private S3Client mockS3; + + @Mock + private SecretsManagerClient mockSecretsManager; + + @Mock + private AthenaClient mockAthena; + + @Before + public void setup() + { + logger.info("{}: enter", testName.getMethodName()); + + allocator = new BlockAllocatorImpl(); + handler = new VerticaRecordHandler(mockS3, mockSecretsManager, mockAthena, com.google.common.collect.ImmutableMap.of()); + spillReader = new S3BlockSpillReader(mockS3, allocator); + + Mockito.lenient().when(mockS3.putObject(any(PutObjectRequest.class), any(RequestBody.class))) + .thenAnswer((InvocationOnMock invocationOnMock) -> { + InputStream inputStream = ((RequestBody) invocationOnMock.getArguments()[1]).contentStreamProvider().newStream(); + ByteHolder byteHolder = new ByteHolder(); + byteHolder.setBytes(ByteStreams.toByteArray(inputStream)); + synchronized (mockS3Storage) { + mockS3Storage.add(byteHolder); + logger.info("puObject: total size " + mockS3Storage.size()); + } + return PutObjectResponse.builder().build(); + }); + + Mockito.lenient().when(mockS3.getObject(any(GetObjectRequest.class))) + .thenAnswer((InvocationOnMock invocationOnMock) -> { + ByteHolder byteHolder; + synchronized (mockS3Storage) { + byteHolder = mockS3Storage.get(0); + mockS3Storage.remove(0); + logger.info("getObject: total 
size " + mockS3Storage.size()); + } + return new ResponseInputStream<>(GetObjectResponse.builder().build(), new ByteArrayInputStream(byteHolder.getBytes())); + }); + } + + @After + public void after() + { + allocator.close(); + logger.info("{}: exit ", testName.getMethodName()); + } + + @Test + public void doReadRecordsNoSpill() + throws Exception + { + logger.info("doReadRecordsNoSpill: enter"); + + VectorSchemaRoot schemaRoot = createRoot(); + ArrowReader mockReader = mock(ArrowReader.class); + when(mockReader.loadNextBatch()).thenReturn(true, false); + when(mockReader.getVectorSchemaRoot()).thenReturn(schemaRoot); + VerticaRecordHandler handlerSpy = spy(handler); + doReturn(mockReader).when(handlerSpy).constructArrowReader(any()); + + Map constraintsMap = new HashMap<>(); + constraintsMap.put("time", SortedRangeSet.copyOf(Types.MinorType.BIGINT.getType(), + ImmutableList.of(Range.equal(allocator, Types.MinorType.BIGINT.getType(), 100L)), false)); + + S3SpillLocation splitLoc = S3SpillLocation.newBuilder() + .withBucket(UUID.randomUUID().toString()) + .withSplitId(UUID.randomUUID().toString()) + .withQueryId(UUID.randomUUID().toString()) + .withIsDirectory(true) + .build(); + + Split.Builder splitBuilder = Split.newBuilder(splitLoc, keyFactory.create()) + .add(VERTICA_SPLIT_QUERY_ID, "query_id") + .add(VERTICA_SPLIT_EXPORT_BUCKET, "export_bucket") + .add(VERTICA_SPLIT_OBJECT_KEY, "s3_object_key"); + + ReadRecordsRequest request = new ReadRecordsRequest(identity, + DEFAULT_CATALOG, + QUERY_ID, + TABLE_NAME, + schemaRoot.getSchema(), + splitBuilder.build(), + new Constraints(constraintsMap, Collections.emptyList(), Collections.emptyList(), DEFAULT_NO_LIMIT), + 100_000_000_000L, + 100_000_000_000L//100GB don't expect this to spill + ); + RecordResponse rawResponse = handlerSpy.doReadRecords(allocator, request); + + assertTrue(rawResponse instanceof ReadRecordsResponse); + + ReadRecordsResponse response = (ReadRecordsResponse) rawResponse; + 
logger.info("doReadRecordsNoSpill: rows[{}]", response.getRecordCount()); + + assertTrue(response.getRecords().getRowCount() == 2); + logger.info("doReadRecordsNoSpill: {}", BlockUtils.rowToString(response.getRecords(), 0)); + logger.info("doReadRecordsNoSpill: {}", BlockUtils.rowToString(response.getRecords(), 1)); + + for (Field field : schemaRoot.getSchema().getFields()) { + assertTrue(response.getRecords().getFieldVector(field.getName()).getObject(0).equals(schemaRoot.getVector(field).getObject(0))); + assertTrue(response.getRecords().getFieldVector(field.getName()).getObject(1).equals(schemaRoot.getVector(field).getObject(1))); + } + + logger.info("doReadRecordsNoSpill: exit"); + } + + @Test + public void doReadRecordsSpill() + throws Exception + { + logger.info("doReadRecordsSpill: enter"); + + VectorSchemaRoot schemaRoot = createRoot(); + ArrowReader mockReader = mock(ArrowReader.class); + when(mockReader.loadNextBatch()).thenReturn(true, false); + when(mockReader.getVectorSchemaRoot()).thenReturn(schemaRoot); + VerticaRecordHandler handlerSpy = spy(handler); + doReturn(mockReader).when(handlerSpy).constructArrowReader(any()); + + Map constraintsMap = new HashMap<>(); + constraintsMap.put("time", SortedRangeSet.copyOf(Types.MinorType.BIGINT.getType(), + ImmutableList.of(Range.equal(allocator, Types.MinorType.BIGINT.getType(), 100L)), false)); + + S3SpillLocation splitLoc = S3SpillLocation.newBuilder() + .withBucket(UUID.randomUUID().toString()) + .withSplitId(UUID.randomUUID().toString()) + .withQueryId(UUID.randomUUID().toString()) + .withIsDirectory(true) + .build(); + + Split.Builder splitBuilder = Split.newBuilder(splitLoc, keyFactory.create()) + .add(VERTICA_SPLIT_QUERY_ID, "query_id") + .add(VERTICA_SPLIT_EXPORT_BUCKET, "export_bucket") + .add(VERTICA_SPLIT_OBJECT_KEY, "s3_object_key"); + + ReadRecordsRequest request = new ReadRecordsRequest(identity, + DEFAULT_CATALOG, + QUERY_ID, + TABLE_NAME, + schemaRoot.getSchema(), + splitBuilder.build(), + new 
Constraints(constraintsMap, Collections.emptyList(), Collections.emptyList(), DEFAULT_NO_LIMIT), + 1_500_000L, //~1.5MB so we should see some spill + 0L + ); + RecordResponse rawResponse = handlerSpy.doReadRecords(allocator, request); + + assertTrue(rawResponse instanceof RemoteReadRecordsResponse); + + try (RemoteReadRecordsResponse response = (RemoteReadRecordsResponse) rawResponse) { + logger.info("doReadRecordsSpill: remoteBlocks[{}]", response.getRemoteBlocks().size()); + + //assertTrue(response.getNumberBlocks() > 1); + + int blockNum = 0; + for (SpillLocation next : response.getRemoteBlocks()) { + S3SpillLocation spillLocation = (S3SpillLocation) next; + try (Block block = spillReader.read(spillLocation, response.getEncryptionKey(), response.getSchema())) { + + logger.info("doReadRecordsSpill: blockNum[{}] and recordCount[{}]", blockNum++, block.getRowCount()); + // assertTrue(++blockNum < response.getRemoteBlocks().size() && block.getRowCount() > 10_000); + + logger.info("doReadRecordsSpill: {}", BlockUtils.rowToString(block, 0)); + assertNotNull(BlockUtils.rowToString(block, 0)); + } + } + } + + logger.info("doReadRecordsSpill: exit"); + } + + private class ByteHolder + { + private byte[] bytes; + + public void setBytes(byte[] bytes) + { + this.bytes = bytes; + } + + public byte[] getBytes() + { + return bytes; + } + } + + private VectorSchemaRoot createRoot() + { + Schema schema = SchemaBuilder.newBuilder() + .addBigIntField("day") + .addBigIntField("month") + .addBigIntField("year") + .addStringField("preparedStmt") + .addStringField("queryId") + .addStringField("awsRegionSql") + .build(); + VectorSchemaRoot schemaRoot = VectorSchemaRoot.create(schema, bufferAllocator); + BigIntVector dayVector = (BigIntVector) schemaRoot.getVector("day"); + dayVector.allocateNew(2); + dayVector.set(0, 0); + dayVector.set(1, 1); + dayVector.setValueCount(2); + BigIntVector monthVector = (BigIntVector) schemaRoot.getVector("month"); + monthVector.allocateNew(2); + 
monthVector.set(0, 0); + monthVector.set(1, 1); + monthVector.setValueCount(2); + BigIntVector yearVector = (BigIntVector) schemaRoot.getVector("year"); + yearVector.allocateNew(2); + yearVector.set(0, 2000); + yearVector.set(1, 2001); + yearVector.setValueCount(2); + VarCharVector stmtVector = (VarCharVector) schemaRoot.getVector("preparedStmt"); + stmtVector.allocateNew(2); + stmtVector.set(0, new Text("test1")); + stmtVector.set(1, new Text("test2")); + stmtVector.setValueCount(2); + VarCharVector idVector = (VarCharVector) schemaRoot.getVector("queryId"); + idVector.allocateNew(2); + idVector.set(0, new Text("queryID1")); + idVector.set(1, new Text("queryID2")); + idVector.setValueCount(2); + VarCharVector regionVector = (VarCharVector) schemaRoot.getVector("awsRegionSql"); + regionVector.allocateNew(2); + regionVector.set(0, new Text("region1")); + regionVector.set(1, new Text("region2")); + regionVector.setValueCount(2); + schemaRoot.setRowCount(2); + return schemaRoot; + } +} From ec12d57b37af529ed8e288a7c0d93385a35ae3dc Mon Sep 17 00:00:00 2001 From: Aimery Methena Date: Thu, 22 Aug 2024 20:17:15 +0000 Subject: [PATCH 09/87] fixed SpillLocationVerifier merge errors --- .../domain/spill/SpillLocationVerifier.java | 4 +- .../spill/SpillLocationVerifierTest.java | 40 +++++++++++-------- 2 files changed, 25 insertions(+), 19 deletions(-) diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/domain/spill/SpillLocationVerifier.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/domain/spill/SpillLocationVerifier.java index e2d6b468e8..4e06af8044 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/domain/spill/SpillLocationVerifier.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/domain/spill/SpillLocationVerifier.java @@ -23,9 +23,9 @@ import com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import 
org.slf4j.LoggerFactory; -import software.amazon.awssdk.awscore.exception.AwsServiceException; import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.model.HeadBucketRequest; +import software.amazon.awssdk.services.s3.model.S3Exception; /** * This class is used to track the bucket and its state, and check its validity @@ -86,7 +86,7 @@ void updateBucketState() amazons3.headBucket(HeadBucketRequest.builder().bucket(bucket).build()); state = BucketState.VALID; } - catch (AwsServiceException ex) { + catch (S3Exception ex) { int statusCode = ex.statusCode(); // returns 404 if bucket was not found, 403 if bucket access is forbidden if (statusCode == 404 || statusCode == 403) { diff --git a/athena-federation-sdk/src/test/java/com/amazonaws/athena/connector/lambda/domain/spill/SpillLocationVerifierTest.java b/athena-federation-sdk/src/test/java/com/amazonaws/athena/connector/lambda/domain/spill/SpillLocationVerifierTest.java index 22ca66424f..b78c58c4eb 100644 --- a/athena-federation-sdk/src/test/java/com/amazonaws/athena/connector/lambda/domain/spill/SpillLocationVerifierTest.java +++ b/athena-federation-sdk/src/test/java/com/amazonaws/athena/connector/lambda/domain/spill/SpillLocationVerifierTest.java @@ -24,12 +24,16 @@ import org.junit.Before; import org.junit.Test; import org.mockito.Spy; +import org.mockito.stubbing.Answer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - +import software.amazon.awssdk.awscore.exception.AwsServiceException; import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.model.Bucket; +import software.amazon.awssdk.services.s3.model.HeadBucketRequest; +import software.amazon.awssdk.services.s3.model.HeadBucketResponse; import software.amazon.awssdk.services.s3.model.ListBucketsResponse; +import software.amazon.awssdk.services.s3.model.S3Exception; import java.util.ArrayList; import java.util.Arrays; @@ -37,6 +41,7 @@ import java.util.Random; import static 
org.junit.Assert.*; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; @@ -57,8 +62,7 @@ public void setup() logger.info("setUpBefore - enter"); bucketNames = Arrays.asList("bucket1", "bucket2", "bucket3"); - List buckets = createBuckets(bucketNames); - S3Client mockS3 = createMockS3(buckets); + S3Client mockS3 = createMockS3(bucketNames); spyVerifier = spy(new SpillLocationVerifier(mockS3)); logger.info("setUpBefore - exit"); @@ -138,22 +142,24 @@ public void checkBucketAuthZFail() logger.info("checkBucketAuthZFail - exit"); } - private S3Client createMockS3(List buckets) + private S3Client createMockS3(List buckets) { S3Client s3mock = mock(S3Client.class); - ListBucketsResponse response = ListBucketsResponse.builder().buckets(buckets).build(); - when(s3mock.listBuckets()).thenReturn(response); + when(s3mock.headBucket(any(HeadBucketRequest.class))) + .thenAnswer((Answer) invocationOnMock -> { + String bucketName = ((HeadBucketRequest) invocationOnMock.getArguments()[0]).bucket(); + if (buckets.contains(bucketName)) { + return null; + } + AwsServiceException exception; + if (bucketName.equals("forbidden")) { + exception = S3Exception.builder().statusCode(403).message("Forbidden").build(); + } + else { + exception = S3Exception.builder().statusCode(404).message("Not Found").build(); + } + throw exception; + }); return s3mock; } - - private List createBuckets(List names) - { - List buckets = new ArrayList<>(); - for (String name : names) { - Bucket bucket = Bucket.builder().name(name).build(); - buckets.add(bucket); - } - - return buckets; - } } From cfd6f21aa32ad49fb83eaf0522a65094c94d777e Mon Sep 17 00:00:00 2001 From: Jithendar Trianz <106380520+Jithendar12@users.noreply.github.com> Date: Fri, 23 Aug 2024 19:41:12 +0530 Subject: [PATCH 10/87] Fix return statement in getGlueSchemaType method (#2199) --- 
.../com/amazonaws/athena/connectors/msk/GlueRegistryReader.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/GlueRegistryReader.java b/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/GlueRegistryReader.java index e4c1b4e6e5..8dfffb5a9e 100644 --- a/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/GlueRegistryReader.java +++ b/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/GlueRegistryReader.java @@ -80,7 +80,7 @@ public T getGlueSchema(String glueRegistryName, String glueSchemaName, Class public String getGlueSchemaType(String glueRegistryName, String glueSchemaName) { GetSchemaVersionResponse result = getSchemaVersionResult(glueRegistryName, glueSchemaName); - return result.dataFormat().toString(); + return result.dataFormatAsString(); } public String getSchemaDef(String glueRegistryName, String glueSchemaName) { From e978122d718796702b9d9bb50e5bdbf8f4a21ca9 Mon Sep 17 00:00:00 2001 From: VenkatasivareddyTR <110587813+VenkatasivareddyTR@users.noreply.github.com> Date: Fri, 23 Aug 2024 19:41:46 +0530 Subject: [PATCH 11/87] v2 migration vertica issue fix (#2147) --- athena-vertica/pom.xml | 5 ++ .../vertica/VerticaCompositeHandler.java | 12 +++- .../connectors/vertica/VerticaConstants.java | 8 +++ .../vertica/VerticaSchemaUtils.java | 61 +++++++++++++++++++ 4 files changed, 85 insertions(+), 1 deletion(-) diff --git a/athena-vertica/pom.xml b/athena-vertica/pom.xml index 745c4e5782..4e8543e54c 100644 --- a/athena-vertica/pom.xml +++ b/athena-vertica/pom.xml @@ -22,6 +22,11 @@ + + net.java.dev.jna + jna-platform + 5.14.0 + org.slf4j slf4j-api diff --git a/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaCompositeHandler.java b/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaCompositeHandler.java index 7ccbb0f34d..07467897b2 100644 --- 
a/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaCompositeHandler.java +++ b/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaCompositeHandler.java @@ -21,6 +21,14 @@ import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; +import java.io.IOException; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.security.cert.CertificateEncodingException; + +import static com.amazonaws.athena.connectors.vertica.VerticaSchemaUtils.installCaCertificate; +import static com.amazonaws.athena.connectors.vertica.VerticaSchemaUtils.setupNativeEnvironmentVariables; + /** * Boilerplate composite handler that allows us to use a single Lambda function for both * Metadata and Data. @@ -28,8 +36,10 @@ public class VerticaCompositeHandler extends CompositeHandler { - public VerticaCompositeHandler() + public VerticaCompositeHandler() throws CertificateEncodingException, IOException, NoSuchAlgorithmException, KeyStoreException { super(new VerticaMetadataHandler(System.getenv()), new VerticaRecordHandler(System.getenv())); + installCaCertificate(); + setupNativeEnvironmentVariables(); } } diff --git a/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaConstants.java b/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaConstants.java index 72bdacf8c2..a12d790501 100644 --- a/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaConstants.java +++ b/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaConstants.java @@ -28,5 +28,13 @@ public final class VerticaConstants public static final String VERTICA_SPLIT_EXPORT_BUCKET = "exportBucket"; public static final String VERTICA_SPLIT_OBJECT_KEY = "s3ObjectKey"; + /** + * A ssl file location constant to store the SSL certificate + * The file location is fixed at /tmp directory + * to retrieve ssl certificate location + */ + public 
static final String SSL_CERT_FILE_LOCATION = "SSL_CERT_FILE"; + public static final String SSL_CERT_FILE_LOCATION_VALUE = "/tmp/cacert.pem"; + private VerticaConstants() {} } diff --git a/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaSchemaUtils.java b/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaSchemaUtils.java index 6939f8d2bb..547a01f26a 100644 --- a/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaSchemaUtils.java +++ b/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaSchemaUtils.java @@ -21,19 +21,39 @@ import com.amazonaws.athena.connector.lambda.data.SchemaBuilder; import com.amazonaws.athena.connector.lambda.domain.TableName; +import com.sun.jna.platform.unix.LibC; import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import javax.net.ssl.TrustManager; +import javax.net.ssl.TrustManagerFactory; +import javax.net.ssl.X509TrustManager; +import java.io.FileWriter; +import java.io.IOException; +import java.security.KeyStore; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.security.cert.Certificate; +import java.security.cert.CertificateEncodingException; +import java.security.cert.X509Certificate; import java.sql.Connection; import java.sql.DatabaseMetaData; import java.sql.ResultSet; import java.sql.SQLException; +import java.util.Base64; + +import static com.amazonaws.athena.connectors.vertica.VerticaConstants.SSL_CERT_FILE_LOCATION; +import static com.amazonaws.athena.connectors.vertica.VerticaConstants.SSL_CERT_FILE_LOCATION_VALUE; public class VerticaSchemaUtils { private static final Logger logger = LoggerFactory.getLogger(VerticaSchemaUtils.class); + private static final String BEGIN_CERT = "-----BEGIN CERTIFICATE-----"; + private static final String END_CERT = "-----END CERTIFICATE-----"; + private static final String LINE_SEPARATOR 
= System.getProperty("line.separator"); + //Builds the table schema protected Schema buildTableSchema(Connection connection, TableName name) { @@ -125,4 +145,45 @@ public static void convertToArrowType(SchemaBuilder tableSchemaBuilder, String c tableSchemaBuilder.addStringField(colName); } } + + /** + * Write out the cacerts that we trust from the default java truststore. + * + */ + public static void installCaCertificate() throws IOException, NoSuchAlgorithmException, KeyStoreException, CertificateEncodingException + { + FileWriter caBundleWriter = new FileWriter(SSL_CERT_FILE_LOCATION_VALUE); + try { + TrustManagerFactory trustManagerFactory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); + trustManagerFactory.init((KeyStore) null); + for (TrustManager trustManager : trustManagerFactory.getTrustManagers()) { + X509TrustManager x509TrustManager = (X509TrustManager) trustManager; + for (X509Certificate x509Certificate : x509TrustManager.getAcceptedIssuers()) { + caBundleWriter.write(formatCrtFileContents(x509Certificate)); + caBundleWriter.write(LINE_SEPARATOR); + } + } + } + finally { + caBundleWriter.close(); + } + } + + private static String formatCrtFileContents(Certificate certificate) throws CertificateEncodingException + { + Base64.Encoder encoder = Base64.getMimeEncoder(64, LINE_SEPARATOR.getBytes()); + byte[] rawCrtText = certificate.getEncoded(); + String encodedCertText = new String(encoder.encode(rawCrtText)); + String prettifiedCert = BEGIN_CERT + LINE_SEPARATOR + encodedCertText + LINE_SEPARATOR + END_CERT; + return prettifiedCert; + } + + public static void setupNativeEnvironmentVariables() + { + LibC.INSTANCE.setenv(SSL_CERT_FILE_LOCATION, SSL_CERT_FILE_LOCATION_VALUE, 1); + if (logger.isDebugEnabled()) { + logger.debug("Set native environment variables: {}: {}", + SSL_CERT_FILE_LOCATION, LibC.INSTANCE.getenv(SSL_CERT_FILE_LOCATION)); + } + } } From 8277628ee168931c25e543b0f97fa6372ef94e1a Mon Sep 17 00:00:00 2001 From: 
ejeffrli <144148373+ejeffrli@users.noreply.github.com> Date: Fri, 23 Aug 2024 13:49:50 -0400 Subject: [PATCH 12/87] update v2-master with msk dependency change (#2208) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- athena-gcs/pom.xml | 2 +- athena-msk/pom.xml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/athena-gcs/pom.xml b/athena-gcs/pom.xml index 6b61c42aa3..50e66976cb 100644 --- a/athena-gcs/pom.xml +++ b/athena-gcs/pom.xml @@ -75,7 +75,7 @@ com.google.cloud google-cloud-storage - 2.41.0 + 2.42.0 diff --git a/athena-msk/pom.xml b/athena-msk/pom.xml index 094b04b79c..81881d0156 100644 --- a/athena-msk/pom.xml +++ b/athena-msk/pom.xml @@ -66,7 +66,7 @@ software.amazon.msk aws-msk-iam-auth - 2.2.0 + 2.1.1 From 500861b9a68ed4e6dc839a23c50aba8c9c396464 Mon Sep 17 00:00:00 2001 From: Jeffrey Lin Date: Mon, 26 Aug 2024 23:31:54 +0000 Subject: [PATCH 13/87] Panama phase 1 squashed commit --- athena-aws-cmdb/athena-aws-cmdb.yaml | 150 ++++++++++--- athena-aws-cmdb/pom.xml | 6 +- .../aws/cmdb/AwsCmdbCompositeHandler.java | 3 +- .../athena-cloudera-hive.yaml | 168 ++++++++++---- athena-cloudera-hive/pom.xml | 10 +- .../cloudera/HiveCompositeHandler.java | 3 +- .../athena-cloudera-impala.yaml | 169 ++++++++++---- athena-cloudera-impala/pom.xml | 11 +- .../cloudera/ImpalaCompositeHandler.java | 3 +- .../athena-cloudwatch-metrics.yaml | 138 ++++++++++-- athena-cloudwatch-metrics/pom.xml | 6 +- .../metrics/MetricsCompositeHandler.java | 3 +- athena-cloudwatch/athena-cloudwatch.yaml | 86 ++++--- athena-cloudwatch/pom.xml | 8 +- .../CloudwatchCompositeHandler.java | 3 +- athena-datalakegen2/athena-datalakegen2.yaml | 180 +++++++++++---- athena-datalakegen2/pom.xml | 10 +- .../DataLakeGen2CompositeHandler.java | 3 +- athena-db2-as400/athena-db2-as400.yaml | 180 +++++++++++---- 
athena-db2-as400/pom.xml | 10 +- .../db2as400/Db2As400CompositeHandler.java | 3 +- athena-db2/athena-db2.yaml | 180 +++++++++++---- athena-db2/pom.xml | 11 +- .../connectors/db2/Db2CompositeHandler.java | 3 +- athena-docdb/athena-docdb.yaml | 179 +++++++++++---- athena-docdb/pom.xml | 8 +- .../docdb/DocDBCompositeHandler.java | 3 +- .../docdb/DocDBMetadataHandler.java | 13 +- .../connectors/docdb/DocDBRecordHandler.java | 16 +- athena-dynamodb/athena-dynamodb.yaml | 124 +++++----- athena-dynamodb/pom.xml | 10 +- .../dynamodb/DynamoDBCompositeHandler.java | 3 +- .../athena-elasticsearch.yaml | 211 ++++++++++++------ athena-elasticsearch/pom.xml | 10 +- .../AwsRestHighLevelClientFactory.java | 23 +- .../ElasticsearchCompositeHandler.java | 3 +- .../ElasticsearchCredential.java | 73 ++++++ .../ElasticsearchCredentialProvider.java | 63 ++++++ .../ElasticsearchMetadataHandler.java | 70 +++++- .../ElasticsearchRecordHandler.java | 8 +- athena-example/athena-example.yaml | 4 +- athena-example/pom.xml | 6 +- athena-federation-integ-test/README.md | 2 +- athena-federation-integ-test/pom.xml | 6 +- athena-federation-sdk-tools/pom.xml | 6 +- .../athena-federation-sdk.yaml | 4 +- athena-federation-sdk/pom.xml | 10 +- .../connector/lambda/GlueConnectionUtils.java | 112 ++++++++++ athena-gcs/athena-gcs.yaml | 164 ++++++++++---- athena-gcs/pom.xml | 10 +- .../connectors/gcs/GcsCompositeHandler.java | 5 +- .../athena-google-bigquery.yaml | 186 +++++++++++---- athena-google-bigquery/pom.xml | 38 +--- .../bigquery/BigQueryCompositeHandler.java | 5 +- athena-hbase/athena-hbase.yaml | 195 +++++++++++----- athena-hbase/pom.xml | 8 +- .../hbase/HbaseCompositeHandler.java | 3 +- .../hbase/HbaseConnectionFactory.java | 3 +- .../connection/HbaseConnectionFactory.java | 3 +- .../hbase/integ/HbaseTableUtils.java | 3 +- .../athena-hortonworks-hive.yaml | 168 ++++++++++---- athena-hortonworks-hive/pom.xml | 11 +- .../hortonworks/HiveCompositeHandler.java | 3 +- athena-jdbc/pom.xml | 8 +- 
.../MultiplexingJdbcCompositeHandler.java | 9 +- .../DatabaseConnectionConfigBuilder.java | 60 +++-- .../DatabaseConnectionConfigBuilderTest.java | 39 ++++ athena-kafka/athena-kafka.yaml | 102 +++++++-- athena-kafka/pom.xml | 9 +- athena-msk/athena-msk.yaml | 81 +++++-- athena-msk/pom.xml | 8 +- .../msk/AmazonMskCompositeHandler.java | 3 +- athena-mysql/athena-mysql.yaml | 125 +++++++---- athena-mysql/pom.xml | 10 +- .../mysql/MySqlCompositeHandler.java | 3 +- athena-neptune/athena-neptune.yaml | 199 ++++++++++++----- .../docs/aws-glue-sample-scripts/RDF.md | 2 +- .../docs/neptune-connector-setup/README.md | 12 +- athena-neptune/pom.xml | 6 +- .../neptune/NeptuneCompositeHandler.java | 3 +- athena-oracle/athena-oracle.yaml | 169 ++++++++++---- athena-oracle/pom.xml | 13 +- .../oracle/OracleCompositeHandler.java | 3 +- .../oracle/OracleJdbcConnectionFactory.java | 2 +- athena-postgresql/athena-postgresql.yaml | 132 +++++++---- athena-postgresql/pom.xml | 10 +- .../PostGreSqlCompositeHandler.java | 3 +- athena-redis/athena-redis.yaml | 181 +++++++++++---- athena-redis/pom.xml | 8 +- .../redis/RedisCompositeHandler.java | 3 +- athena-redshift/athena-redshift.yaml | 91 +++++--- athena-redshift/pom.xml | 10 +- athena-saphana/athena-saphana.yaml | 176 +++++++++++---- athena-saphana/pom.xml | 10 +- .../saphana/SaphanaCompositeHandler.java | 3 +- athena-snowflake/athena-snowflake.yaml | 178 +++++++++++---- athena-snowflake/pom.xml | 10 +- .../snowflake/SnowflakeCompositeHandler.java | 3 +- athena-sqlserver/athena-sqlserver.yaml | 125 +++++++---- athena-sqlserver/pom.xml | 10 +- .../sqlserver/SqlServerCompositeHandler.java | 3 +- athena-synapse/athena-synapse.yaml | 97 ++++++-- athena-synapse/pom.xml | 10 +- .../synapse/SynapseCompositeHandler.java | 3 +- athena-teradata/athena-teradata.yaml | 184 +++++++++++---- athena-teradata/pom.xml | 12 +- .../teradata/TeradataCompositeHandler.java | 4 +- .../teradata/TeradataMetadataHandler.java | 2 +- 
athena-timestream/athena-timestream.yaml | 152 ++++++++++--- athena-timestream/pom.xml | 8 +- .../TimestreamCompositeHandler.java | 3 +- athena-tpcds/athena-tpcds.yaml | 132 +++++++++-- athena-tpcds/pom.xml | 6 +- .../tpcds/TPCDSCompositeHandler.java | 3 +- athena-udfs/athena-udfs.yaml | 8 +- athena-udfs/pom.xml | 6 +- athena-vertica/athena-vertica.yaml | 205 ++++++++++++----- athena-vertica/pom.xml | 9 +- .../vertica/VerticaCompositeHandler.java | 3 +- pom.xml | 4 +- tools/validate_connector.sh | 2 +- .../app/lib/stacks/opensearch-stack.ts | 2 +- .../app/lib/stacks/rds-generic-stack.ts | 2 +- .../app/lib/stacks/redshift-stack.ts | 2 +- 124 files changed, 4274 insertions(+), 1526 deletions(-) create mode 100644 athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchCredential.java create mode 100644 athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchCredentialProvider.java create mode 100644 athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/GlueConnectionUtils.java diff --git a/athena-aws-cmdb/athena-aws-cmdb.yaml b/athena-aws-cmdb/athena-aws-cmdb.yaml index 03b66b5ff0..03e9982c08 100644 --- a/athena-aws-cmdb/athena-aws-cmdb.yaml +++ b/athena-aws-cmdb/athena-aws-cmdb.yaml @@ -10,10 +10,10 @@ Metadata: Labels: - athena-federation HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2022.47.1 + SemanticVersion: 2024.18.2 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: - AthenaCatalogName: + LambdaFunctionName: Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' Type: String AllowedPattern: ^[a-z0-9-_]{1,64}$ @@ -24,6 +24,18 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' 
Type: String Default: athena-spill + GlueConnection: + Description: "(Optional) Name of glue connection storing connection details for Federated Data source." + Type: String + Default: "" + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." + Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -36,47 +48,127 @@ Parameters: Description: "WARNING: If set to 'true' encryption for spilled data is disabled." Default: 'false' Type: String - PermissionsBoundaryARN: + PermissionsBoundaryArn: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String + Conditions: - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] + NotHasGlueConnectionAndHasKmsKeyId: !And + - !Not [ !Condition HasGlueConnection ] + - !Condition HasKmsKeyId + Resources: ConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !Ref DisableSpillEncryption - spill_bucket: !Ref SpillBucket - spill_prefix: !Ref SpillPrefix - FunctionName: !Ref AthenaCatalogName + disable_spill_encryption: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption] + spill_bucket: !If [HasGlueConnection, 
!Ref "AWS::NoValue", !Ref SpillBucket] + spill_prefix: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix] + kms_key_id: !If [NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue"] + glue_connection: !If [HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue"] + FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.aws.cmdb.AwsCmdbCompositeHandler" - CodeUri: "./target/athena-aws-cmdb-2022.47.1.jar" + CodeUri: "./target/athena-aws-cmdb-2024.18.2.jar" Description: "Enables Amazon Athena to communicate with various AWS Services, making your resource inventories accessible via SQL." Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] - Policies: - - Statement: + Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - autoscaling:Describe* + - elasticloadbalancing:Describe* + - ec2:Describe* + - elasticmapreduce:Describe* + - elasticmapreduce:List* + - rds:Describe* + - rds:ListTagsForResource + - athena:GetQueryExecution + - s3:ListAllMyBuckets + - s3:ListBucket + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - 
s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - !If + - HasGlueConnection - Action: - - autoscaling:Describe* - - elasticloadbalancing:Describe* - - ec2:Describe* - - elasticmapreduce:Describe* - - elasticmapreduce:List* - - rds:Describe* - - rds:ListTagsForResource - - athena:GetQueryExecution - - s3:ListAllMyBuckets - - s3:ListBucket - - athena:GetQueryExecution + - glue:GetConnection Effect: Allow - Resource: '*' - Version: '2012-10-17' - #S3CrudPolicy allows our connector to spill large responses to S3. You can optionally replace this pre-made policy - #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. - - S3CrudPolicy: - BucketName: !Ref SpillBucket \ No newline at end of file + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + - !Ref "AWS::NoValue" + Roles: + - !Ref FunctionRole + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-aws-cmdb/pom.xml b/athena-aws-cmdb/pom.xml index 90c1ad7b59..c9e6bdf137 100644 --- a/athena-aws-cmdb/pom.xml +++ b/athena-aws-cmdb/pom.xml @@ -3,16 +3,16 @@ aws-athena-query-federation com.amazonaws - 2022.47.1 + 2024.18.2 4.0.0 athena-aws-cmdb - 2022.47.1 + 2024.18.2 
com.amazonaws aws-athena-federation-sdk - 2022.47.1 + 2024.18.2 withdep diff --git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbCompositeHandler.java b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbCompositeHandler.java index 7b4653cb6b..a7d58bf7ae 100644 --- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbCompositeHandler.java +++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbCompositeHandler.java @@ -19,6 +19,7 @@ */ package com.amazonaws.athena.connectors.aws.cmdb; +import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -30,6 +31,6 @@ public class AwsCmdbCompositeHandler { public AwsCmdbCompositeHandler() { - super(new AwsCmdbMetadataHandler(System.getenv()), new AwsCmdbRecordHandler(System.getenv())); + super(new AwsCmdbMetadataHandler(GlueConnectionUtils.getGlueConnection()), new AwsCmdbRecordHandler(GlueConnectionUtils.getGlueConnection())); } } diff --git a/athena-cloudera-hive/athena-cloudera-hive.yaml b/athena-cloudera-hive/athena-cloudera-hive.yaml index 719467e498..490a97d9e9 100644 --- a/athena-cloudera-hive/athena-cloudera-hive.yaml +++ b/athena-cloudera-hive/athena-cloudera-hive.yaml @@ -10,7 +10,7 @@ Metadata: Labels: - athena-federation HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2022.47.1 + SemanticVersion: 2024.18.2 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: LambdaFunctionName: @@ -20,9 +20,10 @@ Parameters: DefaultConnectionString: Description: 'The default connection string is used when catalog is "lambda:${LambdaFunctionName}". Catalog specific Connection Strings can be added later. Format: ${DatabaseType}://${NativeJdbcConnectionString}.' 
Type: String - SecretNamePrefix: - Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' - Type: String + Default: "" + SecretName: + Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' + Type: String SpillBucket: Description: 'The name of the bucket where this function can spill data.' Type: String @@ -30,6 +31,18 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill + GlueConnection: + Description: "(Optional) Name of glue connection storing connection details for Federated Data source." + Type: String + Default: "" + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." + Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. 
(min 1 - 900 max)' Default: 900 @@ -48,63 +61,140 @@ Parameters: SubnetIds: Description: 'One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' Type: 'List' - PermissionsBoundaryARN: + PermissionsBoundaryArn: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String Conditions: - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + CreateKmsPolicy: !And [!Condition NotHasLambdaRole, !Condition HasKmsKeyId] + HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] + NotHasGlueConnectionAndHasKmsKeyId: !And + - !Not [ !Condition HasGlueConnection ] + - !Condition HasKmsKeyId Resources: JdbcConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !Ref DisableSpillEncryption - spill_bucket: !Ref SpillBucket - spill_prefix: !Ref SpillPrefix - default: !Ref DefaultConnectionString + default: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultConnectionString ] + disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] + spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] + spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] + kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] + glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.cloudera.HiveMuxCompositeHandler" - CodeUri: "./target/athena-cloudera-hive-2022.47.1.jar" - Description: "Enables Amazon Athena to communicate 
with Coludera Hive using JDBC" + CodeUri: "./target/athena-cloudera-hive-2024.18.2.jar" + Description: "Enables Amazon Athena to communicate with Cloudera Hive using JDBC" Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] - Policies: - - Statement: - - Action: - - secretsmanager:GetSecretValue - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' - Version: '2012-10-17' - - Statement: - - Action: - - logs:CreateLogGroup - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' - Version: '2012-10-17' - - Statement: + Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] + VpcConfig: + SecurityGroupIds: !Ref SecurityGroupIds + SubnetIds: !Ref SubnetIds + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' + - Action: + - logs:CreateLogGroup + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' - Action: - logs:CreateLogStream - logs:PutLogEvents Effect: Allow Resource: !Sub 
'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' - Version: '2012-10-17' - - Statement: - Action: - athena:GetQueryExecution - s3:ListAllMyBuckets Effect: Allow Resource: '*' - Version: '2012-10-17' - #S3CrudPolicy allows our connector to spill large responses to S3. You can optionally replace this pre-made policy - #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. - - S3CrudPolicy: - BucketName: !Ref SpillBucket - #VPCAccessPolicy allows our connector to run in a VPC so that it can access your data source. - - VPCAccessPolicy: {} - VpcConfig: - SecurityGroupIds: !Ref SecurityGroupIds - SubnetIds: !Ref SubnetIds \ No newline at end of file + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - !If + - HasGlueConnection + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + - !Ref "AWS::NoValue" + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub 
"arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-cloudera-hive/pom.xml b/athena-cloudera-hive/pom.xml index 7e4dfdbcc3..396f3061d8 100644 --- a/athena-cloudera-hive/pom.xml +++ b/athena-cloudera-hive/pom.xml @@ -3,11 +3,11 @@ aws-athena-query-federation com.amazonaws - 2022.47.1 + 2024.18.2 4.0.0 athena-cloudera-hive - 2022.47.1 + 2024.18.2 2.6.23.1027 @@ -15,13 +15,13 @@ com.amazonaws athena-federation-integ-test - 2022.47.1 + 2024.18.2 test com.amazonaws athena-jdbc - 2022.47.1 + 2024.18.2 Hive @@ -31,7 +31,7 @@ com.amazonaws athena-jdbc - 2022.47.1 + 2024.18.2 test-jar test diff --git a/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveCompositeHandler.java b/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveCompositeHandler.java index fc97001c78..e22a0a01af 100644 --- a/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveCompositeHandler.java +++ b/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveCompositeHandler.java @@ -19,6 +19,7 @@ */ package com.amazonaws.athena.connectors.cloudera; +import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -32,6 +33,6 @@ public class HiveCompositeHandler { public HiveCompositeHandler() { - super(new HiveMetadataHandler(System.getenv()), new HiveRecordHandler(System.getenv())); + super(new HiveMetadataHandler(GlueConnectionUtils.getGlueConnection()), new HiveRecordHandler(GlueConnectionUtils.getGlueConnection())); } } diff --git a/athena-cloudera-impala/athena-cloudera-impala.yaml b/athena-cloudera-impala/athena-cloudera-impala.yaml index 32c52cb710..97d936c99a 100644 --- a/athena-cloudera-impala/athena-cloudera-impala.yaml +++ b/athena-cloudera-impala/athena-cloudera-impala.yaml @@ -10,7 +10,7 @@ Metadata: Labels: - 
athena-federation HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2022.47.1 + SemanticVersion: 2024.18.2 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: LambdaFunctionName: @@ -20,9 +20,10 @@ Parameters: DefaultConnectionString: Description: 'The default connection string is used when catalog is "lambda:${LambdaFunctionName}". Catalog specific Connection Strings can be added later. Format: ${DatabaseType}://${NativeJdbcConnectionString}.' Type: String - SecretNamePrefix: - Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' - Type: String + Default: "" + SecretName: + Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' + Type: String SpillBucket: Description: 'The name of the bucket where this function can spill data.' Type: String @@ -30,6 +31,10 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill + GlueConnection: + Description: "(Optional) Name of glue connection storing connection details for Federated Data source." 
+ Type: String + Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -48,69 +53,147 @@ Parameters: SubnetIds: Description: 'One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' Type: 'List' - PermissionsBoundaryARN: + PermissionsBoundaryArn: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String - LambdaEncryptionKmsKeyARN: - Description: "(Optional) The KMS Key ARN used for encrypting your Lambda environment variables." - Default: "" + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." + Type: String + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" Type: String + Default: "" Conditions: - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] - HasLambdaEncryptionKmsKeyARN: !Not [ !Equals [ !Ref LambdaEncryptionKmsKeyARN, "" ] ] + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + CreateKmsPolicy: !And [!Condition NotHasLambdaRole, !Condition HasKmsKeyId] + HasGlueConnection: !Not [!Equals [!Ref GlueConnection, ""]] + NotHasGlueConnectionAndHasKmsKeyId: !And + - !Not [ !Condition HasGlueConnection ] + - !Condition HasKmsKeyId Resources: JdbcConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !Ref DisableSpillEncryption - spill_bucket: !Ref SpillBucket - spill_prefix: !Ref SpillPrefix - default: !Ref DefaultConnectionString + default: !If [ 
HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultConnectionString ] + disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] + spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] + spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] + kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] + glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.cloudera.ImpalaMuxCompositeHandler" - CodeUri: "./target/athena-cloudera-impala-2022.47.1.jar" + CodeUri: "./target/athena-cloudera-impala-2024.18.2.jar" Description: "Enables Amazon Athena to communicate with Cloudera Impala using JDBC" Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] - Policies: - - Statement: - - Action: - - secretsmanager:GetSecretValue - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' - Version: '2012-10-17' - - Statement: - - Action: - - logs:CreateLogGroup - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' - Version: '2012-10-17' - - Statement: + Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] + VpcConfig: + SecurityGroupIds: !Ref SecurityGroupIds + SubnetIds: !Ref SubnetIds + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - 
lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' + - Action: + - logs:CreateLogGroup + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' - Action: - logs:CreateLogStream - logs:PutLogEvents Effect: Allow Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' - Version: '2012-10-17' - - Statement: - Action: - athena:GetQueryExecution - s3:ListAllMyBuckets Effect: Allow Resource: '*' - Version: '2012-10-17' - #S3CrudPolicy allows our connector to spill large responses to S3. You can optionally replace this pre-made policy - #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. - - S3CrudPolicy: - BucketName: !Ref SpillBucket - #VPCAccessPolicy allows our connector to run in a VPC so that it can access your data source. 
- - VPCAccessPolicy: {} - VpcConfig: - SecurityGroupIds: !Ref SecurityGroupIds - SubnetIds: !Ref SubnetIds - KmsKeyArn: !If [ HasLambdaEncryptionKmsKeyARN, !Ref LambdaEncryptionKmsKeyARN, !Ref "AWS::NoValue" ] + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - !If + - HasGlueConnection + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + - !Ref "AWS::NoValue" + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole diff --git a/athena-cloudera-impala/pom.xml b/athena-cloudera-impala/pom.xml index a771183326..190047d5d7 100644 --- a/athena-cloudera-impala/pom.xml +++ b/athena-cloudera-impala/pom.xml @@ -3,11 +3,11 @@ aws-athena-query-federation com.amazonaws - 2022.47.1 + 2024.18.2 4.0.0 athena-cloudera-impala - 2022.47.1 + 2024.18.2 2.6.32.1041 @@ -15,13 +15,13 @@ com.amazonaws athena-federation-integ-test - 2022.47.1 + 2024.18.2 test com.amazonaws athena-jdbc - 2022.47.1 + 2024.18.2 
Impala @@ -31,11 +31,10 @@ com.amazonaws athena-jdbc - 2022.47.1 + 2024.18.2 test-jar test - org.mockito mockito-core diff --git a/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaCompositeHandler.java b/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaCompositeHandler.java index ecb9576da5..6413ca0f95 100644 --- a/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaCompositeHandler.java +++ b/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaCompositeHandler.java @@ -20,6 +20,7 @@ */ package com.amazonaws.athena.connectors.cloudera; +import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -33,6 +34,6 @@ public class ImpalaCompositeHandler { public ImpalaCompositeHandler() { - super(new ImpalaMetadataHandler(System.getenv()), new ImpalaRecordHandler(System.getenv())); + super(new ImpalaMetadataHandler(GlueConnectionUtils.getGlueConnection()), new ImpalaRecordHandler(GlueConnectionUtils.getGlueConnection())); } } diff --git a/athena-cloudwatch-metrics/athena-cloudwatch-metrics.yaml b/athena-cloudwatch-metrics/athena-cloudwatch-metrics.yaml index ed0bd724bc..925ac76b76 100644 --- a/athena-cloudwatch-metrics/athena-cloudwatch-metrics.yaml +++ b/athena-cloudwatch-metrics/athena-cloudwatch-metrics.yaml @@ -10,10 +10,10 @@ Metadata: Labels: - athena-federation HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2022.47.1 + SemanticVersion: 2024.18.2 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: - AthenaCatalogName: + LambdaFunctionName: Description: 'This is the name of the lambda function that will be created. 
This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' Type: String AllowedPattern: ^[a-z0-9-_]{1,64}$ @@ -24,6 +24,10 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill + GlueConnection: + Description: "(Optional) Name of glue connection storing connection details for Federated Data source." + Type: String + Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -36,41 +40,129 @@ Parameters: Description: "WARNING: If set to 'true' encryption for spilled data is disabled." Default: 'false' Type: String - PermissionsBoundaryARN: + PermissionsBoundaryArn: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
+ Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" + Conditions: - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] + NotHasGlueConnectionAndHasKmsKeyId: !And + - !Not [ !Condition HasGlueConnection ] + - !Condition HasKmsKeyId + Resources: ConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !Ref DisableSpillEncryption - spill_bucket: !Ref SpillBucket - spill_prefix: !Ref SpillPrefix - FunctionName: !Ref AthenaCatalogName + disable_spill_encryption: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption] + spill_bucket: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket] + spill_prefix: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix] + kms_key_id: !If [NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue"] + glue_connection: !If [HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue"] + FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.cloudwatch.metrics.MetricsCompositeHandler" - CodeUri: "./target/athena-cloudwatch-metrics-2022.47.1.jar" + CodeUri: "./target/athena-cloudwatch-metrics-2024.18.2.jar" Description: "Enables Amazon Athena to communicate with Cloudwatch Metrics, making your metrics data accessible via SQL" Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] - Policies: - - Statement: + Role: !If [NotHasLambdaRole, 
!GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - cloudwatch:Describe* + - cloudwatch:Get* + - cloudwatch:List* + - athena:GetQueryExecution + - s3:ListAllMyBuckets + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - !If + - HasGlueConnection - Action: - - cloudwatch:Describe* - - cloudwatch:Get* - - cloudwatch:List* - - athena:GetQueryExecution - - s3:ListAllMyBuckets + - glue:GetConnection Effect: Allow - Resource: '*' - Version: '2012-10-17' - #S3CrudPolicy allows our connector to spill large responses to S3. You can optionally replace this pre-made policy - #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. 
- - S3CrudPolicy: - BucketName: !Ref SpillBucket \ No newline at end of file + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + - !Ref "AWS::NoValue" + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-cloudwatch-metrics/pom.xml b/athena-cloudwatch-metrics/pom.xml index 6c8bff216e..2ed910a861 100644 --- a/athena-cloudwatch-metrics/pom.xml +++ b/athena-cloudwatch-metrics/pom.xml @@ -3,16 +3,16 @@ aws-athena-query-federation com.amazonaws - 2022.47.1 + 2024.18.2 4.0.0 athena-cloudwatch-metrics - 2022.47.1 + 2024.18.2 com.amazonaws aws-athena-federation-sdk - 2022.47.1 + 2024.18.2 withdep diff --git a/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsCompositeHandler.java b/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsCompositeHandler.java index 7f1e429660..2fe68ea0d0 100644 --- a/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsCompositeHandler.java +++ b/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsCompositeHandler.java @@ -19,6 +19,7 @@ */ package com.amazonaws.athena.connectors.cloudwatch.metrics; +import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -30,6 +31,6 @@ public class MetricsCompositeHandler { public MetricsCompositeHandler() { - 
super(new MetricsMetadataHandler(System.getenv()), new MetricsRecordHandler(System.getenv())); + super(new MetricsMetadataHandler(GlueConnectionUtils.getGlueConnection()), new MetricsRecordHandler(GlueConnectionUtils.getGlueConnection())); } } diff --git a/athena-cloudwatch/athena-cloudwatch.yaml b/athena-cloudwatch/athena-cloudwatch.yaml index 2f03b6cea5..c187a79e4b 100644 --- a/athena-cloudwatch/athena-cloudwatch.yaml +++ b/athena-cloudwatch/athena-cloudwatch.yaml @@ -10,10 +10,10 @@ Metadata: Labels: - athena-federation HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2022.47.1 + SemanticVersion: 2024.18.2 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: - AthenaCatalogName: + LambdaFunctionName: Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' Type: String AllowedPattern: ^[a-z0-9-_]{1,64}$ @@ -24,6 +24,10 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill + GlueConnection: + Description: "(Optional) Name of glue connection storing connection details for Federated Data source." + Type: String + Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -40,20 +44,24 @@ Parameters: Description: "WARNING: If set to 'true' encryption for spilled data is disabled." Default: 'false' Type: String - KMSKeyId: + KmsKeyId: Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
Type: String Default: "" - PermissionsBoundaryARN: + PermissionsBoundaryArn: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String Conditions: - HasKMSKeyId: !Not [!Equals [!Ref KMSKeyId, ""]] + HasKmsKeyId: !Not [!Equals [!Ref KmsKeyId, ""]] NotHasLambdaRole: !Equals [!Ref LambdaRole, ""] - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] - CreateKMSPolicy: !And [ !Condition HasKMSKeyId, !Condition NotHasLambdaRole ] + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] + NotHasGlueConnectionAndHasKmsKeyId: !And + - !Not [ !Condition HasGlueConnection ] + - !Condition HasKmsKeyId Resources: ConnectorConfig: @@ -61,13 +69,14 @@ Resources: Properties: Environment: Variables: - disable_spill_encryption: !Ref DisableSpillEncryption - spill_bucket: !Ref SpillBucket - spill_prefix: !Ref SpillPrefix - kms_key_id: !If [HasKMSKeyId, !Ref KMSKeyId, !Ref "AWS::NoValue"] - FunctionName: !Ref AthenaCatalogName + disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] + spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] + spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] + kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] + glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] + FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.cloudwatch.CloudwatchCompositeHandler" - CodeUri: "./target/athena-cloudwatch-2022.47.1.jar" + CodeUri: "./target/athena-cloudwatch-2024.18.2.jar" Description: "Enables Amazon Athena to communicate with Cloudwatch, making your log accessible via SQL" Runtime: 
java11 Timeout: !Ref LambdaTimeout @@ -78,7 +87,7 @@ Resources: Condition: NotHasLambdaRole Type: AWS::IAM::Role Properties: - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] ManagedPolicyArns: - "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" AssumeRolePolicyDocument: @@ -112,43 +121,52 @@ Resources: Effect: Allow Resource: '*' - Action: - - s3:GetObject - - s3:ListBucket - - s3:GetBucketLocation - - s3:GetObjectVersion - - s3:PutObject - - s3:PutObjectAcl - - s3:GetLifecycleConfiguration - - s3:PutLifecycleConfiguration - - s3:DeleteObject + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject Effect: Allow Resource: - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName} - - bucketName: - Ref: SpillBucket + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName}/* - - bucketName: - Ref: SpillBucket + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - !If + - HasGlueConnection + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + - !Ref "AWS::NoValue" Roles: - !Ref FunctionRole - FunctionKMSPolicy: - Condition: CreateKMSPolicy + FunctionKmsPolicy: + Condition: CreateKmsPolicy Type: "AWS::IAM::Policy" Properties: - PolicyName: FunctionKMSPolicy + PolicyName: FunctionKmsPolicy PolicyDocument: Version: 2012-10-17 Statement: - Action: - - kms:GenerateRandom + - kms:GenerateRandom Effect: Allow Resource: '*' - Action: - kms:GenerateDataKey Effect: 
Allow - Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KMSKeyId}" + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" Roles: - !Ref FunctionRole diff --git a/athena-cloudwatch/pom.xml b/athena-cloudwatch/pom.xml index 79757d6167..5b892aa5e1 100644 --- a/athena-cloudwatch/pom.xml +++ b/athena-cloudwatch/pom.xml @@ -3,22 +3,22 @@ aws-athena-query-federation com.amazonaws - 2022.47.1 + 2024.18.2 4.0.0 athena-cloudwatch - 2022.47.1 + 2024.18.2 com.amazonaws aws-athena-federation-sdk - 2022.47.1 + 2024.18.2 withdep com.amazonaws athena-federation-integ-test - 2022.47.1 + 2024.18.2 test diff --git a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchCompositeHandler.java b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchCompositeHandler.java index bf8b9ee1e5..561a7a25cd 100644 --- a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchCompositeHandler.java +++ b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchCompositeHandler.java @@ -19,6 +19,7 @@ */ package com.amazonaws.athena.connectors.cloudwatch; +import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -30,6 +31,6 @@ public class CloudwatchCompositeHandler { public CloudwatchCompositeHandler() { - super(new CloudwatchMetadataHandler(System.getenv()), new CloudwatchRecordHandler(System.getenv())); + super(new CloudwatchMetadataHandler(GlueConnectionUtils.getGlueConnection()), new CloudwatchRecordHandler(GlueConnectionUtils.getGlueConnection())); } } diff --git a/athena-datalakegen2/athena-datalakegen2.yaml b/athena-datalakegen2/athena-datalakegen2.yaml index bf89304fe3..c94d9aea66 100644 --- a/athena-datalakegen2/athena-datalakegen2.yaml +++ b/athena-datalakegen2/athena-datalakegen2.yaml @@ -12,7 +12,7 @@ Metadata: - athena-federation - jdbc 
HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2022.47.1 + SemanticVersion: 2024.18.2 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: LambdaFunctionName: @@ -22,7 +22,8 @@ Parameters: DefaultConnectionString: Description: 'The default connection string is used when catalog is "lambda:${LambdaFunctionName}". Catalog specific Connection Strings can be added later. Format: ${DatabaseType}://${NativeJdbcConnectionString}.' Type: String - SecretNamePrefix: + Default: "" + SecretName: Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' Type: String SpillBucket: @@ -32,6 +33,10 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill + GlueConnection: + Description: "(Optional) Name of glue connection storing connection details for Federated Data source." + Type: String + Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -52,65 +57,152 @@ Parameters: Description: '(Optional) One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. 
subnet1,subnet2)' Type: CommaDelimitedList Default: "" - PermissionsBoundaryARN: + PermissionsBoundaryArn: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." + Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" + Conditions: - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] HasSecurityGroups: !Not [ !Equals [ !Join ["", !Ref SecurityGroupIds], "" ] ] HasSubnets: !Not [ !Equals [ !Join ["", !Ref SubnetIds], "" ] ] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] + NotHasGlueConnectionAndHasKmsKeyId: !And + - !Not [ !Condition HasGlueConnection ] + - !Condition HasKmsKeyId + Resources: JdbcConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !Ref DisableSpillEncryption - spill_bucket: !Ref SpillBucket - spill_prefix: !Ref SpillPrefix - default: !Ref DefaultConnectionString + default: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultConnectionString ] + disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] + spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] + spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] + kms_key_id: !If [ 
NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] + glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.datalakegen2.DataLakeGen2MuxCompositeHandler" - CodeUri: "./target/athena-datalakegen2-2022.47.1.jar" + CodeUri: "./target/athena-datalakegen2-2024.18.2.jar" Description: "Enables Amazon Athena to communicate with DataLake Gen2 using JDBC" Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] - Policies: - - Statement: - - Action: - - secretsmanager:GetSecretValue - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' - Version: '2012-10-17' - - Statement: - - Action: - - logs:CreateLogGroup - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' - Version: '2012-10-17' - - Statement: - - Action: - - logs:CreateLogStream - - logs:PutLogEvents - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' - Version: '2012-10-17' - - Statement: - - Action: - - athena:GetQueryExecution - - s3:ListAllMyBuckets - Effect: Allow - Resource: '*' - Version: '2012-10-17' - #S3CrudPolicy allows our connector to spill large responses to S3. You can optionally replace this pre-made policy - #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. - - S3CrudPolicy: - BucketName: !Ref SpillBucket - #VPCAccessPolicy allows our connector to run in a VPC so that it can access your data source. 
- - VPCAccessPolicy: {} + Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] VpcConfig: SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] - SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] \ No newline at end of file + SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' + - Action: + - logs:CreateLogGroup + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' + - Action: + - logs:CreateLogStream + - logs:PutLogEvents + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' + - Action: + - athena:GetQueryExecution + - s3:ListAllMyBuckets + Effect: Allow + Resource: '*' + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - 
s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - !If + - HasGlueConnection + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + - !Ref "AWS::NoValue" + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-datalakegen2/pom.xml b/athena-datalakegen2/pom.xml index c72c6c4813..043c1e4647 100644 --- a/athena-datalakegen2/pom.xml +++ b/athena-datalakegen2/pom.xml @@ -3,27 +3,27 @@ aws-athena-query-federation com.amazonaws - 2022.47.1 + 2024.18.2 4.0.0 athena-datalakegen2 - 2022.47.1 + 2024.18.2 com.amazonaws athena-federation-integ-test - 2022.47.1 + 2024.18.2 test com.amazonaws athena-jdbc - 2022.47.1 + 2024.18.2 com.amazonaws athena-jdbc - 2022.47.1 + 2024.18.2 test-jar test diff --git a/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2CompositeHandler.java b/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2CompositeHandler.java index 62cfce965b..c9be952c40 100644 --- a/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2CompositeHandler.java +++ 
b/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2CompositeHandler.java @@ -19,6 +19,7 @@ */ package com.amazonaws.athena.connectors.datalakegen2; +import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -31,6 +32,6 @@ public class DataLakeGen2CompositeHandler extends CompositeHandler { public DataLakeGen2CompositeHandler() { - super(new DataLakeGen2MetadataHandler(System.getenv()), new DataLakeGen2RecordHandler(System.getenv())); + super(new DataLakeGen2MetadataHandler(GlueConnectionUtils.getGlueConnection()), new DataLakeGen2RecordHandler(GlueConnectionUtils.getGlueConnection())); } } diff --git a/athena-db2-as400/athena-db2-as400.yaml b/athena-db2-as400/athena-db2-as400.yaml index 838cb59229..b108bed968 100644 --- a/athena-db2-as400/athena-db2-as400.yaml +++ b/athena-db2-as400/athena-db2-as400.yaml @@ -13,7 +13,7 @@ Metadata: - athena-federation - jdbc HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2022.47.1 + SemanticVersion: 2024.18.2 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: LambdaFunctionName: @@ -23,7 +23,8 @@ Parameters: DefaultConnectionString: Description: 'The default connection string is used when catalog is "lambda:${LambdaFunctionName}". Catalog specific Connection Strings can be added later. Format: ${DatabaseType}://${NativeJdbcConnectionString}.' Type: String - SecretNamePrefix: + Default: "" + SecretName: Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". 
If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' Type: String SpillBucket: @@ -33,6 +34,10 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill + GlueConnection: + Description: "(Optional) Name of glue connection storing connection details for Federated Data source." + Type: String + Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -51,63 +56,150 @@ Parameters: SubnetIds: Description: 'One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' Type: 'List' - PermissionsBoundaryARN: + PermissionsBoundaryArn: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
+ Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" + Conditions: - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] + HasKmsKeyId: !Not [!Equals [!Ref KmsKeyId, ""]] + NotHasLambdaRole: !Equals [!Ref LambdaRoleArn, ""] + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] + CreateKmsPolicy: !And [!Condition HasKmsKeyId, !Condition NotHasLambdaRole] + HasGlueConnection: !Not [!Equals [!Ref GlueConnection, ""]] + NotHasGlueConnectionAndHasKmsKeyId: !And + - !Not [!Condition HasGlueConnection] + - !Condition HasKmsKeyId + Resources: JdbcConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !Ref DisableSpillEncryption - spill_bucket: !Ref SpillBucket - spill_prefix: !Ref SpillPrefix - default: !Ref DefaultConnectionString + disable_spill_encryption: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption] + spill_bucket: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket] + spill_prefix: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix] + kms_key_id: !If [NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue"] + glue_connection: !If [HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue"] + default: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultConnectionString] FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.db2as400.Db2As400MuxCompositeHandler" - CodeUri: "./target/athena-db2-as400-2022.47.1.jar" + CodeUri: "./target/athena-db2-as400-2024.18.2.jar" Description: "Enables Amazon Athena to communicate with DB2 on iSeries (AS400) using JDBC" Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] - Policies: - - Statement: - - Action: - 
- secretsmanager:GetSecretValue - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' - Version: '2012-10-17' - - Statement: - - Action: - - logs:CreateLogGroup - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' - Version: '2012-10-17' - - Statement: - - Action: - - logs:CreateLogStream - - logs:PutLogEvents - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' - Version: '2012-10-17' - - Statement: - - Action: - - athena:GetQueryExecution - - s3:ListAllMyBuckets - Effect: Allow - Resource: '*' - Version: '2012-10-17' - #S3CrudPolicy allows our connector to spill large responses to S3. You can optionally replace this pre-made policy - #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. - - S3CrudPolicy: - BucketName: !Ref SpillBucket - #VPCAccessPolicy allows our connector to run in a VPC so that it can access your data source. 
- - VPCAccessPolicy: {} + Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] VpcConfig: SecurityGroupIds: !Ref SecurityGroupIds - SubnetIds: !Ref SubnetIds \ No newline at end of file + SubnetIds: !Ref SubnetIds + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' + - Action: + - logs:CreateLogGroup + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' + - Action: + - logs:CreateLogStream + - logs:PutLogEvents + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' + - Action: + - athena:GetQueryExecution + - s3:ListAllMyBuckets + Effect: Allow + Resource: '*' + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - 
bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - !If + - HasGlueConnection + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + - !Ref "AWS::NoValue" + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-db2-as400/pom.xml b/athena-db2-as400/pom.xml index 7c458b8caf..9bac277d2d 100644 --- a/athena-db2-as400/pom.xml +++ b/athena-db2-as400/pom.xml @@ -3,27 +3,27 @@ aws-athena-query-federation com.amazonaws - 2022.47.1 + 2024.18.2 4.0.0 athena-db2-as400 - 2022.47.1 + 2024.18.2 com.amazonaws athena-federation-integ-test - 2022.47.1 + 2024.18.2 test com.amazonaws athena-jdbc - 2022.47.1 + 2024.18.2 com.amazonaws athena-jdbc - 2022.47.1 + 2024.18.2 test-jar test diff --git a/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400CompositeHandler.java b/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400CompositeHandler.java index e0066d295e..136e1357c8 100644 --- a/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400CompositeHandler.java +++ b/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400CompositeHandler.java @@ -19,6 +19,7 @@ */ package com.amazonaws.athena.connectors.db2as400; +import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import 
com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -31,6 +32,6 @@ public class Db2As400CompositeHandler extends CompositeHandler { public Db2As400CompositeHandler() { - super(new Db2As400MetadataHandler(System.getenv()), new Db2As400RecordHandler(System.getenv())); + super(new Db2As400MetadataHandler(GlueConnectionUtils.getGlueConnection()), new Db2As400RecordHandler(GlueConnectionUtils.getGlueConnection())); } } diff --git a/athena-db2/athena-db2.yaml b/athena-db2/athena-db2.yaml index 283fd36ab6..f2a98fba2f 100644 --- a/athena-db2/athena-db2.yaml +++ b/athena-db2/athena-db2.yaml @@ -13,7 +13,7 @@ Metadata: - athena-federation - jdbc HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2022.47.1 + SemanticVersion: 2024.18.2 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: LambdaFunctionName: @@ -23,7 +23,8 @@ Parameters: DefaultConnectionString: Description: 'The default connection string is used when catalog is "lambda:${LambdaFunctionName}". Catalog specific Connection Strings can be added later. Format: ${DatabaseType}://${NativeJdbcConnectionString}.' Type: String - SecretNamePrefix: + Default: "" + SecretName: Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' Type: String SpillBucket: @@ -33,6 +34,10 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' 
Type: String Default: athena-spill + GlueConnection: + Description: "(Optional) Name of glue connection storing connection details for Federated Data source." + Type: String + Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -51,63 +56,150 @@ Parameters: SubnetIds: Description: 'One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' Type: 'List' - PermissionsBoundaryARN: + PermissionsBoundaryArn: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." + Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" + Conditions: - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] + NotHasGlueConnectionAndHasKmsKeyId: !And + - !Not [ !Condition HasGlueConnection ] + - !Condition HasKmsKeyId + Resources: JdbcConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !Ref DisableSpillEncryption - spill_bucket: !Ref SpillBucket - spill_prefix: !Ref SpillPrefix - default: !Ref DefaultConnectionString + default: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref 
DefaultConnectionString ] + disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] + spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] + spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] + kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] + glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.db2.Db2MuxCompositeHandler" - CodeUri: "./target/athena-db2-2022.47.1.jar" + CodeUri: "./target/athena-db2-2024.18.2.jar" Description: "Enables Amazon Athena to communicate with DB2 using JDBC" Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] - Policies: - - Statement: - - Action: - - secretsmanager:GetSecretValue - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' - Version: '2012-10-17' - - Statement: - - Action: - - logs:CreateLogGroup - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' - Version: '2012-10-17' - - Statement: - - Action: - - logs:CreateLogStream - - logs:PutLogEvents - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' - Version: '2012-10-17' - - Statement: - - Action: - - athena:GetQueryExecution - - s3:ListAllMyBuckets - Effect: Allow - Resource: '*' - Version: '2012-10-17' - #S3CrudPolicy allows our connector to spill large responses to S3. You can optionally replace this pre-made policy - #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. 
- - S3CrudPolicy: - BucketName: !Ref SpillBucket - #VPCAccessPolicy allows our connector to run in a VPC so that it can access your data source. - - VPCAccessPolicy: {} + Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] VpcConfig: SecurityGroupIds: !Ref SecurityGroupIds - SubnetIds: !Ref SubnetIds \ No newline at end of file + SubnetIds: !Ref SubnetIds + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' + - Action: + - logs:CreateLogGroup + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' + - Action: + - logs:CreateLogStream + - logs:PutLogEvents + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' + - Action: + - athena:GetQueryExecution + - s3:ListAllMyBuckets + Effect: Allow + Resource: '*' + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - 
s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - "arn:${AWS::Partition}:s3:::${bucketName}" + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - "arn:${AWS::Partition}:s3:::${bucketName}/*" + - bucketName: + Ref: SpillBucket + - !If + - HasGlueConnection + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + - !Ref "AWS::NoValue" + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-db2/pom.xml b/athena-db2/pom.xml index fbe105f1b7..25dac1feea 100644 --- a/athena-db2/pom.xml +++ b/athena-db2/pom.xml @@ -3,27 +3,27 @@ aws-athena-query-federation com.amazonaws - 2022.47.1 + 2024.18.2 4.0.0 athena-db2 - 2022.47.1 + 2024.18.2 com.amazonaws athena-federation-integ-test - 2022.47.1 + 2024.18.2 test com.amazonaws athena-jdbc - 2022.47.1 + 2024.18.2 com.amazonaws athena-jdbc - 2022.47.1 + 2024.18.2 test-jar test @@ -59,7 +59,6 @@ ${mockito.version} test - diff --git a/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2CompositeHandler.java b/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2CompositeHandler.java index 4affa252af..9c0ccec19a 100644 --- a/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2CompositeHandler.java +++ b/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2CompositeHandler.java @@ -19,6 +19,7 @@ */ package com.amazonaws.athena.connectors.db2; +import 
com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -31,6 +32,6 @@ public class Db2CompositeHandler extends CompositeHandler { public Db2CompositeHandler() { - super(new Db2MetadataHandler(System.getenv()), new Db2RecordHandler(System.getenv())); + super(new Db2MetadataHandler(GlueConnectionUtils.getGlueConnection()), new Db2RecordHandler(GlueConnectionUtils.getGlueConnection())); } } diff --git a/athena-docdb/athena-docdb.yaml b/athena-docdb/athena-docdb.yaml index 8be16edf43..dd8d6fb7eb 100644 --- a/athena-docdb/athena-docdb.yaml +++ b/athena-docdb/athena-docdb.yaml @@ -10,10 +10,10 @@ Metadata: Labels: - athena-federation HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2022.47.1 + SemanticVersion: 2024.18.2 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: - AthenaCatalogName: + LambdaFunctionName: Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' Type: String AllowedPattern: ^[a-z0-9-_]{1,64}$ @@ -24,6 +24,10 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill + GlueConnection: + Description: "(Optional) Name of glue connection storing connection details for Federated Data source." + Type: String + Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -42,65 +46,156 @@ Parameters: SubnetIds: Description: 'One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' Type: 'List' - SecretNameOrPrefix: + SecretName: Description: 'The name or prefix of a set of names within Secrets Manager that this function should have access to. (e.g. hbase-*).' 
Type: String - DocDBConnectionString: + DefaultDocdb: Description: 'The DocDB connection details to use by default if not catalog specific connection is defined and optionally using SecretsManager (e.g. ${secret_name}).' Type: String Default: "e.g. mongodb://:@:/?ssl=true&ssl_ca_certs=rds-combined-ca-bundle.pem&replicaSet=rs0" - PermissionsBoundaryARN: + PermissionsBoundaryArn: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." + Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" + Conditions: - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] + HasKmsKeyId: !Not [!Equals [!Ref KmsKeyId, ""]] + NotHasLambdaRole: !Equals [!Ref LambdaRoleArn, ""] + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] + CreateKmsPolicy: !And [!Condition HasKmsKeyId, !Condition NotHasLambdaRole] + HasGlueConnection: !Not [!Equals [!Ref GlueConnection, ""]] + NotHasGlueConnectionAndHasKmsKeyId: !And + - !Not [!Condition HasGlueConnection] + - !Condition HasKmsKeyId + Resources: ConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !Ref DisableSpillEncryption - spill_bucket: !Ref SpillBucket - spill_prefix: !Ref SpillPrefix - default_docdb: !Ref DocDBConnectionString - FunctionName: !Ref AthenaCatalogName + disable_spill_encryption: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption] + spill_bucket: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket] + spill_prefix: !If [HasGlueConnection, 
!Ref "AWS::NoValue", !Ref SpillPrefix] + kms_key_id: !If [NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue"] + glue_connection: !If [HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue"] + default_docdb: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultDocdb] + FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.docdb.DocDBCompositeHandler" - CodeUri: "./target/athena-docdb-2022.47.1.jar" + CodeUri: "./target/athena-docdb-2024.18.2.jar" Description: "Enables Amazon Athena to communicate with DocumentDB, making your DocumentDB data accessible via SQL." Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] - Policies: - - Statement: - - Action: - - secretsmanager:GetSecretValue - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:*:*:secret:${SecretNameOrPrefix}' - Version: '2012-10-17' - - Statement: - - Action: - - glue:GetTableVersions - - glue:GetPartitions - - glue:GetTables - - glue:GetTableVersion - - glue:GetDatabases - - glue:GetTable - - glue:GetPartition - - glue:GetDatabase - - athena:GetQueryExecution - - s3:ListAllMyBuckets - Effect: Allow - Resource: '*' - Version: '2012-10-17' - #S3CrudPolicy allows our connector to spill large responses to S3. You can optionally replace this pre-made policy - #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. - - S3CrudPolicy: - BucketName: !Ref SpillBucket - #VPCAccessPolicy allows our connector to run in a VPC so that it can access your data source. 
- - VPCAccessPolicy: {} + Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] VpcConfig: SecurityGroupIds: !Ref SecurityGroupIds - SubnetIds: !Ref SubnetIds \ No newline at end of file + SubnetIds: !Ref SubnetIds + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:*:*:secret:${SecretName}*' + - Action: + - glue:GetTableVersions + - glue:GetPartitions + - glue:GetTables + - glue:GetTableVersion + - glue:GetDatabases + - glue:GetTable + - glue:GetPartition + - glue:GetDatabase + - athena:GetQueryExecution + - s3:ListAllMyBuckets + Effect: Allow + Resource: '*' + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - !If + - HasGlueConnection + - Action: + - glue:GetConnection + 
Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + - !Ref "AWS::NoValue" + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-docdb/pom.xml b/athena-docdb/pom.xml index 5dd645c20a..ca5f71c5fa 100644 --- a/athena-docdb/pom.xml +++ b/athena-docdb/pom.xml @@ -3,16 +3,16 @@ aws-athena-query-federation com.amazonaws - 2022.47.1 + 2024.18.2 4.0.0 athena-docdb - 2022.47.1 + 2024.18.2 com.amazonaws aws-athena-federation-sdk - 2022.47.1 + 2024.18.2 withdep @@ -25,7 +25,7 @@ com.amazonaws athena-federation-integ-test - 2022.47.1 + 2024.18.2 test diff --git a/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBCompositeHandler.java b/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBCompositeHandler.java index 2810491031..0d64948410 100644 --- a/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBCompositeHandler.java +++ b/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBCompositeHandler.java @@ -19,6 +19,7 @@ */ package com.amazonaws.athena.connectors.docdb; +import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -30,6 +31,6 @@ public class DocDBCompositeHandler { public DocDBCompositeHandler() { - super(new DocDBMetadataHandler(System.getenv()), new DocDBRecordHandler(System.getenv())); + super(new DocDBMetadataHandler(GlueConnectionUtils.getGlueConnection()), 
new DocDBRecordHandler(GlueConnectionUtils.getGlueConnection())); } } diff --git a/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBMetadataHandler.java b/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBMetadataHandler.java index 5a25b6f50c..4963d0c160 100644 --- a/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBMetadataHandler.java +++ b/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBMetadataHandler.java @@ -91,6 +91,8 @@ public class DocDBMetadataHandler //The Env variable name used to store the default DocDB connection string if no catalog specific //env variable is set. private static final String DEFAULT_DOCDB = "default_docdb"; + //The env secret_name to use if defined + private static final String SECRET_NAME = "secret_name"; //The Glue table property that indicates that a table matching the name of an DocDB table //is indeed enabled for use by this connector. private static final String DOCDB_METADATA_FLAG = "docdb-metadata-flag"; @@ -130,10 +132,19 @@ protected DocDBMetadataHandler( private MongoClient getOrCreateConn(MetadataRequest request) { - String endpoint = resolveSecrets(getConnStr(request)); + String connStr = getConnStr(request); + if (configOptions.containsKey(SECRET_NAME) && !hasEmbeddedSecret(connStr)) { + connStr = connStr.substring(0, 10) + "${" + configOptions.get(SECRET_NAME) + "}@" + connStr.substring(10); + } + String endpoint = resolveSecrets(connStr); return connectionFactory.getOrCreateConn(endpoint); } + private boolean hasEmbeddedSecret(String connStr) + { + return connStr.contains("${"); + } + /** * Retrieves the DocDB connection details from an env variable matching the catalog name, if no such * env variable exists we fall back to the default env variable defined by DEFAULT_DOCDB. 
diff --git a/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandler.java b/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandler.java index ecba05bc18..955eca8f8b 100644 --- a/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandler.java +++ b/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandler.java @@ -68,6 +68,8 @@ public class DocDBRecordHandler //Used to denote the 'type' of this connector for diagnostic purposes. private static final String SOURCE_TYPE = "documentdb"; + //The env secret_name to use if defined + private static final String SECRET_NAME = "secret_name"; //Controls the page size for fetching batches of documents from the MongoDB client. private static final int MONGO_QUERY_BATCH_SIZE = 100; @@ -106,14 +108,22 @@ protected DocDBRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager */ private MongoClient getOrCreateConn(Split split) { - String conStr = split.getProperty(DOCDB_CONN_STR); - if (conStr == null) { + String connStr = split.getProperty(DOCDB_CONN_STR); + if (connStr == null) { throw new RuntimeException(DOCDB_CONN_STR + " Split property is null! 
Unable to create connection."); } - String endpoint = resolveSecrets(conStr); + if (configOptions.containsKey(SECRET_NAME) && !hasEmbeddedSecret(connStr)) { + connStr = connStr.substring(0, 10) + "${" + configOptions.get(SECRET_NAME) + "}@" + connStr.substring(10); + } + String endpoint = resolveSecrets(connStr); return connectionFactory.getOrCreateConn(endpoint); } + private boolean hasEmbeddedSecret(String connStr) + { + return connStr.contains("${"); + } + private static Map documentAsMap(Document document, boolean caseInsensitive) { logger.info("documentAsMap: caseInsensitive: {}", caseInsensitive); diff --git a/athena-dynamodb/athena-dynamodb.yaml b/athena-dynamodb/athena-dynamodb.yaml index 83390efb5c..348e25cb56 100644 --- a/athena-dynamodb/athena-dynamodb.yaml +++ b/athena-dynamodb/athena-dynamodb.yaml @@ -10,10 +10,10 @@ Metadata: Labels: - athena-federation HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2022.47.1 + SemanticVersion: 2024.18.2 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: - AthenaCatalogName: + LambdaFunctionName: Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' Type: String AllowedPattern: ^[a-z0-9-_]{1,64}$ @@ -24,6 +24,10 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill + GlueConnection: + Description: "(Optional) Name of glue connection storing connection details for Federated Data source." + Type: String + Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -32,7 +36,7 @@ Parameters: Description: 'Lambda memory in MB (min 128 - 3008 max).' 
Default: 3008 Type: Number - LambdaRole: + LambdaRoleArn: Description: "(Optional) A custom role to be used by the Connector lambda" Type: String Default: "" @@ -40,20 +44,24 @@ Parameters: Description: "WARNING: If set to 'true' encryption for spilled data is disabled." Default: 'false' Type: String - KMSKeyId: + KmsKeyId: Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." Type: String Default: "" - PermissionsBoundaryARN: + PermissionsBoundaryArn: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String Conditions: - HasKMSKeyId: !Not [!Equals [!Ref KMSKeyId, ""]] - NotHasLambdaRole: !Equals [!Ref LambdaRole, ""] - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] - CreateKMSPolicy: !And [!Condition HasKMSKeyId, !Condition NotHasLambdaRole] + HasKmsKeyId: !Not [!Equals [!Ref KmsKeyId, ""]] + NotHasLambdaRole: !Equals [!Ref LambdaRoleArn, ""] + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] + CreateKmsPolicy: !And [!Condition HasKmsKeyId, !Condition NotHasLambdaRole] + HasGlueConnection: !Not [!Equals [!Ref GlueConnection, ""]] + NotHasGlueConnectionAndHasKmsKeyId: !And + - !Not [!Condition HasGlueConnection] + - !Condition HasKmsKeyId Resources: ConnectorConfig: @@ -61,24 +69,25 @@ Resources: Properties: Environment: Variables: - disable_spill_encryption: !Ref DisableSpillEncryption - spill_bucket: !Ref SpillBucket - spill_prefix: !Ref SpillPrefix - kms_key_id: !If [HasKMSKeyId, !Ref KMSKeyId, !Ref "AWS::NoValue"] - FunctionName: !Ref AthenaCatalogName + disable_spill_encryption: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption] + spill_bucket: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref 
SpillBucket] + spill_prefix: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix] + kms_key_id: !If [NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue"] + glue_connection: !If [HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue"] + FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.dynamodb.DynamoDBCompositeHandler" - CodeUri: "./target/athena-dynamodb-2022.47.1.jar" + CodeUri: "./target/athena-dynamodb-2024.18.2.jar" Description: "Enables Amazon Athena to communicate with DynamoDB, making your tables accessible via SQL" Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRole] + Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] FunctionRole: Condition: NotHasLambdaRole Type: AWS::IAM::Role Properties: - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] ManagedPolicyArns: - "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" AssumeRolePolicyDocument: @@ -100,62 +109,71 @@ Resources: Version: 2012-10-17 Statement: - Action: - - dynamodb:DescribeTable - - dynamodb:ListSchemas - - dynamodb:ListTables - - dynamodb:Query - - dynamodb:Scan - - dynamodb:PartiQLSelect - - glue:GetTableVersions - - glue:GetPartitions - - glue:GetTables - - glue:GetTableVersion - - glue:GetDatabases - - glue:GetTable - - glue:GetPartition - - glue:GetDatabase - - athena:GetQueryExecution - - s3:ListAllMyBuckets + - dynamodb:DescribeTable + - dynamodb:ListSchemas + - dynamodb:ListTables + - dynamodb:Query + - dynamodb:Scan + - dynamodb:PartiQLSelect + - glue:GetTableVersions + - glue:GetPartitions + - glue:GetTables + - glue:GetTableVersion + - glue:GetDatabases + - glue:GetTable + - glue:GetPartition + - glue:GetDatabase + - 
athena:GetQueryExecution + - s3:ListAllMyBuckets Effect: Allow Resource: '*' - Action: - - s3:GetObject - - s3:ListBucket - - s3:GetBucketLocation - - s3:GetObjectVersion - - s3:PutObject - - s3:PutObjectAcl - - s3:GetLifecycleConfiguration - - s3:PutLifecycleConfiguration - - s3:DeleteObject + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject Effect: Allow Resource: - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName} - - bucketName: - Ref: SpillBucket + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName}/* - - bucketName: - Ref: SpillBucket + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - !If + - HasGlueConnection + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + - !Ref "AWS::NoValue" Roles: - !Ref FunctionRole - FunctionKMSPolicy: - Condition: CreateKMSPolicy + FunctionKmsPolicy: + Condition: CreateKmsPolicy Type: "AWS::IAM::Policy" Properties: - PolicyName: FunctionKMSPolicy + PolicyName: FunctionKmsPolicy PolicyDocument: Version: 2012-10-17 Statement: - Action: - - kms:GenerateRandom + - kms:GenerateRandom Effect: Allow Resource: '*' - Action: - kms:GenerateDataKey Effect: Allow - Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KMSKeyId}" + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" Roles: - !Ref FunctionRole diff --git a/athena-dynamodb/pom.xml b/athena-dynamodb/pom.xml index 15b7fbff6e..685128b26f 100644 --- a/athena-dynamodb/pom.xml +++ b/athena-dynamodb/pom.xml @@ -3,11 +3,11 @@ aws-athena-query-federation com.amazonaws - 2022.47.1 + 
2024.18.2 4.0.0 athena-dynamodb - 2022.47.1 + 2024.18.2 @@ -23,13 +23,13 @@ com.amazonaws aws-athena-federation-sdk - 2022.47.1 + 2024.18.2 withdep com.amazonaws athena-federation-integ-test - 2022.47.1 + 2024.18.2 test @@ -107,7 +107,7 @@ com.amazonaws aws-athena-federation-sdk - 2022.47.1 + 2024.18.2 test-jar test diff --git a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBCompositeHandler.java b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBCompositeHandler.java index 5a17e74600..079ac4283c 100644 --- a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBCompositeHandler.java +++ b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBCompositeHandler.java @@ -19,6 +19,7 @@ */ package com.amazonaws.athena.connectors.dynamodb; +import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -30,6 +31,6 @@ public class DynamoDBCompositeHandler { public DynamoDBCompositeHandler() { - super(new DynamoDBMetadataHandler(System.getenv()), new DynamoDBRecordHandler(System.getenv())); + super(new DynamoDBMetadataHandler(GlueConnectionUtils.getGlueConnection()), new DynamoDBRecordHandler(GlueConnectionUtils.getGlueConnection())); } } diff --git a/athena-elasticsearch/athena-elasticsearch.yaml b/athena-elasticsearch/athena-elasticsearch.yaml index 3f0b36000e..b620949c7e 100644 --- a/athena-elasticsearch/athena-elasticsearch.yaml +++ b/athena-elasticsearch/athena-elasticsearch.yaml @@ -10,17 +10,17 @@ Metadata: ReadmeUrl: README.md Labels: ['athena-federation'] HomePageUrl: https://github.com/awslabs/aws-athena-query-federation - SemanticVersion: 2022.47.1 + SemanticVersion: 2024.18.2 SourceCodeUrl: https://github.com/awslabs/aws-athena-query-federation # Parameters are CloudFormation features to pass input # to your template when you create a stack Parameters: - 
AthenaCatalogName: + LambdaFunctionName: Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' Type: String AllowedPattern: ^[a-z0-9-_]{1,64}$ - SecretNamePrefix: + SecretName: Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena Elasticsearch Federation secret names can be prefixed with "AthenaESFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaESFederation*". Parameter value in this case should be "AthenaESFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' Type: String Default: "" @@ -31,6 +31,10 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill + GlueConnection: + Description: "(Optional) Name of glue connection storing connection details for Federated Data source." + Type: String + Default: "" LambdaTimeout: Description: "Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)" Default: 900 @@ -63,29 +67,38 @@ Parameters: Description: "timeout period (in seconds) for scroll timeout used in the retrieval of documents (default is 60 seconds)." Default: 60 Type: Number - IsVPCAccess: - AllowedValues: - - true - - false - Default: false - Description: "If ElasticSearch cluster is in VPC select true, [true, false] (default is false)" - Type: String SecurityGroupIds: - Description: '**If IsVPCAccess is True**. Provide one or more SecurityGroup IDs corresponding to the SecurityGroup that should be applied to the Lambda function. (e.g. sg1,sg2,sg3)' + Description: '(Optional) Provide one or more SecurityGroup IDs corresponding to the SecurityGroup that should be applied to the Lambda function. (e.g. 
sg1,sg2,sg3)' Type: CommaDelimitedList Default: "" SubnetIds: - Description: '**If IsVPCAccess is True**. Provide one or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' + Description: '(Optional) Provide one or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' Type: CommaDelimitedList Default: "" - PermissionsBoundaryARN: + PermissionsBoundaryArn: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." + Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" Conditions: - IsVPCAccessSelected: !Equals [!Ref IsVPCAccess, true] - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] + HasSecurityGroups: !Not [ !Equals [ !Join [ "", !Ref SecurityGroupIds ], "" ] ] + HasSubnets: !Not [ !Equals [ !Join [ "", !Ref SubnetIds ], "" ] ] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] + NotHasGlueConnectionAndHasKmsKeyId: !And + - !Not [ !Condition HasGlueConnection ] + - !Condition HasKmsKeyId Resources: ConnectorConfig: @@ -93,61 +106,127 @@ Resources: Properties: Environment: Variables: - disable_spill_encryption: !Ref DisableSpillEncryption - spill_bucket: !Ref 
SpillBucket - spill_prefix: !Ref SpillPrefix - auto_discover_endpoint: !Ref AutoDiscoverEndpoint - domain_mapping: !Ref DomainMapping - query_timeout_cluster: !Ref QueryTimeoutCluster - query_timeout_search: !Ref QueryTimeoutSearch - query_scroll_timeout: !Ref QueryScrollTimeout - FunctionName: !Sub "${AthenaCatalogName}" + disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] + spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] + spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] + kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] + glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] + auto_discover_endpoint: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref AutoDiscoverEndpoint ] + domain_mapping: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DomainMapping ] + query_timeout_cluster: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref QueryTimeoutCluster ] + query_timeout_search: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref QueryTimeoutSearch ] + query_scroll_timeout: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref QueryScrollTimeout ] + FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.elasticsearch.ElasticsearchCompositeHandler" - CodeUri: "./target/athena-elasticsearch-2022.47.1.jar" + CodeUri: "./target/athena-elasticsearch-2024.18.2.jar" Description: "The Elasticsearch Lambda Connector provides Athena users the ability to query data stored on Elasticsearch clusters." 
Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] - Policies: - - Statement: - - Action: - - secretsmanager:GetSecretValue - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' - Version: '2012-10-17' - - Statement: + Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] + VpcConfig: + SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] + SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' + - Action: + - es:List* + - es:Describe* + - es:ESHttp* + - glue:GetTableVersions + - glue:GetPartitions + - glue:GetTables + - glue:GetTableVersion + - glue:GetDatabases + - glue:GetTable + - glue:GetPartition + - glue:GetDatabase + - athena:GetQueryExecution + - s3:ListAllMyBuckets + Effect: Allow + Resource: '*' + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: 
'*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - !If + - HasGlueConnection - Action: - - es:List* - - es:Describe* - - es:ESHttp* - - glue:GetTableVersions - - glue:GetPartitions - - glue:GetTables - - glue:GetTableVersion - - glue:GetDatabases - - glue:GetTable - - glue:GetPartition - - glue:GetDatabase - - athena:GetQueryExecution - - s3:ListAllMyBuckets + - glue:GetConnection Effect: Allow - Resource: '*' - Version: '2012-10-17' - #S3CrudPolicy allows our connector to spill large responses to S3. You can optionally replace this pre-made policy - #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. 
- - S3CrudPolicy: - BucketName: !Ref SpillBucket - VpcConfig: - SecurityGroupIds: - !If - - IsVPCAccessSelected - - - !Ref SecurityGroupIds - - !Ref "AWS::NoValue" - SubnetIds: - !If - - IsVPCAccessSelected - - - !Ref SubnetIds - - !Ref "AWS::NoValue" \ No newline at end of file + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + - !Ref "AWS::NoValue" + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-elasticsearch/pom.xml b/athena-elasticsearch/pom.xml index a502e7ada2..9abba795ef 100644 --- a/athena-elasticsearch/pom.xml +++ b/athena-elasticsearch/pom.xml @@ -3,16 +3,16 @@ aws-athena-query-federation com.amazonaws - 2022.47.1 + 2024.18.2 4.0.0 athena-elasticsearch - 2022.47.1 + 2024.18.2 com.amazonaws aws-athena-federation-sdk - 2022.47.1 + 2024.18.2 withdep @@ -25,7 +25,7 @@ com.amazonaws athena-federation-integ-test - 2022.47.1 + 2024.18.2 test @@ -191,7 +191,7 @@ com.amazonaws aws-athena-federation-sdk - 2022.47.1 + 2024.18.2 test-jar test diff --git a/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/AwsRestHighLevelClientFactory.java b/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/AwsRestHighLevelClientFactory.java index 422c3884dc..ae9c225647 100644 --- a/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/AwsRestHighLevelClientFactory.java +++ 
b/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/AwsRestHighLevelClientFactory.java @@ -84,6 +84,18 @@ public synchronized AwsRestHighLevelClient getOrCreateClient(String endpoint) return client; } + public synchronized AwsRestHighLevelClient getOrCreateClient(String endpoint, String username, String password) + { + AwsRestHighLevelClient client = clientCache.get(endpoint); + + if (client == null) { + client = createClient(endpoint, username, password); + clientCache.put(endpoint, client); + } + + return client; + } + /** * Creates a new Elasticsearch REST client. If useAwsCredentials = true, the client is injected with AWS * credentials. If useAwsCredentials = false and username/password are extracted using the credentialsPattern, @@ -99,12 +111,14 @@ public synchronized AwsRestHighLevelClient getOrCreateClient(String endpoint) private AwsRestHighLevelClient createClient(String endpoint) { if (useAwsCredentials) { + logger.debug("Creating Client using Aws Credentials."); return new AwsRestHighLevelClient.Builder(endpoint) .withCredentials(new DefaultAWSCredentialsProviderChain()).build(); } else { Matcher credentials = credentialsPattern.matcher(endpoint); if (credentials.find()) { + logger.debug("Creating Client using embedded Secret in Connection String."); String usernameAndPassword = credentials.group(); String username = usernameAndPassword.substring(0, usernameAndPassword.indexOf("@")); String password = usernameAndPassword.substring(usernameAndPassword.indexOf("@") + 1, @@ -115,9 +129,14 @@ private AwsRestHighLevelClient createClient(String endpoint) } } - logger.debug("Default client w/o credentials"); - + logger.debug("Creating default client w/o credentials"); // Default client w/o credentials. 
return new AwsRestHighLevelClient.Builder(endpoint).build(); } + + private AwsRestHighLevelClient createClient(String endpoint, String username, String password) + { + logger.debug("Creating Client using credentials provided by Glue Connectionn secret_name property"); + return new AwsRestHighLevelClient.Builder(endpoint).withCredentials(username, password).build(); + } } diff --git a/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchCompositeHandler.java b/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchCompositeHandler.java index 4cc082596a..1b0ba6906d 100644 --- a/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchCompositeHandler.java +++ b/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchCompositeHandler.java @@ -19,6 +19,7 @@ */ package com.amazonaws.athena.connectors.elasticsearch; +import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -30,6 +31,6 @@ public class ElasticsearchCompositeHandler { public ElasticsearchCompositeHandler() { - super(new ElasticsearchMetadataHandler(System.getenv()), new ElasticsearchRecordHandler(System.getenv())); + super(new ElasticsearchMetadataHandler(GlueConnectionUtils.getGlueConnection()), new ElasticsearchRecordHandler(GlueConnectionUtils.getGlueConnection())); } } diff --git a/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchCredential.java b/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchCredential.java new file mode 100644 index 0000000000..92354c5c6e --- /dev/null +++ b/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchCredential.java @@ -0,0 +1,73 @@ +/*- + * #%L + * athena-elasticsearch + * %% + * Copyright (C) 2019 
Amazon Web Services + * %% + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * #L% + */ +package com.amazonaws.athena.connectors.elasticsearch; + +import org.apache.commons.lang3.Validate; + +import java.util.Objects; + +/** + * Encapsulates database connection user name and password information. + */ +public class ElasticsearchCredential +{ + private final String user; + private final String password; + + /** + * @param user Database user name. + * @param password Database password. 
+ */ + public ElasticsearchCredential(String user, String password) + { + this.user = Validate.notBlank(user, "User must not be blank"); + this.password = Validate.notBlank(password, "Password must not be blank"); + } + + public String getUser() + { + return user; + } + + public String getPassword() + { + return password; + } + + @Override + public boolean equals(Object o) + { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ElasticsearchCredential that = (ElasticsearchCredential) o; + return Objects.equals(getUser(), that.getUser()) && + Objects.equals(getPassword(), that.getPassword()); + } + + @Override + public int hashCode() + { + return Objects.hash(getUser(), getPassword()); + } +} diff --git a/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchCredentialProvider.java b/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchCredentialProvider.java new file mode 100644 index 0000000000..90cf3194a5 --- /dev/null +++ b/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchCredentialProvider.java @@ -0,0 +1,63 @@ +/*- + * #%L + * athena-elasticsearch + * %% + * Copyright (C) 2019 Amazon Web Services + * %% + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * #L% + */ +package com.amazonaws.athena.connectors.elasticsearch; + +import com.fasterxml.jackson.databind.ObjectMapper; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +/** + * Encapsulates Elasticsearch secrets deserialization, stored in following JSON format (showing minimal required for extraction): + * + * { + * "username": "${user}", + * "password": "${password}" + * } + * + */ +public class ElasticsearchCredentialProvider +{ + private static final Logger LOGGER = LoggerFactory.getLogger(ElasticsearchCredentialProvider.class); + private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); + + private final ElasticsearchCredential elasticsearchCredential; + + public ElasticsearchCredentialProvider(final String secretString) + { + Map elasticsearchSecrets; + try { + elasticsearchSecrets = OBJECT_MAPPER.readValue(secretString, HashMap.class); + } + catch (IOException ioException) { + throw new RuntimeException("Could not deserialize Elasticsearch credentials into HashMap", ioException); + } + + this.elasticsearchCredential = new ElasticsearchCredential(elasticsearchSecrets.get("username"), elasticsearchSecrets.get("password")); + } + + public ElasticsearchCredential getCredential() + { + return this.elasticsearchCredential; + } +} diff --git a/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchMetadataHandler.java b/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchMetadataHandler.java index c51e32c100..d4d6b1fc33 100644 --- a/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchMetadataHandler.java +++ b/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchMetadataHandler.java @@ -48,6 +48,7 @@ import org.apache.arrow.util.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Field; 
import org.apache.arrow.vector.types.pojo.Schema; +import org.apache.commons.lang3.StringUtils; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.indices.GetDataStreamRequest; import org.elasticsearch.client.indices.GetIndexRequest; @@ -58,6 +59,7 @@ import java.io.IOException; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; @@ -89,8 +91,19 @@ public class ElasticsearchMetadataHandler // this environment variable is fed into the domainSplitter to populate the domainMap where the key = domain-name, // and the value = endpoint. private static final String DOMAIN_MAPPING = "domain_mapping"; + + // Individual domain endpoint which is associated with a Glue Connection + private static final String DOMAIN_ENDPOINT = "domain_endpoint"; + // Secret Name that provides credentials + private static final String SECRET_NAME = "secret_name"; + + // credential keys of secret + protected static final String SECRET_USERNAME = "username"; + protected static final String SECRET_PASSWORD = "password"; + // A Map of the domain-names and their respective endpoints. private Map domainMap; + private Map secretMap; // Env. variable that holds the query timeout period for the Cluster-Health queries. 
private static final String QUERY_TIMEOUT_CLUSTER = "query_timeout_cluster"; @@ -120,12 +133,13 @@ public ElasticsearchMetadataHandler(Map configOptions) { super(SOURCE_TYPE, configOptions); this.awsGlue = getAwsGlue(); + this.secretMap = new HashMap<>(); this.autoDiscoverEndpoint = configOptions.getOrDefault(AUTO_DISCOVER_ENDPOINT, "").equalsIgnoreCase("true"); this.domainMapProvider = new ElasticsearchDomainMapProvider(this.autoDiscoverEndpoint); - this.domainMap = domainMapProvider.getDomainMap(resolveSecrets(configOptions.getOrDefault(DOMAIN_MAPPING, ""))); + this.domainMap = resolveDomainMap(configOptions); this.clientFactory = new AwsRestHighLevelClientFactory(this.autoDiscoverEndpoint); this.glueTypeMapper = new ElasticsearchGlueTypeMapper(); - this.queryTimeout = Long.parseLong(configOptions.getOrDefault(QUERY_TIMEOUT_CLUSTER, "")); + this.queryTimeout = Long.parseLong(configOptions.getOrDefault(QUERY_TIMEOUT_CLUSTER, "10")); } @VisibleForTesting @@ -143,6 +157,7 @@ protected ElasticsearchMetadataHandler( { super(awsGlue, keyFactory, awsSecretsManager, athena, SOURCE_TYPE, spillBucket, spillPrefix, configOptions); this.awsGlue = awsGlue; + this.secretMap = new HashMap<>(); this.domainMapProvider = domainMapProvider; this.domainMap = this.domainMapProvider.getDomainMap(null); this.clientFactory = clientFactory; @@ -150,6 +165,25 @@ protected ElasticsearchMetadataHandler( this.queryTimeout = queryTimeout; } + protected Map resolveDomainMap(Map config) + { + String secretName = config.getOrDefault(SECRET_NAME, ""); + String domainEndpoint = config.getOrDefault(DOMAIN_ENDPOINT, ""); + if (StringUtils.isNotBlank(secretName) && StringUtils.isNotBlank(domainEndpoint)) { + logger.info("Using Secrets Manager provided by Glue Connection secret_name."); + this.secretMap.put(domainEndpoint.split("=")[0], new ElasticsearchCredentialProvider(getSecret(secretName))); + } + else { + logger.info("No secret_name provided as Config property."); + if 
(StringUtils.isBlank(domainEndpoint)) { + domainEndpoint = config.getOrDefault(DOMAIN_MAPPING, ""); + } + domainEndpoint = resolveSecrets(domainEndpoint); + } + + return domainMapProvider.getDomainMap(domainEndpoint); + } + /** * Used to get the list of domains (aka databases) for the Elasticsearch service. * @param allocator Tool for creating and managing Apache Arrow Blocks. @@ -186,7 +220,11 @@ public ListTablesResponse doListTables(BlockAllocator allocator, ListTablesReque logger.debug("doListTables: enter - " + request); String endpoint = getDomainEndpoint(request.getSchemaName()); - AwsRestHighLevelClient client = clientFactory.getOrCreateClient(endpoint); + String domain = request.getSchemaName(); + ElasticsearchCredentialProvider creds = secretMap.get(domain); + String username = creds != null ? creds.getCredential().getUser() : ""; + String password = creds != null ? creds.getCredential().getPassword() : ""; + AwsRestHighLevelClient client = creds != null ? clientFactory.getOrCreateClient(endpoint, username, password) : clientFactory.getOrCreateClient(endpoint); // get regular indices from ES, ignore all system indices starting with period `.` (e.g. .kibana, .tasks, etc...) Stream indicesStream = client.getAliases() .stream() @@ -245,8 +283,9 @@ public GetTableResponse doGetTable(BlockAllocator allocator, GetTableRequest req // Supplement GLUE catalog if not present. 
if (schema == null) { String index = request.getTableName().getTableName(); - String endpoint = getDomainEndpoint(request.getTableName().getSchemaName()); - schema = getSchema(index, endpoint); + String domain = request.getTableName().getSchemaName(); + String endpoint = getDomainEndpoint(domain); + schema = getSchema(index, endpoint, domain); } return new GetTableResponse(request.getCatalogName(), request.getTableName(), @@ -293,9 +332,12 @@ public GetSplitsResponse doGetSplits(BlockAllocator allocator, GetSplitsRequest domain = request.getTableName().getSchemaName(); indx = request.getTableName().getTableName(); } - String endpoint = getDomainEndpoint(domain); - AwsRestHighLevelClient client = clientFactory.getOrCreateClient(endpoint); + + ElasticsearchCredentialProvider creds = secretMap.get(domain); + String username = creds != null ? creds.getCredential().getUser() : ""; + String password = creds != null ? creds.getCredential().getPassword() : ""; + AwsRestHighLevelClient client = creds != null ? clientFactory.getOrCreateClient(endpoint, username, password) : clientFactory.getOrCreateClient(endpoint); // We send index request in case the table name is a data stream, a data stream can contains multiple indices which are created by ES // For non data stream, index name is same as table name GetIndexResponse indexResponse = client.indices().get(new GetIndexRequest(indx), RequestOptions.DEFAULT); @@ -303,7 +345,7 @@ public GetSplitsResponse doGetSplits(BlockAllocator allocator, GetSplitsRequest Set splits = Arrays.stream(indexResponse.getIndices()) .flatMap(index -> getShardsIDsFromES(client, index) // get all shards for an index. 
.stream() - .map(shardId -> new Split(makeSpillLocation(request), makeEncryptionKey(), ImmutableMap.of(domain, endpoint, SHARD_KEY, SHARD_VALUE + shardId.toString(), INDEX_KEY, index))) // make split for each (index + shardId) combination + .map(shardId -> new Split(makeSpillLocation(request), makeEncryptionKey(), ImmutableMap.of(SECRET_USERNAME, username, SECRET_PASSWORD, password, domain, endpoint, SHARD_KEY, SHARD_VALUE + shardId.toString(), INDEX_KEY, index))) // make split for each (index + shardId) combination ) .collect(Collectors.toSet()); @@ -328,17 +370,21 @@ public GetTableResponse doGetQueryPassthroughSchema(BlockAllocator allocator, Ge } queryPassthrough.verify(request.getQueryPassthroughArguments()); String index = request.getQueryPassthroughArguments().get(ElasticsearchQueryPassthrough.INDEX); - String endpoint = getDomainEndpoint(request.getQueryPassthroughArguments().get(ElasticsearchQueryPassthrough.SCHEMA)); - Schema schema = getSchema(index, endpoint); + String domain = request.getQueryPassthroughArguments().get(ElasticsearchQueryPassthrough.SCHEMA); + String endpoint = getDomainEndpoint(domain); + Schema schema = getSchema(index, endpoint, domain); return new GetTableResponse(request.getCatalogName(), request.getTableName(), (schema == null) ? SchemaBuilder.newBuilder().build() : schema, Collections.emptySet()); } - private Schema getSchema(String index, String endpoint) + private Schema getSchema(String index, String endpoint, String domain) { Schema schema; - AwsRestHighLevelClient client = clientFactory.getOrCreateClient(endpoint); + ElasticsearchCredentialProvider creds = secretMap.get(domain); + String username = creds != null ? creds.getCredential().getUser() : ""; + String password = creds != null ? creds.getCredential().getPassword() : ""; + AwsRestHighLevelClient client = creds != null ? 
clientFactory.getOrCreateClient(endpoint, username, password) : clientFactory.getOrCreateClient(endpoint); try { Map mappings = client.getMapping(index); schema = ElasticsearchSchemaUtils.parseMapping(mappings); diff --git a/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchRecordHandler.java b/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchRecordHandler.java index 2307ddcd46..6999587d14 100644 --- a/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchRecordHandler.java +++ b/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchRecordHandler.java @@ -35,6 +35,7 @@ import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder; import org.apache.arrow.util.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Field; +import org.apache.commons.lang3.StringUtils; import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; @@ -96,7 +97,7 @@ public ElasticsearchRecordHandler(Map configOptions) this.typeUtils = new ElasticsearchTypeUtils(); this.clientFactory = new AwsRestHighLevelClientFactory(configOptions.getOrDefault(AUTO_DISCOVER_ENDPOINT, "").equalsIgnoreCase("true")); - this.queryTimeout = Long.parseLong(configOptions.getOrDefault(QUERY_TIMEOUT_SEARCH, "")); + this.queryTimeout = Long.parseLong(configOptions.getOrDefault(QUERY_TIMEOUT_SEARCH, "720")); this.scrollTimeout = Long.parseLong(configOptions.getOrDefault(SCROLL_TIMEOUT, "60")); } @@ -156,13 +157,16 @@ protected void readWithConstraint(BlockSpiller spiller, ReadRecordsRequest recor String endpoint = recordsRequest.getSplit().getProperty(domain); String shard = recordsRequest.getSplit().getProperty(ElasticsearchMetadataHandler.SHARD_KEY); + String username = 
recordsRequest.getSplit().getProperty(ElasticsearchMetadataHandler.SECRET_USERNAME); + String password = recordsRequest.getSplit().getProperty(ElasticsearchMetadataHandler.SECRET_PASSWORD); + boolean useSecret = StringUtils.isNotBlank(username) && StringUtils.isNotBlank(password); logger.info("readWithConstraint - enter - Domain: {}, Index: {}, Mapping: {}, Query: {}", domain, index, recordsRequest.getSchema(), query); long numRows = 0; if (queryStatusChecker.isQueryRunning()) { - AwsRestHighLevelClient client = clientFactory.getOrCreateClient(endpoint); + AwsRestHighLevelClient client = useSecret ? clientFactory.getOrCreateClient(endpoint, username, password) : clientFactory.getOrCreateClient(endpoint); try { // Create field extractors for all data types in the schema. GeneratedRowWriter rowWriter = createFieldExtractors(recordsRequest); diff --git a/athena-example/athena-example.yaml b/athena-example/athena-example.yaml index 93dbec9cab..a86c917a30 100644 --- a/athena-example/athena-example.yaml +++ b/athena-example/athena-example.yaml @@ -10,7 +10,7 @@ Metadata: ReadmeUrl: README.md Labels: ['athena-federation'] HomePageUrl: https://github.com/awslabs/aws-athena-query-federation - SemanticVersion: 2022.47.1 + SemanticVersion: 2024.18.2 SourceCodeUrl: https://github.com/awslabs/aws-athena-query-federation # Parameters are CloudFormation features to pass input @@ -55,7 +55,7 @@ Resources: data_bucket: !Ref DataBucket FunctionName: !Sub "${AthenaCatalogName}" Handler: "com.amazonaws.athena.connectors.example.ExampleCompositeHandler" - CodeUri: "./target/athena-example-2022.47.1.jar" + CodeUri: "./target/athena-example-2024.18.2.jar" Description: "A guided example for writing and deploying your own federated Amazon Athena connector for a custom source." 
Runtime: java11 Timeout: !Ref LambdaTimeout diff --git a/athena-example/pom.xml b/athena-example/pom.xml index e3a84da25e..bcd3ad0ef3 100644 --- a/athena-example/pom.xml +++ b/athena-example/pom.xml @@ -3,16 +3,16 @@ aws-athena-query-federation com.amazonaws - 2022.47.1 + 2024.18.2 4.0.0 athena-example - 2022.47.1 + 2024.18.2 com.amazonaws aws-athena-federation-sdk - 2022.47.1 + 2024.18.2 withdep diff --git a/athena-federation-integ-test/README.md b/athena-federation-integ-test/README.md index 5440cb091d..6b9655b235 100644 --- a/athena-federation-integ-test/README.md +++ b/athena-federation-integ-test/README.md @@ -36,7 +36,7 @@ in most **pom.xml** files (e.g. com.amazonaws athena-federation-integ-test - Current version of the SDK (e.g. 2022.47.1) + Current version of the SDK (e.g. 2024.18.2) test ``` diff --git a/athena-federation-integ-test/pom.xml b/athena-federation-integ-test/pom.xml index dc5db5c2d1..3348f15d21 100644 --- a/athena-federation-integ-test/pom.xml +++ b/athena-federation-integ-test/pom.xml @@ -3,11 +3,11 @@ aws-athena-query-federation com.amazonaws - 2022.47.1 + 2024.18.2 4.0.0 athena-federation-integ-test - 2022.47.1 + 2024.18.2 jar Amazon Athena Query Federation Integ Test @@ -198,7 +198,7 @@ org.apache.commons commons-lang3 - + 3.14.0 diff --git a/athena-federation-sdk-tools/pom.xml b/athena-federation-sdk-tools/pom.xml index 30eb909a1c..b407c4c4b0 100644 --- a/athena-federation-sdk-tools/pom.xml +++ b/athena-federation-sdk-tools/pom.xml @@ -3,18 +3,18 @@ aws-athena-query-federation com.amazonaws - 2022.47.1 + 2024.18.2 4.0.0 athena-federation-sdk-tools jar Amazon Athena Query Federation SDK Tools - 2022.47.1 + 2024.18.2 com.amazonaws aws-athena-federation-sdk - 2022.47.1 + 2024.18.2 withdep diff --git a/athena-federation-sdk/athena-federation-sdk.yaml b/athena-federation-sdk/athena-federation-sdk.yaml index 8499d0bd67..b6892ea3ab 100644 --- a/athena-federation-sdk/athena-federation-sdk.yaml +++ 
b/athena-federation-sdk/athena-federation-sdk.yaml @@ -10,7 +10,7 @@ Metadata: Labels: - athena-federation HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2022.47.1 + SemanticVersion: 2024.18.2 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: AthenaCatalogName: @@ -47,7 +47,7 @@ Resources: spill_prefix: !Ref SpillPrefix FunctionName: !Ref AthenaCatalogName Handler: "com.amazonaws.athena.connector.lambda.examples.ExampleCompositeHandler" - CodeUri: "./target/aws-athena-federation-sdk-2022.47.1-withdep.jar" + CodeUri: "./target/aws-athena-federation-sdk-2024.18.2-withdep.jar" Description: "This connector enables Amazon Athena to communicate with a randomly generated data source." Runtime: java11 Timeout: !Ref LambdaTimeout diff --git a/athena-federation-sdk/pom.xml b/athena-federation-sdk/pom.xml index 4db6670cc7..899a75eaac 100644 --- a/athena-federation-sdk/pom.xml +++ b/athena-federation-sdk/pom.xml @@ -4,11 +4,11 @@ com.amazonaws aws-athena-query-federation - 2022.47.1 + 2024.18.2 com.amazonaws aws-athena-federation-sdk - 2022.47.1 + 2024.18.2 jar Amazon Athena Query Federation SDK The Athena Query Federation SDK defines a set of interfaces and wire protocols that you can implement to enable Athena to delegate portions of it's query execution plan to code that you deploy/write. 
@@ -287,7 +287,7 @@ org.apache.commons commons-lang3 - + 3.14.0 @@ -382,7 +382,7 @@ - + @@ -459,7 +459,7 @@ - + org.apache.maven.plugins maven-source-plugin ${mvn.source.plugin.version} diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/GlueConnectionUtils.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/GlueConnectionUtils.java new file mode 100644 index 0000000000..7062b96f0c --- /dev/null +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/GlueConnectionUtils.java @@ -0,0 +1,112 @@ +/*- + * #%L + * Amazon Athena Query Federation SDK + * %% + * Copyright (C) 2019 - 2023 Amazon Web Services + * %% + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * #L% + */ +package com.amazonaws.athena.connector.lambda; + +import com.amazonaws.ClientConfiguration; +import com.amazonaws.client.builder.AwsClientBuilder; +import com.amazonaws.services.glue.AWSGlue; +import com.amazonaws.services.glue.AWSGlueClientBuilder; +import com.amazonaws.services.glue.model.Connection; +import com.amazonaws.services.glue.model.GetConnectionRequest; +import com.amazonaws.services.glue.model.GetConnectionResult; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.HashMap; +import java.util.Map; +import java.util.stream.Collectors; + +public class GlueConnectionUtils +{ + // config property to store glue connection reference + public static final String DEFAULT_GLUE_CONNECTION = "glue_connection"; + // Connection properties storing athena specific connection details + public static final String GLUE_CONNECTION_ATHENA_PROPERTIES = "AthenaProperties"; + public static final String GLUE_CONNECTION_ATHENA_CONNECTOR_PROPERTIES = "connectorProperties"; + public static final String GLUE_CONNECTION_ATHENA_DRIVER_PROPERTIES = "driverProperties"; + public static final String[] propertySubsets = {GLUE_CONNECTION_ATHENA_CONNECTOR_PROPERTIES, GLUE_CONNECTION_ATHENA_DRIVER_PROPERTIES}; + + private static final int CONNECT_TIMEOUT = 250; + private static final Logger logger = LoggerFactory.getLogger(GlueConnectionUtils.class); + private static HashMap> connectionNameCache = new HashMap<>(); + + private GlueConnectionUtils() + { + } + + public static Map getGlueConnection() + { + HashMap envConfig = new HashMap<>(System.getenv()); + + String glueConnectionName = envConfig.get(DEFAULT_GLUE_CONNECTION); + if (StringUtils.isNotBlank(glueConnectionName)) { + HashMap cachedConfig = connectionNameCache.get(glueConnectionName); + if (cachedConfig == null) { + try { + HashMap> 
athenaPropertiesToMap = new HashMap>(); + + AWSGlue awsGlue = AWSGlueClientBuilder.standard() + .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration( + "https://glue-gamma.ap-south-1.amazonaws.com", "ap-south-1" + )) + .withClientConfiguration(new ClientConfiguration().withConnectionTimeout(CONNECT_TIMEOUT)).build(); + GetConnectionResult glueConnection = awsGlue.getConnection(new GetConnectionRequest().withName(glueConnectionName)); + logger.debug("Successfully retrieved connection {}", glueConnectionName); + Connection connection = glueConnection.getConnection(); + String athenaPropertiesAsString = connection.getConnectionProperties().get(GLUE_CONNECTION_ATHENA_PROPERTIES); + try { + ObjectMapper mapper = new ObjectMapper(); + athenaPropertiesToMap = mapper.readValue(athenaPropertiesAsString, new TypeReference(){}); + logger.debug("Successfully parsed connection properties"); + } + catch (Exception err) { + logger.error("Error Parsing AthenaDriverProperties JSON to Map", err.toString()); + } + for (String subset : propertySubsets) { + if (athenaPropertiesToMap.containsKey(subset)) { + logger.debug("Adding {} subset from Glue Connection config.", subset); + Map properties = athenaPropertiesToMap.get(subset).entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, element -> String.valueOf(element.getValue()))); + logger.debug("Adding the following set of properties to config: {}", properties); + envConfig.putAll(properties); + } + else { + logger.debug("{} properties not included in Glue Connnection config.", subset); + } + } + connectionNameCache.put(glueConnectionName, envConfig); + } + catch (Exception err) { + logger.error("Failed to retrieve connection: {}, and parse the connection properties!", glueConnectionName); + throw new RuntimeException(err.toString()); + } + } + else { + return cachedConfig; + } + } + else { + logger.debug("No Glue Connection name was defined in Environment Variables."); + } + return envConfig; + } +} diff 
--git a/athena-gcs/athena-gcs.yaml b/athena-gcs/athena-gcs.yaml index 20494ba243..9bdb8c40ea 100644 --- a/athena-gcs/athena-gcs.yaml +++ b/athena-gcs/athena-gcs.yaml @@ -10,7 +10,7 @@ Metadata: ReadmeUrl: README.md Labels: ['athena-federation', 'GCS', 'Google-Cloud-Storage', 'parquet', 'csv'] HomePageUrl: https://github.com/awslabs/aws-athena-query-federation - SemanticVersion: 2022.47.1 + SemanticVersion: 2024.18.2 SourceCodeUrl: https://github.com/awslabs/aws-athena-query-federation Parameters: @@ -25,6 +25,9 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill + GlueConnection: + Description: "Name of glue connection storing connection details for Federated Data source." + Type: String LambdaTimeout: Description: "Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)" Default: 900 @@ -37,16 +40,31 @@ Parameters: Description: "WARNING: If set to 'true' encryption for spilled data is disabled." Default: "false" Type: String - GCSSecretName: + SecretManagerGcpCredsName: Description: 'Secret key name in the AWS Secrets Manager.' Type: String - PermissionsBoundaryARN: + PermissionsBoundaryArn: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
+ Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" Conditions: - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] + HasKmsKeyId: !Not [!Equals [!Ref KmsKeyId, ""]] + NotHasLambdaRole: !Equals [!Ref LambdaRoleArn, ""] + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] + CreateKmsPolicy: !And [!Condition HasKmsKeyId, !Condition NotHasLambdaRole] + HasGlueConnection: !Not [!Equals [!Ref GlueConnection, ""]] + NotHasGlueConnectionAndHasKmsKeyId: !And + - !Not [!Condition HasGlueConnection] + - !Condition HasKmsKeyId Resources: AthenaGCSConnector: @@ -54,10 +72,12 @@ Resources: Properties: Environment: Variables: - disable_spill_encryption: !Ref DisableSpillEncryption - spill_bucket: !Ref SpillBucket - spill_prefix: !Ref SpillPrefix - secret_manager_gcp_creds_name: !Ref GCSSecretName + disable_spill_encryption: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption] + spill_bucket: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket] + spill_prefix: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix] + kms_key_id: !If [NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue"] + glue_connection: !If [HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue"] + secret_manager_gcp_creds_name: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref SecretManagerGcpCredsName] FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.gcs.GcsCompositeHandler" CodeUri: "./target/athena-gcs.zip" @@ -65,38 +85,102 @@ Resources: Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] - Policies: - - Statement: - - Action: - - athena:GetQueryExecution - - s3:ListAllMyBuckets - - glue:GetTableVersions - - glue:GetPartitions - - 
glue:GetTables - - glue:GetTableVersion - - glue:GetDatabases - - glue:GetTable - - glue:GetPartition - - glue:GetDatabase - Effect: Allow - Resource: '*' - Version: '2012-10-17' - - Statement: + Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - athena:GetQueryExecution + - s3:ListAllMyBuckets + - glue:GetTableVersions + - glue:GetPartitions + - glue:GetTables + - glue:GetTableVersion + - glue:GetDatabases + - glue:GetTable + - glue:GetPartition + - glue:GetDatabase + Effect: Allow + Resource: '*' + - Action: + - s3:ListBucket + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:s3:::${SpillBucket}' + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:*:*:secret:${SecretManagerGcpCredsName}*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - !If + - HasGlueConnection - Action: - - s3:ListBucket + - glue:GetConnection 
Effect: Allow Resource: - - !Sub 'arn:${AWS::Partition}:s3:::${SpillBucket}' - Version: '2012-10-17' - - Statement: - - Action: - - secretsmanager:GetSecretValue - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:*:*:secret:${GCSSecretName}*' - - S3ReadPolicy: - BucketName: - Ref: SpillBucket - - S3WritePolicy: - BucketName: - Ref: SpillBucket + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + - !Ref "AWS::NoValue" + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-gcs/pom.xml b/athena-gcs/pom.xml index a92c92c90f..4b383500a2 100644 --- a/athena-gcs/pom.xml +++ b/athena-gcs/pom.xml @@ -3,16 +3,16 @@ aws-athena-query-federation com.amazonaws - 2022.47.1 + 2024.18.2 4.0.0 athena-gcs - 2022.47.1 + 2024.18.2 com.amazonaws aws-athena-federation-sdk - 2022.47.1 + 2024.18.2 @@ -81,7 +81,7 @@ com.amazonaws athena-federation-integ-test - 2022.47.1 + 2024.18.2 com.google.guava @@ -90,7 +90,6 @@ test - org.testng testng @@ -115,7 +114,6 @@ ${mockito.version} test - software.amazon.awscdk logs diff --git a/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsCompositeHandler.java b/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsCompositeHandler.java index 1d10ab5ce8..651878eeca 100644 --- a/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsCompositeHandler.java +++ b/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsCompositeHandler.java @@ -19,6 +19,7 @@ */ package 
com.amazonaws.athena.connectors.gcs; +import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; import org.apache.arrow.memory.BufferAllocator; import org.apache.arrow.memory.RootAllocator; @@ -46,9 +47,9 @@ public class GcsCompositeHandler */ public GcsCompositeHandler() throws IOException, CertificateEncodingException, NoSuchAlgorithmException, KeyStoreException { - super(new GcsMetadataHandler(allocator, System.getenv()), new GcsRecordHandler(allocator, System.getenv())); + super(new GcsMetadataHandler(allocator, GlueConnectionUtils.getGlueConnection()), new GcsRecordHandler(allocator, GlueConnectionUtils.getGlueConnection())); installCaCertificate(); - installGoogleCredentialsJsonFile(System.getenv()); + installGoogleCredentialsJsonFile(GlueConnectionUtils.getGlueConnection()); setupNativeEnvironmentVariables(); } } diff --git a/athena-google-bigquery/athena-google-bigquery.yaml b/athena-google-bigquery/athena-google-bigquery.yaml index 0738bc1920..52d7299b16 100644 --- a/athena-google-bigquery/athena-google-bigquery.yaml +++ b/athena-google-bigquery/athena-google-bigquery.yaml @@ -13,21 +13,22 @@ Metadata: - Athena-Federation - Google-SDK HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2022.47.1 + SemanticVersion: 2024.18.2 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: LambdaFunctionName: Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' Type: String AllowedPattern: ^[a-z0-9-_]{1,64}$ - GCPProjectID: - Description: "The project ID within Google Cloud Platform ." + GcpProjectId: + Description: "(Optional if Glue Connection is provided) The project ID within Google Cloud Platform ." 
+ Default: '' Type: String BigQueryEndpoint: Description: "(Optional) BigQuery Private Endpoint" Default: '' Type: String - SecretNamePrefix: + SecretManagerGcpCredsName: Description: "The secret name within AWS Secrets Manager that contains your Google Cloud Platform Credentials." Type: String SpillBucket: @@ -37,6 +38,10 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill + GlueConnection: + Description: "(Optional) Name of glue connection storing connection details for Federated Data source." + Type: String + Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -57,68 +62,155 @@ Parameters: Description: '(Optional) One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' Type: CommaDelimitedList Default: "" - PermissionsBoundaryARN: + PermissionsBoundaryArn: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
+ Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" + Conditions: - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] HasSecurityGroups: !Not [ !Equals [ !Join ["", !Ref SecurityGroupIds], "" ] ] HasSubnets: !Not [ !Equals [ !Join ["", !Ref SubnetIds], "" ] ] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] + NotHasGlueConnectionAndHasKmsKeyId: !And + - !Not [ !Condition HasGlueConnection ] + - !Condition HasKmsKeyId + Resources: AthenaBigQueryConnector: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !Ref DisableSpillEncryption - spill_bucket: !Ref SpillBucket - spill_prefix: !Ref SpillPrefix - secret_manager_gcp_creds_name: !Ref SecretNamePrefix - gcp_project_id: !Ref GCPProjectID - big_query_endpoint: !Ref BigQueryEndpoint + disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] + spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] + spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] + kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] + glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] + secret_manager_gcp_creds_name: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SecretManagerGcpCredsName ] + gcp_project_id: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref GcpProjectId ] + big_query_endpoint: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref BigQueryEndpoint ] GOOGLE_APPLICATION_CREDENTIALS: '/tmp/service-account.json' 
FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.google.bigquery.BigQueryCompositeHandler" - CodeUri: "./target/athena-google-bigquery-2022.47.1.jar" + CodeUri: "./target/athena-google-bigquery-2024.18.2.jar" Description: "Enables Amazon Athena to communicate with BigQuery using Google SDK" Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] - Policies: - - Statement: - - Action: - - secretsmanager:GetSecretValue - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' - Version: '2012-10-17' - - Statement: - - Action: - - logs:CreateLogGroup - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' - Version: '2012-10-17' - - Statement: - - Action: - - logs:CreateLogStream - - logs:PutLogEvents - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' - Version: '2012-10-17' - - Statement: - - Action: - - athena:GetQueryExecution - - s3:ListAllMyBuckets - Effect: Allow - Resource: '*' - Version: '2012-10-17' - #S3CrudPolicy allows our connector to spill large responses to S3. You can optionally replace this pre-made policy - #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. - - S3CrudPolicy: - BucketName: !Ref SpillBucket - #VPCAccessPolicy allows our connector to run in a VPC so that it can access your data source. 
- - VPCAccessPolicy: { } + Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] VpcConfig: SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretManagerGcpCredsName}*' + - Action: + - logs:CreateLogGroup + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' + - Action: + - logs:CreateLogStream + - logs:PutLogEvents + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' + - Action: + - athena:GetQueryExecution + - s3:ListAllMyBuckets + Effect: Allow + Resource: '*' + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - 
arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - !If + - HasGlueConnection + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + - !Ref "AWS::NoValue" + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-google-bigquery/pom.xml b/athena-google-bigquery/pom.xml index 5b7043b86b..9f00bdeedd 100644 --- a/athena-google-bigquery/pom.xml +++ b/athena-google-bigquery/pom.xml @@ -3,42 +3,28 @@ aws-athena-query-federation com.amazonaws - 2022.47.1 + 2024.18.2 4.0.0 athena-google-bigquery - 2022.47.1 + 2024.18.2 com.amazonaws athena-federation-integ-test - 2022.47.1 + 2024.18.2 test com.amazonaws athena-jdbc - 2022.47.1 + 2024.18.2 net.java.dev.jna jna-platform 5.14.0 - - com.amazonaws - athena-jdbc - 2022.47.1 - test-jar - test - - - - com.amazonaws - aws-java-sdk-rds - ${aws-sdk.version} - test - software.amazon.awscdk @@ -67,22 +53,6 @@ - - io.grpc - grpc-api - 1.63.0 - - - com.google.cloud - google-cloud-resourcemanager - 1.45.0 - - - nl.jqno.equalsverifier - equalsverifier - 3.16.1 - test - org.mockito mockito-inline diff --git a/athena-google-bigquery/src/main/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryCompositeHandler.java b/athena-google-bigquery/src/main/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryCompositeHandler.java index 
51c8418ae0..ba94b34315 100644 --- a/athena-google-bigquery/src/main/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryCompositeHandler.java +++ b/athena-google-bigquery/src/main/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryCompositeHandler.java @@ -20,6 +20,7 @@ */ package com.amazonaws.athena.connectors.google.bigquery; +import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; import org.apache.arrow.memory.BufferAllocator; import org.apache.arrow.memory.RootAllocator; @@ -41,8 +42,8 @@ public class BigQueryCompositeHandler public BigQueryCompositeHandler() throws IOException { - super(new BigQueryMetadataHandler(System.getenv()), new BigQueryRecordHandler(System.getenv(), allocator)); - installGoogleCredentialsJsonFile(System.getenv()); + super(new BigQueryMetadataHandler(GlueConnectionUtils.getGlueConnection()), new BigQueryRecordHandler(GlueConnectionUtils.getGlueConnection(), allocator)); + installGoogleCredentialsJsonFile(GlueConnectionUtils.getGlueConnection()); setupNativeEnvironmentVariables(); logger.info("Inside BigQueryCompositeHandler()"); } diff --git a/athena-hbase/athena-hbase.yaml b/athena-hbase/athena-hbase.yaml index 7445029804..2a8b4b56a2 100644 --- a/athena-hbase/athena-hbase.yaml +++ b/athena-hbase/athena-hbase.yaml @@ -10,10 +10,10 @@ Metadata: Labels: - athena-federation HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2022.47.1 + SemanticVersion: 2024.18.2 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: - AthenaCatalogName: + LambdaFunctionName: Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' Type: String AllowedPattern: ^[a-z0-9-_]{1,64}$ @@ -24,6 +24,10 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' 
Type: String Default: athena-spill + GlueConnection: + Description: "(Optional) Name of glue connection storing connection details for Federated Data source." + Type: String + Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -42,19 +46,20 @@ Parameters: SubnetIds: Description: 'One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' Type: 'List' - SecretNameOrPrefix: + SecretName: Description: 'The name or prefix of a set of names within Secrets Manager that this function should have access to. (e.g. hbase-*).' Type: String - HBaseConnectionString: + DefaultHbase: Description: 'The HBase connection details to use by default in the format: master_hostname:hbase_port:zookeeper_port and optionally using SecretsManager (e.g. ${secret_name}).' Type: String - PermissionsBoundaryARN: + Default: "" + PermissionsBoundaryArn: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String KerberosAuthEnabled: Description: 'Kerberos authentication enabled or not' - Default: "false" + Default: "" Type: String KerberosConfigFilesS3Reference: Description: 'The S3 bucket reference where kerberos auth config files are uploaded. Applicable for Kerberos auth' @@ -68,63 +73,149 @@ Parameters: Description: 'Hbase Rpc Protection value for Kerberos authentication' Default: "" Type: String + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
+ Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" + Conditions: - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] + NotHasGlueConnectionAndHasKmsKeyId: !And + - !Not [ !Condition HasGlueConnection ] + - !Condition HasKmsKeyId + Resources: ConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !Ref DisableSpillEncryption - spill_bucket: !Ref SpillBucket - spill_prefix: !Ref SpillPrefix - default_hbase: !Ref HBaseConnectionString - kerberos_auth_enabled: !Ref KerberosAuthEnabled - kerberos_config_files_s3_reference: !Ref KerberosConfigFilesS3Reference - principal_name: !Ref PrincipalName - hbase_rpc_protection: !Ref HbaseRpcProtection - FunctionName: !Ref AthenaCatalogName + disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] + spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] + spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] + kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] + glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] + default_hbase: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultHbase ] + kerberos_auth_enabled: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref KerberosAuthEnabled ] + kerberos_config_files_s3_reference: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref KerberosConfigFilesS3Reference ] + principal_name: !If [ HasGlueConnection, 
!Ref "AWS::NoValue", !Ref PrincipalName ] + hbase_rpc_protection: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref HbaseRpcProtection ] + FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.hbase.HbaseCompositeHandler" - CodeUri: "./target/athena-hbase-2022.47.1.jar" + CodeUri: "./target/athena-hbase-2024.18.2.jar" Description: "Enables Amazon Athena to communicate with HBase, making your HBase data accessible via SQL" Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] - Policies: - - Statement: - - Action: - - secretsmanager:GetSecretValue - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:*:*:secret:${SecretNameOrPrefix}' - Version: '2012-10-17' - - Statement: - - Action: - - glue:GetTableVersions - - glue:GetPartitions - - glue:GetTables - - glue:GetTableVersion - - glue:GetDatabases - - glue:GetTable - - glue:GetPartition - - glue:GetDatabase - - athena:GetQueryExecution - - s3:ListAllMyBuckets - - s3:ListBucket - - s3:GetObject - - s3:GetBucketLocation - - s3:GetObjectVersion - - s3:GetLifecycleConfiguration - Effect: Allow - Resource: '*' - Version: '2012-10-17' - #S3CrudPolicy allows our connector to spill large responses to S3. You can optionally replace this pre-made policy - #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. - - S3CrudPolicy: - BucketName: !Ref SpillBucket - #VPCAccessPolicy allows our connector to run in a VPC so that it can access your data source. 
- - VPCAccessPolicy: {} + Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] VpcConfig: SecurityGroupIds: !Ref SecurityGroupIds - SubnetIds: !Ref SubnetIds \ No newline at end of file + SubnetIds: !Ref SubnetIds + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:*:*:secret:${SecretName}*' + - Action: + - glue:GetTableVersions + - glue:GetPartitions + - glue:GetTables + - glue:GetTableVersion + - glue:GetDatabases + - glue:GetTable + - glue:GetPartition + - glue:GetDatabase + - athena:GetQueryExecution + - s3:ListAllMyBuckets + Effect: Allow + Resource: '*' + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - !If + - HasGlueConnection + - Action: + - glue:GetConnection + 
Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + - !Ref "AWS::NoValue" + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:${AWS::Partition}:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-hbase/pom.xml b/athena-hbase/pom.xml index 2262539e7e..dc62c60e96 100644 --- a/athena-hbase/pom.xml +++ b/athena-hbase/pom.xml @@ -3,11 +3,11 @@ aws-athena-query-federation com.amazonaws - 2022.47.1 + 2024.18.2 4.0.0 athena-hbase - 2022.47.1 + 2024.18.2 11.0.16 2.5.8-hadoop3 @@ -21,7 +21,7 @@ com.amazonaws aws-athena-federation-sdk - 2022.47.1 + 2024.18.2 withdep @@ -34,7 +34,7 @@ com.amazonaws athena-federation-integ-test - 2022.47.1 + 2024.18.2 withdep test diff --git a/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseCompositeHandler.java b/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseCompositeHandler.java index b2ea994987..c61470c4c4 100644 --- a/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseCompositeHandler.java +++ b/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseCompositeHandler.java @@ -19,6 +19,7 @@ */ package com.amazonaws.athena.connectors.hbase; +import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -30,6 +31,6 @@ public class HbaseCompositeHandler { public HbaseCompositeHandler() { - super(new HbaseMetadataHandler(System.getenv()), new HbaseRecordHandler(System.getenv())); + super(new
HbaseMetadataHandler(GlueConnectionUtils.getGlueConnection()), new HbaseRecordHandler(GlueConnectionUtils.getGlueConnection())); } } diff --git a/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseConnectionFactory.java b/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseConnectionFactory.java index a84d3bb061..6492992bdb 100644 --- a/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseConnectionFactory.java +++ b/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseConnectionFactory.java @@ -19,6 +19,7 @@ */ package com.amazonaws.athena.connectors.hbase; +import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import org.apache.arrow.util.VisibleForTesting; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; @@ -124,7 +125,7 @@ private Connection createConnection(String host, String masterPort, String zooke config.set(nextConfig.getKey(), nextConfig.getValue()); } - Map configOptions = System.getenv(); + Map configOptions = GlueConnectionUtils.getGlueConnection(); boolean kerberosAuthEnabled = configOptions.get(KERBEROS_AUTH_ENABLED) != null && "true".equalsIgnoreCase(configOptions.get(KERBEROS_AUTH_ENABLED)); logger.info("Kerberos Authentication Enabled: " + kerberosAuthEnabled); if (kerberosAuthEnabled) { diff --git a/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/connection/HbaseConnectionFactory.java b/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/connection/HbaseConnectionFactory.java index c3bd8560dc..72df9dae6c 100644 --- a/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/connection/HbaseConnectionFactory.java +++ b/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/connection/HbaseConnectionFactory.java @@ -19,6 +19,7 @@ */ package com.amazonaws.athena.connectors.hbase.connection; +import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import 
org.apache.arrow.util.VisibleForTesting; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; @@ -126,7 +127,7 @@ private HBaseConnection createConnection(String host, String masterPort, String config.set(nextConfig.getKey(), nextConfig.getValue()); } - Map configOptions = System.getenv(); + Map configOptions = GlueConnectionUtils.getGlueConnection(); boolean kerberosAuthEnabled = configOptions.get(KERBEROS_AUTH_ENABLED) != null && "true".equalsIgnoreCase(configOptions.get(KERBEROS_AUTH_ENABLED)); logger.info("Kerberos Authentication Enabled: " + kerberosAuthEnabled); if (kerberosAuthEnabled) { diff --git a/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/integ/HbaseTableUtils.java b/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/integ/HbaseTableUtils.java index 5e5453e0a2..f2b89d63b8 100644 --- a/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/integ/HbaseTableUtils.java +++ b/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/integ/HbaseTableUtils.java @@ -19,6 +19,7 @@ */ package com.amazonaws.athena.connectors.hbase.integ; +import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -98,7 +99,7 @@ private Configuration getHbaseConfiguration(String connectionStr) configuration.set("hbase.client.pause", "500"); configuration.set("zookeeper.recovery.retry", "2"); - java.util.Map configOptions = System.getenv(); + java.util.Map configOptions = GlueConnectionUtils.getGlueConnection(); boolean kerberosAuthEnabled = configOptions.get(KERBEROS_AUTH_ENABLED) != null && "true".equalsIgnoreCase(configOptions.get(KERBEROS_AUTH_ENABLED)); logger.info("Kerberos Authentication Enabled: " + kerberosAuthEnabled); if (kerberosAuthEnabled) { diff --git a/athena-hortonworks-hive/athena-hortonworks-hive.yaml 
b/athena-hortonworks-hive/athena-hortonworks-hive.yaml index 7876420c7d..001ad4c67e 100644 --- a/athena-hortonworks-hive/athena-hortonworks-hive.yaml +++ b/athena-hortonworks-hive/athena-hortonworks-hive.yaml @@ -10,7 +10,7 @@ Metadata: Labels: - athena-federation HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2022.47.1 + SemanticVersion: 2024.18.2 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: LambdaFunctionName: @@ -20,9 +20,10 @@ Parameters: DefaultConnectionString: Description: 'The default connection string is used when catalog is "lambda:${LambdaFunctionName}". Catalog specific Connection Strings can be added later. Format: ${DatabaseType}://${NativeJdbcConnectionString}.' Type: String - SecretNamePrefix: - Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' - Type: String + Default: "" + SecretName: + Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' + Type: String SpillBucket: Description: 'The name of the bucket where this function can spill data.' 
Type: String @@ -30,6 +31,10 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill + GlueConnection: + Description: "(Optional) Name of glue connection storing connection details for Federated Data source." + Type: String + Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -50,65 +55,152 @@ Parameters: Description: '(Optional) One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' Type: CommaDelimitedList Default: "" - PermissionsBoundaryARN: + PermissionsBoundaryArn: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
+ Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" + + Conditions: - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] HasSecurityGroups: !Not [ !Equals [ !Join ["", !Ref SecurityGroupIds], "" ] ] HasSubnets: !Not [ !Equals [ !Join ["", !Ref SubnetIds], "" ] ] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] + NotHasGlueConnectionAndHasKmsKeyId: !And + - !Not [ !Condition HasGlueConnection ] + - !Condition HasKmsKeyId Resources: JdbcConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !Ref DisableSpillEncryption - spill_bucket: !Ref SpillBucket - spill_prefix: !Ref SpillPrefix - default: !Ref DefaultConnectionString + disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] + spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] + spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] + kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] + glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] + default: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultConnectionString ] FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.hortonworks.HiveMuxCompositeHandler" - CodeUri: "./target/athena-hortonworks-hive-2022.47.1.jar" + CodeUri: "./target/athena-hortonworks-hive-2024.18.2.jar" Description: "Enables Amazon Athena to communicate with Hortonworks Hive using JDBC" Runtime: java11 Timeout: !Ref LambdaTimeout
MemorySize: !Ref LambdaMemory - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] - Policies: - - Statement: - - Action: - - secretsmanager:GetSecretValue - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' - Version: '2012-10-17' - - Statement: - - Action: - - logs:CreateLogGroup - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' - Version: '2012-10-17' - - Statement: + Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] + VpcConfig: + SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] + SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' + - Action: + - logs:CreateLogGroup + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' - Action: - logs:CreateLogStream - logs:PutLogEvents Effect: Allow Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' - Version: '2012-10-17' - - 
Statement: - Action: - athena:GetQueryExecution - s3:ListAllMyBuckets Effect: Allow Resource: '*' - Version: '2012-10-17' - #S3CrudPolicy allows our connector to spill large responses to S3. You can optionally replace this pre-made policy - #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. - - S3CrudPolicy: - BucketName: !Ref SpillBucket - #VPCAccessPolicy allows our connector to run in a VPC so that it can access your data source. - - VPCAccessPolicy: {} - VpcConfig: - SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] - SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] \ No newline at end of file + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - !If + - HasGlueConnection + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + - !Ref "AWS::NoValue" + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - 
!Ref FunctionRole \ No newline at end of file diff --git a/athena-hortonworks-hive/pom.xml b/athena-hortonworks-hive/pom.xml index 1b67ad4b8c..e33ef02199 100644 --- a/athena-hortonworks-hive/pom.xml +++ b/athena-hortonworks-hive/pom.xml @@ -3,11 +3,11 @@ aws-athena-query-federation com.amazonaws - 2022.47.1 + 2024.18.2 4.0.0 athena-hortonworks-hive - 2022.47.1 + 2024.18.2 2.6.23.1027 @@ -15,13 +15,13 @@ com.amazonaws athena-federation-integ-test - 2022.47.1 + 2024.18.2 test com.amazonaws athena-jdbc - 2022.47.1 + 2024.18.2 Hive @@ -31,7 +31,7 @@ com.amazonaws athena-jdbc - 2022.47.1 + 2024.18.2 test-jar test @@ -47,7 +47,6 @@ ${mockito.version} test - com.amazonaws diff --git a/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveCompositeHandler.java b/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveCompositeHandler.java index 70ac1f47aa..65e877242d 100644 --- a/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveCompositeHandler.java +++ b/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveCompositeHandler.java @@ -19,6 +19,7 @@ */ package com.amazonaws.athena.connectors.hortonworks; +import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -32,6 +33,6 @@ public class HiveCompositeHandler { public HiveCompositeHandler() { - super(new HiveMetadataHandler(System.getenv()), new HiveRecordHandler(System.getenv())); + super(new HiveMetadataHandler(GlueConnectionUtils.getGlueConnection()), new HiveRecordHandler(GlueConnectionUtils.getGlueConnection())); } } diff --git a/athena-jdbc/pom.xml b/athena-jdbc/pom.xml index 380f9dc1c9..150d8c17b9 100644 --- a/athena-jdbc/pom.xml +++ b/athena-jdbc/pom.xml @@ -3,11 +3,11 @@ aws-athena-query-federation com.amazonaws - 2022.47.1 + 2024.18.2 4.0.0 athena-jdbc - 2022.47.1 + 2024.18.2 com.amazonaws @@ -93,7 
+93,7 @@ com.amazonaws aws-athena-federation-sdk - 2022.47.1 + 2024.18.2 withdep @@ -106,7 +106,7 @@ com.amazonaws athena-federation-integ-test - 2022.47.1 + 2024.18.2 test diff --git a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcCompositeHandler.java b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcCompositeHandler.java index 254fdf81ba..d2a216a674 100644 --- a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcCompositeHandler.java +++ b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcCompositeHandler.java @@ -19,6 +19,7 @@ */ package com.amazonaws.athena.connectors.jdbc; +import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; @@ -43,10 +44,10 @@ public MultiplexingJdbcCompositeHandler( { super( hasCatalogConnections ? - muxMetadataHandlerClass.getConstructor(java.util.Map.class).newInstance(System.getenv()) : - metadataHandlerClass.getConstructor(java.util.Map.class).newInstance(System.getenv()), + muxMetadataHandlerClass.getConstructor(java.util.Map.class).newInstance(GlueConnectionUtils.getGlueConnection()) : + metadataHandlerClass.getConstructor(java.util.Map.class).newInstance(GlueConnectionUtils.getGlueConnection()), hasCatalogConnections ? 
- muxRecordHandlerClass.getConstructor(java.util.Map.class).newInstance(System.getenv()) : - recordHandlerClass.getConstructor(java.util.Map.class).newInstance(System.getenv())); + muxRecordHandlerClass.getConstructor(java.util.Map.class).newInstance(GlueConnectionUtils.getGlueConnection()) : + recordHandlerClass.getConstructor(java.util.Map.class).newInstance(GlueConnectionUtils.getGlueConnection())); } } diff --git a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/connection/DatabaseConnectionConfigBuilder.java b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/connection/DatabaseConnectionConfigBuilder.java index 25418ac93c..5594e14e5e 100644 --- a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/connection/DatabaseConnectionConfigBuilder.java +++ b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/connection/DatabaseConnectionConfigBuilder.java @@ -43,6 +43,12 @@ public class DatabaseConnectionConfigBuilder private static final String SECRET_PATTERN_STRING = "\\$\\{(([a-z-]+!)?[a-zA-Z0-9:/_+=.@-]+)}"; public static final Pattern SECRET_PATTERN = Pattern.compile(SECRET_PATTERN_STRING); + // Config variables used when glue connection supplements connection properties + public static final String DEFAULT_JDBC_CONNECTION_URL_PROPERTY = "default_connection_string"; + public static final String DEFAULT_SECRET_PROPERTY = "secret_name"; + + public static final String DEFAULT_GLUE_CONNECTION = "glue_connection"; + private Map properties; private String engine; @@ -87,31 +93,37 @@ public DatabaseConnectionConfigBuilder properties(final Map prop public List build() { Validate.notEmpty(this.properties, "properties must not be empty"); - Validate.notBlank(this.properties.get(DEFAULT_CONNECTION_STRING_PROPERTY), "Default connection string must be present"); + Validate.isTrue(properties.containsKey(DEFAULT_CONNECTION_STRING_PROPERTY) || properties.containsKey(DEFAULT_JDBC_CONNECTION_URL_PROPERTY), "Default connection 
string must be present"); List databaseConnectionConfigs = new ArrayList<>(); int numberOfCatalogs = 0; - for (Map.Entry property : this.properties.entrySet()) { - final String key = property.getKey(); - final String value = property.getValue(); - - String catalogName; - if (DEFAULT_CONNECTION_STRING_PROPERTY.equals(key.toLowerCase())) { - catalogName = key.toLowerCase(); - } - else if (key.endsWith(CONNECTION_STRING_PROPERTY_SUFFIX)) { - catalogName = key.replace(CONNECTION_STRING_PROPERTY_SUFFIX, ""); - } - else { - // unknown property ignore - continue; - } - databaseConnectionConfigs.add(extractDatabaseConnectionConfig(catalogName, value)); - + if (!StringUtils.isBlank(properties.get(DEFAULT_GLUE_CONNECTION))) { + databaseConnectionConfigs.add(extractDatabaseGlueConnectionConfig(DEFAULT_CONNECTION_STRING_PROPERTY)); numberOfCatalogs++; - if (numberOfCatalogs > MUX_CATALOG_LIMIT) { - throw new RuntimeException("Too many database instances in mux. Max supported is " + MUX_CATALOG_LIMIT); + } + else { + for (Map.Entry property : this.properties.entrySet()) { + final String key = property.getKey(); + final String value = property.getValue(); + + String catalogName; + if (DEFAULT_CONNECTION_STRING_PROPERTY.equals(key.toLowerCase())) { + catalogName = key.toLowerCase(); + } + else if (key.endsWith(CONNECTION_STRING_PROPERTY_SUFFIX)) { + catalogName = key.replace(CONNECTION_STRING_PROPERTY_SUFFIX, ""); + } + else { + // unknown property ignore + continue; + } + databaseConnectionConfigs.add(extractDatabaseConnectionConfig(catalogName, value)); + + numberOfCatalogs++; + if (numberOfCatalogs > MUX_CATALOG_LIMIT) { + throw new RuntimeException("Too many database instances in mux. 
Max supported is " + MUX_CATALOG_LIMIT); + } } } @@ -141,6 +153,14 @@ private DatabaseConnectionConfig extractDatabaseConnectionConfig(final String ca .orElseGet(() -> new DatabaseConnectionConfig(catalogName, this.engine, jdbcConnectionString)); } + private DatabaseConnectionConfig extractDatabaseGlueConnectionConfig(final String catalogName) + { + final String jdbcConnectionString = properties.get(DEFAULT_JDBC_CONNECTION_URL_PROPERTY); + final String secretName = properties.get(DEFAULT_SECRET_PROPERTY); + Validate.notBlank(jdbcConnectionString, "JDBC Connection string must not be blank."); + return StringUtils.isBlank(secretName) ? new DatabaseConnectionConfig(catalogName, this.engine, jdbcConnectionString) : new DatabaseConnectionConfig(catalogName, this.engine, jdbcConnectionString, secretName); + } + private Optional extractSecretName(final String jdbcConnectionString) { Matcher secretMatcher = SECRET_PATTERN.matcher(jdbcConnectionString); diff --git a/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/connection/DatabaseConnectionConfigBuilderTest.java b/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/connection/DatabaseConnectionConfigBuilderTest.java index 1f54475206..b17a88770b 100644 --- a/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/connection/DatabaseConnectionConfigBuilderTest.java +++ b/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/connection/DatabaseConnectionConfigBuilderTest.java @@ -33,6 +33,9 @@ public class DatabaseConnectionConfigBuilderTest private static final String CONNECTION_STRING2 = "postgres://jdbc:postgresql://hostname/user=testUser&password=testPassword"; private static final String CONNECTION_STRING3 = "redshift://jdbc:redshift://hostname:5439/dev?${arn:aws:secretsmanager:us-east-1:1234567890:secret:redshift/user/secret}"; private static final String CONNECTION_STRING4 = 
"postgres://jdbc:postgresql://hostname:5439/dev?${arn:aws:secretsmanager:us-east-1:1234567890:secret:postgresql/user/secret}"; + private static final String CONNECTION_STRING5 = "jdbc:postgresql://hostname/test"; + private static final String CONNECTION_STRING5_SECRET = "testSecret"; + private static final String MOCK_GLUE_CONNECTION_NAME = "postgresql-connection"; @Test public void build() @@ -122,4 +125,40 @@ public void validSecretsSyntaxTest() Assert.assertEquals(secrets[i], databaseConnectionConfigs.get(i).getSecret()); } } + + @Test + public void buildUsingGlueConnectionWithSecret() + { + DatabaseConnectionConfig glueSupplementedConnection = new DatabaseConnectionConfig("default", "postgres", + "jdbc:postgresql://hostname/test", "testSecret"); + + List databaseConnectionConfigs = new DatabaseConnectionConfigBuilder() + .engine("postgres") + .properties(ImmutableMap.of( + "default", CONNECTION_STRING2, + "default_connection_string", CONNECTION_STRING5, + "secret_name", CONNECTION_STRING5_SECRET, + "glue_connection", MOCK_GLUE_CONNECTION_NAME)) + .build(); + + Assert.assertEquals(Arrays.asList(glueSupplementedConnection), databaseConnectionConfigs); + } + + @Test + public void buildUsingGlueConnectionNoSecret() + { + DatabaseConnectionConfig glueSupplementedConnection = new DatabaseConnectionConfig("default", "postgres", + "jdbc:postgresql://hostname/test"); + + List databaseConnectionConfigs = new DatabaseConnectionConfigBuilder() + .engine("postgres") + .properties(ImmutableMap.of( + "default", CONNECTION_STRING2, + "default_connection_string", CONNECTION_STRING5, + "glue_connection", MOCK_GLUE_CONNECTION_NAME)) + .build(); + + Assert.assertEquals(Arrays.asList(glueSupplementedConnection), databaseConnectionConfigs); + } } + diff --git a/athena-kafka/athena-kafka.yaml b/athena-kafka/athena-kafka.yaml index 812e7cbfcc..c863900049 100644 --- a/athena-kafka/athena-kafka.yaml +++ b/athena-kafka/athena-kafka.yaml @@ -11,7 +11,7 @@ Metadata: - kafka - 
athena-federation HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2022.47.1 + SemanticVersion: 2024.18.2 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: AuthType: @@ -32,7 +32,7 @@ Parameters: Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' Type: String AllowedPattern: ^[a-z0-9-_]{1,64}$ - SecretNamePrefix: + SecretsManagerSecret: Description: "The secret name within AWS Secrets Manager that contains your aws key and secret Credentials" Default: "" Type: String @@ -43,6 +43,10 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill + GlueConnection: + Description: "(Optional) Name of glue connection storing connection details for Federated Data source." + Type: String + Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -67,20 +71,30 @@ Parameters: Description: 'The S3 bucket reference where keystore and truststore certificates are uploaded. Applicable for SSL auth' Default: "" Type: String - LambdaRoleARN: + LambdaRoleArn: Description: "(Optional) A custom role to be used by the Connector lambda" Default: "" Type: String - PermissionsBoundaryARN: + PermissionsBoundaryArn: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
+ Type: String + Default: "" Conditions: - NotHasLambdaRole: !Equals [!Ref LambdaRoleARN, ""] - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] - HasSecurityGroups: !Not [ !Equals [ !Join ["", !Ref SecurityGroupIds], "" ] ] - HasSubnets: !Not [ !Equals [ !Join ["", !Ref SubnetIds], "" ] ] + HasSecurityGroups: !Not [ !Equals [ !Join [ "", !Ref SecurityGroupIds ], "" ] ] + HasSubnets: !Not [ !Equals [ !Join [ "", !Ref SubnetIds ], "" ] ] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] + NotHasGlueConnectionAndHasKmsKeyId: !And + - !Not [ !Condition HasGlueConnection ] + - !Condition HasKmsKeyId Resources: AthenaKafkaConnector: @@ -88,21 +102,23 @@ Resources: Properties: Environment: Variables: - disable_spill_encryption: !Ref DisableSpillEncryption - spill_bucket: !Ref SpillBucket - spill_prefix: !Ref SpillPrefix - secrets_manager_secret: !Ref SecretNamePrefix - certificates_s3_reference: !Ref CertificatesS3Reference - kafka_endpoint: !Ref KafkaEndpoint - auth_type: !Ref AuthType + disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] + spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] + spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] + kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] + glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] + secrets_manager_secret: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SecretsManagerSecret ] + certificates_s3_reference: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref CertificatesS3Reference ] + kafka_endpoint: !If [ 
HasGlueConnection, !Ref "AWS::NoValue", !Ref KafkaEndpoint ] + auth_type: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref AuthType ] FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.kafka.KafkaCompositeHandler" - CodeUri: "./target/athena-kafka-2022.47.1.jar" + CodeUri: "./target/athena-kafka-2024.18.2.jar" Description: "Enables Amazon Athena to communicate with Kafka clusters" Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleARN] + Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] VpcConfig: SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue"] @@ -111,7 +127,7 @@ Resources: Condition: NotHasLambdaRole Type: AWS::IAM::Role Properties: - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] ManagedPolicyArns: - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" AssumeRolePolicyDocument: @@ -135,7 +151,7 @@ Resources: - Action: - secretsmanager:GetSecretValue Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretsManagerSecret}*' - Action: - logs:CreateLogGroup Effect: Allow @@ -169,5 +185,53 @@ Resources: - autoscaling:CompleteLifecycleAction Effect: Allow Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} 
+ - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - !If + - HasGlueConnection + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + - !Ref "AWS::NoValue" Roles: - !Ref FunctionRole + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-kafka/pom.xml b/athena-kafka/pom.xml index 6e42ff1876..ab125f1866 100644 --- a/athena-kafka/pom.xml +++ b/athena-kafka/pom.xml @@ -3,12 +3,12 @@ aws-athena-query-federation com.amazonaws - 2022.47.1 + 2024.18.2 4.0.0 athena-kafka Athena Kafka Connector - 2022.47.1 + 2024.18.2 11 11 @@ -68,7 +68,7 @@ com.amazonaws athena-federation-integ-test - 2022.47.1 + 2024.18.2 withdep test @@ -104,11 +104,10 @@ com.amazonaws aws-athena-federation-sdk - 2022.47.1 + 2024.18.2 withdep - org.testng testng diff --git a/athena-msk/athena-msk.yaml b/athena-msk/athena-msk.yaml index cc3e984242..1dd3b0aa96 100644 --- a/athena-msk/athena-msk.yaml +++ b/athena-msk/athena-msk.yaml @@ -11,7 +11,7 @@ Metadata: - msk - athena-federation HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2022.47.1 + SemanticVersion: 2024.18.2 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: AuthType: @@ -26,13 +26,14 @@ Parameters: - SSL - NO_AUTH KafkaEndpoint: - Description: 'MSK cluster endpoint' + Description: '(Optional if Glue Connection is 
provided) MSK cluster endpoint' Type: String + Default: "" LambdaFunctionName: Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' Type: String AllowedPattern: ^[a-z0-9-_]{1,64}$ - SecretNamePrefix: + SecretsManagerSecret: Description: "The secret name within AWS Secrets Manager that contains your aws key and secret Credentials(Not Required for IAM AUTH)" Default: "" Type: String @@ -43,6 +44,10 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill + GlueConnection: + Description: "(Optional) Name of glue connection storing connection details for Federated Data source." + Type: String + Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -67,20 +72,30 @@ Parameters: Description: 'The S3 bucket reference where keystore and truststore certificates are uploaded. Applicable for SSL auth' Default: "" Type: String - LambdaRoleARN: + LambdaRoleArn: Description: "(Must for auth type IAM) A custom role to be used by the Connector lambda" Default: "" Type: String - PermissionsBoundaryARN: + PermissionsBoundaryArn: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
+ Type: String + Default: "" Conditions: - NotHasLambdaRole: !Equals [!Ref LambdaRoleARN, ""] - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] HasSecurityGroups: !Not [ !Equals [ !Join ["", !Ref SecurityGroupIds], "" ] ] HasSubnets: !Not [ !Equals [ !Join ["", !Ref SubnetIds], "" ] ] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] + NotHasGlueConnectionAndHasKmsKeyId: !And + - !Not [ !Condition HasGlueConnection ] + - !Condition HasKmsKeyId Resources: AthenaMSKConnector: @@ -88,21 +103,23 @@ Resources: Properties: Environment: Variables: - disable_spill_encryption: !Ref DisableSpillEncryption - spill_bucket: !Ref SpillBucket - spill_prefix: !Ref SpillPrefix - secrets_manager_secret: !Ref SecretNamePrefix - certificates_s3_reference: !Ref CertificatesS3Reference - kafka_endpoint: !Ref KafkaEndpoint - auth_type: !Ref AuthType + disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] + spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] + spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] + kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] + glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] + secrets_manager_secret: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SecretsManagerSecret ] + certificates_s3_reference: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref CertificatesS3Reference ] + kafka_endpoint: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref KafkaEndpoint ] + auth_type: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref AuthType ] FunctionName: !Ref 
LambdaFunctionName Handler: "com.amazonaws.athena.connectors.msk.AmazonMskCompositeHandler" - CodeUri: "./target/athena-msk-2022.47.1.jar" + CodeUri: "./target/athena-msk-2024.18.2.jar" Description: "Enables Amazon Athena to communicate with MSK clusters" Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleARN] + Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] VpcConfig: SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue"] @@ -111,7 +128,7 @@ Resources: Condition: NotHasLambdaRole Type: AWS::IAM::Role Properties: - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] ManagedPolicyArns: - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" AssumeRolePolicyDocument: @@ -135,7 +152,7 @@ Resources: - Action: - secretsmanager:GetSecretValue Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretsManagerSecret}*' - Action: - logs:CreateLogGroup Effect: Allow @@ -169,5 +186,33 @@ Resources: - autoscaling:CompleteLifecycleAction Effect: Allow Resource: '*' + - !If + - HasGlueConnection + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + - !Ref "AWS::NoValue" Roles: - !Ref FunctionRole + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + 
PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-msk/pom.xml b/athena-msk/pom.xml index 8ca7bc747d..3e343efa4b 100644 --- a/athena-msk/pom.xml +++ b/athena-msk/pom.xml @@ -3,12 +3,12 @@ aws-athena-query-federation com.amazonaws - 2022.47.1 + 2024.18.2 4.0.0 athena-msk Athena MSK Connector - 2022.47.1 + 2024.18.2 11 11 @@ -68,7 +68,7 @@ com.amazonaws athena-federation-integ-test - 2022.47.1 + 2024.18.2 withdep test @@ -110,7 +110,7 @@ com.amazonaws aws-athena-federation-sdk - 2022.47.1 + 2024.18.2 withdep diff --git a/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/AmazonMskCompositeHandler.java b/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/AmazonMskCompositeHandler.java index 14cb474d87..9879638c38 100644 --- a/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/AmazonMskCompositeHandler.java +++ b/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/AmazonMskCompositeHandler.java @@ -19,6 +19,7 @@ */ package com.amazonaws.athena.connectors.msk; +import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; public class AmazonMskCompositeHandler @@ -26,6 +27,6 @@ public class AmazonMskCompositeHandler { public AmazonMskCompositeHandler() throws Exception { - super(new AmazonMskMetadataHandler(System.getenv()), new AmazonMskRecordHandler(System.getenv())); + super(new AmazonMskMetadataHandler(GlueConnectionUtils.getGlueConnection()), new AmazonMskRecordHandler(GlueConnectionUtils.getGlueConnection())); } } diff --git a/athena-mysql/athena-mysql.yaml b/athena-mysql/athena-mysql.yaml index e566fe1500..26364d2724 100644 --- a/athena-mysql/athena-mysql.yaml +++ 
b/athena-mysql/athena-mysql.yaml @@ -10,7 +10,7 @@ Metadata: Labels: - athena-federation HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2022.47.1 + SemanticVersion: 2024.18.2 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: LambdaFunctionName: @@ -20,7 +20,8 @@ Parameters: DefaultConnectionString: Description: 'The default connection string is used when catalog is "lambda:${LambdaFunctionName}". Catalog specific Connection Strings can be added later. Format: ${DatabaseType}://${NativeJdbcConnectionString}.' Type: String - SecretNamePrefix: + Default: "" + SecretName: Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena MySQL Federation secret names can be prefixed with "AthenaMySQLFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaMySQLFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' Type: String SpillBucket: @@ -30,6 +31,10 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill + GlueConnection: + Description: "(Optional) Name of glue connection storing connection details for Federated Data source." + Type: String + Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -38,7 +43,7 @@ Parameters: Description: 'Lambda memory in MB (min 128 - 3008 max).' 
Default: 3008 Type: Number - LambdaRoleARN: + LambdaRoleArn: Description: "(Optional) A custom role to be used by the Connector lambda" Type: String Default: "" @@ -52,39 +57,54 @@ Parameters: SubnetIds: Description: 'One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' Type: 'List' - PermissionsBoundaryARN: + PermissionsBoundaryArn: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." + Type: String + Default: "" + Conditions: - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] - NotHasLambdaRole: !Equals [!Ref LambdaRoleARN, ""] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] + NotHasGlueConnectionAndHasKmsKeyId: !And + - !Not [ !Condition HasGlueConnection ] + - !Condition HasKmsKeyId + Resources: JdbcConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !Ref DisableSpillEncryption - spill_bucket: !Ref SpillBucket - spill_prefix: !Ref SpillPrefix - default: !Ref DefaultConnectionString + default: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultConnectionString ] + disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] + spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] + 
spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] + kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] + glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.mysql.MySqlMuxCompositeHandler" - CodeUri: "./target/athena-mysql-2022.47.1.jar" + CodeUri: "./target/athena-mysql-2024.18.2.jar" Description: "Enables Amazon Athena to communicate with MySQL using JDBC" Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] - Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleARN] + Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] VpcConfig: SecurityGroupIds: !Ref SecurityGroupIds SubnetIds: !Ref SubnetIds + FunctionRole: Condition: NotHasLambdaRole Type: AWS::IAM::Role Properties: + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] ManagedPolicyArns: - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" AssumeRolePolicyDocument: @@ -95,7 +115,8 @@ Resources: Service: - lambda.amazonaws.com Action: - - "sts:AssumeRole" + - "sts:AssumeRole" + FunctionExecutionPolicy: Condition: NotHasLambdaRole Type: "AWS::IAM::Policy" @@ -109,45 +130,73 @@ Resources: - Action: - secretsmanager:GetSecretValue Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' - Action: - logs:CreateLogGroup Effect: Allow Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' - Action: - - logs:CreateLogStream - - logs:PutLogEvents + - logs:CreateLogStream + - logs:PutLogEvents Effect: 
Allow Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' - Action: - - athena:GetQueryExecution - - s3:ListAllMyBuckets + - athena:GetQueryExecution + - s3:ListAllMyBuckets Effect: Allow Resource: '*' - Action: - - ec2:CreateNetworkInterface - - ec2:DeleteNetworkInterface - - ec2:DescribeNetworkInterfaces - - ec2:DetachNetworkInterface + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface Effect: Allow Resource: '*' - Action: - - s3:GetObject - - s3:ListBucket - - s3:GetBucketLocation - - s3:GetObjectVersion - - s3:PutObject - - s3:PutObjectAcl - - s3:GetLifecycleConfiguration - - s3:PutLifecycleConfiguration - - s3:DeleteObject + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject Effect: Allow Resource: - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName} - - bucketName: - Ref: SpillBucket + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName}/* - - bucketName: - Ref: SpillBucket + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - !If + - HasGlueConnection + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + - !Ref "AWS::NoValue" + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub 
"arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole diff --git a/athena-mysql/pom.xml b/athena-mysql/pom.xml index e5bfe62786..ab7a4d4aa1 100644 --- a/athena-mysql/pom.xml +++ b/athena-mysql/pom.xml @@ -3,27 +3,27 @@ aws-athena-query-federation com.amazonaws - 2022.47.1 + 2024.18.2 4.0.0 athena-mysql - 2022.47.1 + 2024.18.2 com.amazonaws athena-federation-integ-test - 2022.47.1 + 2024.18.2 test com.amazonaws athena-jdbc - 2022.47.1 + 2024.18.2 com.amazonaws athena-jdbc - 2022.47.1 + 2024.18.2 test-jar test diff --git a/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlCompositeHandler.java b/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlCompositeHandler.java index 35265e60db..05776c3a2f 100644 --- a/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlCompositeHandler.java +++ b/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlCompositeHandler.java @@ -19,6 +19,7 @@ */ package com.amazonaws.athena.connectors.mysql; +import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -32,6 +33,6 @@ public class MySqlCompositeHandler { public MySqlCompositeHandler() { - super(new MySqlMetadataHandler(System.getenv()), new MySqlRecordHandler(System.getenv())); + super(new MySqlMetadataHandler(GlueConnectionUtils.getGlueConnection()), new MySqlRecordHandler(GlueConnectionUtils.getGlueConnection())); } } diff --git a/athena-neptune/athena-neptune.yaml b/athena-neptune/athena-neptune.yaml index 0158f57946..63f08389b7 100644 --- a/athena-neptune/athena-neptune.yaml +++ b/athena-neptune/athena-neptune.yaml @@ -10,29 +10,31 @@ Metadata: ReadmeUrl: README.md Labels: ['athena-federation','athena-neptune','neptune'] HomePageUrl: https://github.com/awslabs/aws-athena-query-federation - SemanticVersion: 2022.47.1 + SemanticVersion: 2024.18.2 SourceCodeUrl: 
https://github.com/awslabs/aws-athena-query-federation Parameters: - NeptuneClusterEndpoint: - Description: 'The Neptune cluster endpoint' + NeptuneEndpoint: + Description: '(Optional if Glue Connection is provided) The Neptune cluster endpoint' Type: String + Default: "" NeptunePort: Description: 'The Neptune port' Type: String Default: '8182' - NeptuneClusterResourceID: + NeptuneClusterResId: Description: 'To find the Neptune cluster resource ID in the Amazon Neptune AWS Management Console, choose the DB cluster that you want. The Resource ID is shown in the Configuration section.' Type: String - NeptuneGraphType: + NeptuneGraphtype: Description: 'Type of graph created in Neptune, defaults to PROPERTYGRAPH. Allowed values: PROPERTYGRAPH, RDF' Type: String Default: 'PROPERTYGRAPH' AllowedValues: ["PROPERTYGRAPH", "RDF"] GlueDatabaseName: - Description: 'Name of the Neptune cluster specific Glue Database that contains schemas of graph vertices' + Description: '(Optional if Glue Connection is provided) Name of the Neptune cluster specific Glue Database that contains schemas of graph vertices' Type: String - AthenaCatalogName: + Default: "" + LambdaFunctionName: Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' Type: String Default: 'athena-catalog' @@ -44,6 +46,10 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-neptune-spill + GlueConnection: + Description: "(Optional) Name of glue connection storing connection details for Federated Data source." + Type: String + Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -62,21 +68,36 @@ Parameters: SubnetIds: Description: 'One or more Subnet IDs corresponding to the Subnet(s) that the Lambda function can use to access the Neptune cluster. (e.g. 
subnet1,subnet2)' Type: 'List' - IAMEnabled: + IamEnabled: Description: 'If set to ''true'' the connector uses Signature Version 4 Signing' Default: false Type: String - EnableCaseInsensitiveMatch: + EnableCaseinsensitivematch: Description: 'If set to ''false'' the connector does a case sensitive match for keys' Default: true Type: String - PermissionsBoundaryARN: + PermissionsBoundaryArn: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." + Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" Conditions: - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] + NotHasGlueConnectionAndHasKmsKeyId: !And + - !Not [ !Condition HasGlueConnection ] + - !Condition HasKmsKeyId Resources: ConnectorConfig: @@ -84,52 +105,128 @@ Resources: Properties: Environment: Variables: - disable_spill_encryption: !Ref DisableSpillEncryption - spill_bucket: !Ref SpillBucket - spill_prefix: !Ref SpillPrefix - glue_database_name: !Ref GlueDatabaseName - neptune_endpoint: !Ref NeptuneClusterEndpoint - neptune_port: !Ref NeptunePort - neptune_cluster_res_id: !Ref NeptuneClusterResourceID - iam_enabled: !Ref IAMEnabled - neptune_graphtype: !Ref NeptuneGraphType + disable_spill_encryption: !If 
[ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] + spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] + spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] + kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] + glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] + glue_database_name: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref GlueDatabaseName ] + neptune_endpoint: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref NeptuneEndpoint ] + neptune_port: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref NeptunePort ] + neptune_cluster_res_id: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref NeptuneClusterResId ] + iam_enabled: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref IamEnabled ] + neptune_graphtype: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref NeptuneGraphtype ] SERVICE_REGION: !Ref AWS::Region - enable_caseinsensitivematch: !Ref EnableCaseInsensitiveMatch - FunctionName: !Ref AthenaCatalogName + enable_caseinsensitivematch: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref EnableCaseinsensitivematch ] + FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.neptune.NeptuneCompositeHandler" - CodeUri: "./target/athena-neptune-2022.47.1.jar" + CodeUri: "./target/athena-neptune-2024.18.2.jar" Description: "Enables Amazon Athena to communicate with Neptune, making your Neptune graph data accessible via SQL." 
Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] - Policies: - - Statement: - - Action: - - glue:GetTableVersions - - glue:GetPartitions - - glue:GetTables - - glue:GetTableVersion - - glue:GetDatabases - - glue:GetTable - - glue:GetPartition - - glue:GetDatabase - - athena:GetQueryExecution - - s3:ListAllMyBuckets - Effect: Allow - Resource: '*' - - Action: - - neptune-db:* - Effect: Allow - #Dynamically construct Neptune Cluster Resource ARN to limit permissions to the specific cluster provided - Resource: !Sub 'arn:${AWS::Partition}:neptune-db:${AWS::Region}:${AWS::AccountId}:${NeptuneClusterResourceID}/*' - Version: '2012-10-17' - #S3CrudPolicy allows our connector to spill large responses to S3. You can optionally replace this pre-made policy - #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. - - S3CrudPolicy: - BucketName: !Ref SpillBucket - #VPCAccessPolicy allows our connector to run in a VPC so that it can access your data source. 
- - VPCAccessPolicy: {} + Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] VpcConfig: SecurityGroupIds: !Ref SecurityGroupIds SubnetIds: !Ref SubnetIds + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - glue:GetTableVersions + - glue:GetPartitions + - glue:GetTables + - glue:GetTableVersion + - glue:GetDatabases + - glue:GetTable + - glue:GetPartition + - glue:GetDatabase + - athena:GetQueryExecution + - s3:ListAllMyBuckets + Effect: Allow + Resource: '*' + - Action: + - neptune-db:* + Effect: Allow + #Dynamically construct Neptune Cluster Resource ARN to limit permissions to the specific cluster provided + Resource: !Sub 'arn:${AWS::Partition}:neptune-db:${AWS::Region}:${AWS::AccountId}:${NeptuneClusterResId}/*' + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - !If + 
- HasGlueConnection + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + - !Ref "AWS::NoValue" + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-neptune/docs/aws-glue-sample-scripts/RDF.md b/athena-neptune/docs/aws-glue-sample-scripts/RDF.md index 46700273ad..00a33bc71d 100644 --- a/athena-neptune/docs/aws-glue-sample-scripts/RDF.md +++ b/athena-neptune/docs/aws-glue-sample-scripts/RDF.md @@ -144,7 +144,7 @@ In this example, use the following settings: - ApplicationName: AthenaNeptuneConnectorRDF - AthenaCatalogName: athena-catalog-neptune-rdf - GlueDatabaseName: graph-database-rdf -- NeptuneGraphType: RDF +- NeptuneGraphtype: RDF ### Step 6: Query Once connector is deployed, you can run SQL queries against the Athena service to retrieve this RDF data. diff --git a/athena-neptune/docs/neptune-connector-setup/README.md b/athena-neptune/docs/neptune-connector-setup/README.md index 882e344346..9fb376f392 100644 --- a/athena-neptune/docs/neptune-connector-setup/README.md +++ b/athena-neptune/docs/neptune-connector-setup/README.md @@ -8,10 +8,10 @@ To deploy the Amazon Athena Neptune connector, we will need the following pre-requisite information: 1) SpillBucket – You can either use an existing S3 bucket or create a new one to be used by the connector to store spill over results for Athena to consume. 
-2) NeptuneClusterEndpoint – You can get this information from the Neptune console and copying the cluster “Writer” endpoint information. +2) NeptuneEndpoint – You can get this information from the Neptune console and copying the cluster “Writer” endpoint information. ![](./assets/connector-clusterendpoint.png) -3) NeptuneClusterResourceID - To find the Neptune cluster resource ID in the Amazon Neptune AWS Management Console, choose the DB cluster that you want. The Resource ID is shown in the Configuration section. +3) NeptuneClusterResId - To find the Neptune cluster resource ID in the Amazon Neptune AWS Management Console, choose the DB cluster that you want. The Resource ID is shown in the Configuration section. ![](./assets/connector-clusterId.png) 4) SecurityGroupIds – These are the Security Group ID(s) that the connector Lambda function uses to communicate with Neptune. There are two steps: @@ -52,15 +52,15 @@ Scroll down to “Application Settings” and specify the following field values * GlueDatabaseName: This should be same as the glue database you created in one of the earlier steps. Example: graph-database. - * IAMEnabled: This option indicates whether you have IAM DB Auth enabled on your Neptune Cluster or not. Default value is "false". + * IamEnabled: This option indicates whether you have IAM DB Auth enabled on your Neptune Cluster or not. Default value is "false". * LambdaMemory: The memory allocation for the connector lambda function ranging between 128 – 3008 MB. The default is 3008 MB. * LambdaTimeout: Timeout value in seconds for the connector lambda function. Default value is 900 seconds. - * NeptuneClusterEndpoint: Provide the Neptune Cluster endpoint that you have captured in one of the previous steps. + * NeptuneEndpoint: Provide the Neptune Cluster endpoint that you have captured in one of the previous steps. - * NeptuneClusterResourceID: Provide the Neptune Cluster resourceid that you have captured in one of the previous steps. 
+ * NeptuneClusterResId: Provide the Neptune Cluster resourceid that you have captured in one of the previous steps. * NeptunePort: The listener port for your Neptune Cluster. Default is 8182. @@ -72,7 +72,7 @@ Scroll down to “Application Settings” and specify the following field values * SubnetIds: Subnet IDs that you have captured in one of the earlier steps separated by commas. - * NeptuneGraphType: PROPERTYGRAPH or RDF. + * NeptuneGraphtype: PROPERTYGRAPH or RDF. Provide Acknowledgement on the custom IAM roles creation and click on “Deploy”. Sample screenshots below: diff --git a/athena-neptune/pom.xml b/athena-neptune/pom.xml index 9394d9982d..f745ebe6d1 100644 --- a/athena-neptune/pom.xml +++ b/athena-neptune/pom.xml @@ -3,11 +3,11 @@ aws-athena-query-federation com.amazonaws - 2022.47.1 + 2024.18.2 4.0.0 athena-neptune - 2022.47.1 + 2024.18.2 3.7.2 @@ -17,7 +17,7 @@ com.amazonaws aws-athena-federation-sdk - 2022.47.1 + 2024.18.2 withdep diff --git a/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/NeptuneCompositeHandler.java b/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/NeptuneCompositeHandler.java index 5659602885..0ae4fbc607 100644 --- a/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/NeptuneCompositeHandler.java +++ b/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/NeptuneCompositeHandler.java @@ -19,6 +19,7 @@ */ package com.amazonaws.athena.connectors.neptune; +import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -30,6 +31,6 @@ public class NeptuneCompositeHandler { public NeptuneCompositeHandler() { - super(new NeptuneMetadataHandler(System.getenv()), new NeptuneRecordHandler(System.getenv())); + super(new NeptuneMetadataHandler(GlueConnectionUtils.getGlueConnection()), new NeptuneRecordHandler(GlueConnectionUtils.getGlueConnection())); } } diff --git 
a/athena-oracle/athena-oracle.yaml b/athena-oracle/athena-oracle.yaml index b3abe7d8e6..e400b00641 100644 --- a/athena-oracle/athena-oracle.yaml +++ b/athena-oracle/athena-oracle.yaml @@ -10,7 +10,7 @@ Metadata: Labels: - athena-federation HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2022.47.1 + SemanticVersion: 2024.18.2 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: LambdaFunctionName: @@ -20,10 +20,11 @@ Parameters: DefaultConnectionString: Description: 'The default connection string is used when catalog is "lambda:${LambdaFunctionName}". Catalog specific Connection Strings can be added later. Format: ${DatabaseType}://${NativeJdbcConnectionString}.' Type: String - SecretNamePrefix: + Default: "" + SecretName: Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' Type: String - IsFIPSEnabled: + IsFipsEnabled: AllowedValues: - true - false @@ -37,6 +38,10 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill + GlueConnection: + Description: "(Optional) Name of glue connection storing connection details for Federated Data source." + Type: String + Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -57,66 +62,150 @@ Parameters: Description: '(Optional) One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. 
subnet1,subnet2)' Type: CommaDelimitedList Default: "" - PermissionsBoundaryARN: + PermissionsBoundaryArn: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." + Type: String + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" Conditions: - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] - HasSecurityGroups: !Not [ !Equals [ !Join ["", !Ref SecurityGroupIds], "" ] ] - HasSubnets: !Not [ !Equals [ !Join ["", !Ref SubnetIds], "" ] ] + HasSecurityGroups: !Not [ !Equals [ !Join [ "", !Ref SecurityGroupIds ], "" ] ] + HasSubnets: !Not [ !Equals [ !Join [ "", !Ref SubnetIds ], "" ] ] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] + NotHasGlueConnectionAndHasKmsKeyId: !And + - !Not [ !Condition HasGlueConnection ] + - !Condition HasKmsKeyId Resources: JdbcConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !Ref DisableSpillEncryption - spill_bucket: !Ref SpillBucket - spill_prefix: !Ref SpillPrefix - default: !Ref DefaultConnectionString - is_FIPS_Enabled: !Ref IsFIPSEnabled + disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] + spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] + spill_prefix: !If [ 
HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] + kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] + glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] + default: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultConnectionString ] + is_fips_enabled: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref IsFipsEnabled ] FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.oracle.OracleMuxCompositeHandler" - CodeUri: "./target/athena-oracle-2022.47.1.jar" + CodeUri: "./target/athena-oracle-2024.18.2.jar" Description: "Enables Amazon Athena to communicate with ORACLE using JDBC" Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] - Policies: - - Statement: - - Action: - - secretsmanager:GetSecretValue - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' - Version: '2012-10-17' - - Statement: - - Action: - - logs:CreateLogGroup - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' - Version: '2012-10-17' - - Statement: + Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] + VpcConfig: + SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] + SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + 
FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' + - Action: + - logs:CreateLogGroup + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' - Action: - logs:CreateLogStream - logs:PutLogEvents Effect: Allow Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' - Version: '2012-10-17' - - Statement: - Action: - athena:GetQueryExecution - s3:ListAllMyBuckets Effect: Allow Resource: '*' - Version: '2012-10-17' - #S3CrudPolicy allows our connector to spill large responses to S3. You can optionally replace this pre-made policy - #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. - - S3CrudPolicy: - BucketName: !Ref SpillBucket - #VPCAccessPolicy allows our connector to run in a VPC so that it can access your data source. 
- - VPCAccessPolicy: {} - VpcConfig: - SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] - SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] \ No newline at end of file + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - !If + - HasGlueConnection + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + - !Ref "AWS::NoValue" + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-oracle/pom.xml b/athena-oracle/pom.xml index a9afc8b8e3..374bacf9dd 100644 --- a/athena-oracle/pom.xml +++ b/athena-oracle/pom.xml @@ -3,27 +3,27 @@ aws-athena-query-federation com.amazonaws - 2022.47.1 + 2024.18.2 4.0.0 athena-oracle - 2022.47.1 + 2024.18.2 com.amazonaws athena-federation-integ-test - 2022.47.1 + 2024.18.2 test com.amazonaws athena-jdbc - 2022.47.1 + 2024.18.2 com.amazonaws athena-jdbc - 
2022.47.1 + 2024.18.2 test-jar test @@ -58,9 +58,6 @@ ${mockito.version} test - - - diff --git a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleCompositeHandler.java b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleCompositeHandler.java index 0d8662cffc..0842d73933 100644 --- a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleCompositeHandler.java +++ b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleCompositeHandler.java @@ -20,6 +20,7 @@ */ package com.amazonaws.athena.connectors.oracle; +import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -33,6 +34,6 @@ public class OracleCompositeHandler { public OracleCompositeHandler() { - super(new OracleMetadataHandler(System.getenv()), new OracleRecordHandler(System.getenv())); + super(new OracleMetadataHandler(GlueConnectionUtils.getGlueConnection()), new OracleRecordHandler(GlueConnectionUtils.getGlueConnection())); } } diff --git a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleJdbcConnectionFactory.java b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleJdbcConnectionFactory.java index 433f3d28ff..98d5fb08a0 100644 --- a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleJdbcConnectionFactory.java +++ b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleJdbcConnectionFactory.java @@ -37,7 +37,7 @@ public class OracleJdbcConnectionFactory extends GenericJdbcConnectionFactory { - public static final String IS_FIPS_ENABLED = "is_FIPS_Enabled"; + public static final String IS_FIPS_ENABLED = "is_fips_enabled"; private final DatabaseConnectionInfo databaseConnectionInfo; private final DatabaseConnectionConfig databaseConnectionConfig; private static final Logger LOGGER = LoggerFactory.getLogger(OracleJdbcConnectionFactory.class); diff --git 
a/athena-postgresql/athena-postgresql.yaml b/athena-postgresql/athena-postgresql.yaml index 97d207f4b8..4bbb78a507 100644 --- a/athena-postgresql/athena-postgresql.yaml +++ b/athena-postgresql/athena-postgresql.yaml @@ -10,7 +10,7 @@ Metadata: Labels: - athena-federation HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2022.47.1 + SemanticVersion: 2024.18.2 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: LambdaFunctionName: @@ -20,9 +20,10 @@ Parameters: DefaultConnectionString: Description: 'The default connection string is used when catalog is "lambda:${LambdaFunctionName}". Catalog specific Connection Strings can be added later. Format: ${DatabaseType}://${NativeJdbcConnectionString}.' Type: String - SecretNamePrefix: - Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena PostgreSQL Federation secret names can be prefixed with "AthenaPostgreSQLFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaPostgreSQLFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' - Type: String + Default: "" + SecretName: + Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena PostgreSQL Federation secret names can be prefixed with "AthenaPostgreSQLFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaPostgreSQLFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' + Type: String SpillBucket: Description: 'The name of the bucket where this function can spill data.' 
Type: String @@ -30,6 +31,10 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill + GlueConnection: + Description: "(Optional) Name of glue connection storing connection details for Federated Data source." + Type: String + Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -42,7 +47,7 @@ Parameters: Description: 'If set to ''false'' data spilled to S3 is encrypted with AES GCM' Default: 'false' Type: String - LambdaRoleARN: + LambdaRoleArn: Description: "(Optional) A custom role to be used by the Connector lambda" Type: String Default: "" @@ -57,7 +62,7 @@ Parameters: Type: String Default: "PostGreSqlMuxCompositeHandler" AllowedValues : ["PostGreSqlMuxCompositeHandler", "PostGreSqlCompositeHandler"] - PermissionsBoundaryARN: + PermissionsBoundaryArn: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String @@ -65,38 +70,51 @@ Parameters: Description: "(Optional) Default value for scale of type Numeric, representing the decimal digits in the fractional part, to the right of the decimal point." Default: 0 Type: Number + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
+ Type: String + Default: "" Conditions: - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] - NotHasLambdaRole: !Equals [!Ref LambdaRoleARN, ""] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] + NotHasGlueConnectionAndHasKmsKeyId: !And + - !Not [ !Condition HasGlueConnection ] + - !Condition HasKmsKeyId + Resources: JdbcConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !Ref DisableSpillEncryption - spill_bucket: !Ref SpillBucket - spill_prefix: !Ref SpillPrefix - default: !Ref DefaultConnectionString - default_scale: !Ref DefaultScale + disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] + spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] + spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] + kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] + glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] + default: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultConnectionString ] + default_scale: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultScale ] FunctionName: !Ref LambdaFunctionName Handler: !Sub "com.amazonaws.athena.connectors.postgresql.${CompositeHandler}" - CodeUri: "./target/athena-postgresql-2022.47.1.jar" + CodeUri: "./target/athena-postgresql-2024.18.2.jar" Description: "Enables Amazon Athena to communicate with PostgreSQL using JDBC" Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, 
!Ref "AWS::NoValue" ] - Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleARN] + Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] VpcConfig: SecurityGroupIds: !Ref SecurityGroupIds SubnetIds: !Ref SubnetIds - + FunctionRole: Condition: NotHasLambdaRole Type: AWS::IAM::Role Properties: + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] ManagedPolicyArns: - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" AssumeRolePolicyDocument: @@ -107,8 +125,8 @@ Resources: Service: - lambda.amazonaws.com Action: - - "sts:AssumeRole" - + - "sts:AssumeRole" + FunctionExecutionPolicy: Condition: NotHasLambdaRole Type: "AWS::IAM::Policy" @@ -122,45 +140,73 @@ Resources: - Action: - secretsmanager:GetSecretValue Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' - Action: - logs:CreateLogGroup Effect: Allow Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' - Action: - - logs:CreateLogStream - - logs:PutLogEvents + - logs:CreateLogStream + - logs:PutLogEvents Effect: Allow Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' - Action: - - athena:GetQueryExecution - - s3:ListAllMyBuckets + - athena:GetQueryExecution + - s3:ListAllMyBuckets Effect: Allow Resource: '*' - Action: - - ec2:CreateNetworkInterface - - ec2:DeleteNetworkInterface - - ec2:DescribeNetworkInterfaces - - ec2:DetachNetworkInterface + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface Effect: Allow Resource: '*' - Action: - - s3:GetObject - - s3:ListBucket - - s3:GetBucketLocation - - s3:GetObjectVersion - - s3:PutObject - - s3:PutObjectAcl 
- - s3:GetLifecycleConfiguration - - s3:PutLifecycleConfiguration - - s3:DeleteObject + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject Effect: Allow Resource: - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName} - - bucketName: - Ref: SpillBucket + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName}/* - - bucketName: - Ref: SpillBucket + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - !If + - HasGlueConnection + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + - !Ref "AWS::NoValue" + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-postgresql/pom.xml b/athena-postgresql/pom.xml index bd47c23aaf..5c2db60723 100644 --- a/athena-postgresql/pom.xml +++ b/athena-postgresql/pom.xml @@ -3,28 +3,28 @@ aws-athena-query-federation com.amazonaws - 2022.47.1 + 2024.18.2 4.0.0 athena-postgresql - 2022.47.1 + 2024.18.2 jar com.amazonaws athena-federation-integ-test - 2022.47.1 + 2024.18.2 test com.amazonaws athena-jdbc - 2022.47.1 + 2024.18.2 com.amazonaws athena-jdbc - 2022.47.1 + 2024.18.2 test-jar test diff --git 
a/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlCompositeHandler.java b/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlCompositeHandler.java index 5ad466fcb3..d493516957 100644 --- a/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlCompositeHandler.java +++ b/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlCompositeHandler.java @@ -19,6 +19,7 @@ */ package com.amazonaws.athena.connectors.postgresql; +import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -32,6 +33,6 @@ public class PostGreSqlCompositeHandler { public PostGreSqlCompositeHandler() { - super(new PostGreSqlMetadataHandler(System.getenv()), new PostGreSqlRecordHandler(System.getenv())); + super(new PostGreSqlMetadataHandler(GlueConnectionUtils.getGlueConnection()), new PostGreSqlRecordHandler(GlueConnectionUtils.getGlueConnection())); } } diff --git a/athena-redis/athena-redis.yaml b/athena-redis/athena-redis.yaml index c8e8c6458f..6825ea2dcf 100644 --- a/athena-redis/athena-redis.yaml +++ b/athena-redis/athena-redis.yaml @@ -10,10 +10,10 @@ Metadata: Labels: - athena-federation HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2022.47.1 + SemanticVersion: 2024.18.2 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: - AthenaCatalogName: + LambdaFunctionName: Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' Type: String AllowedPattern: ^[a-z0-9-_]{1,64}$ @@ -24,6 +24,10 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' 
Type: String Default: athena-spill + GlueConnection: + Description: "(Optional) Name of glue connection storing connection details for Federated Data source." + Type: String + Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -42,10 +46,10 @@ Parameters: SubnetIds: Description: 'One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' Type: 'List' - SecretNameOrPrefix: + SecretName: Description: 'The name or prefix of a set of names within Secrets Manager that this function should have access to. (e.g. redis-*).' Type: String - PermissionsBoundaryARN: + PermissionsBoundaryArn: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String @@ -65,57 +69,146 @@ Parameters: Description: "(Optional) Set this number (for example 1, 2, or 3) to read from a non-default Redis database. Used for Query Pass Through queries only." Default: 0 Type: Number + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
+ Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" Conditions: - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] + NotHasGlueConnectionAndHasKmsKeyId: !And + - !Not [ !Condition HasGlueConnection ] + - !Condition HasKmsKeyId Resources: ConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !Ref DisableSpillEncryption - spill_bucket: !Ref SpillBucket - spill_prefix: !Ref SpillPrefix - qpt_endpoint: !Ref QPTConnectionEndpoint - qpt_ssl: !Ref QPTConnectionSSL - qpt_cluster: !Ref QPTConnectionCluster - qpt_db_number: !Ref QPTConnectionDBNumber - FunctionName: !Ref AthenaCatalogName + disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] + spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] + spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] + kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] + glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] + qpt_endpoint: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref QPTConnectionEndpoint ] + qpt_ssl: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref QPTConnectionSSL ] + qpt_cluster: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref QPTConnectionCluster ] + qpt_db_number: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref QPTConnectionDBNumber ] + FunctionName: !Ref LambdaFunctionName Handler: 
"com.amazonaws.athena.connectors.redis.RedisCompositeHandler" - CodeUri: "./target/athena-redis-2022.47.1.jar" + CodeUri: "./target/athena-redis-2024.18.2.jar" Description: "Enables Amazon Athena to communicate with Redis, making your Redis data accessible via SQL" Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] - Policies: - - Statement: - - Action: - - secretsmanager:GetSecretValue - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:*:*:secret:${SecretNameOrPrefix}' - Version: '2012-10-17' - - Statement: - - Action: - - glue:GetTableVersions - - glue:GetPartitions - - glue:GetTables - - glue:GetTableVersion - - glue:GetDatabases - - glue:GetTable - - glue:GetPartition - - glue:GetDatabase - - athena:GetQueryExecution - - s3:ListAllMyBuckets - Effect: Allow - Resource: '*' - Version: '2012-10-17' - #S3CrudPolicy allows our connector to spill large responses to S3. You can optionally replace this pre-made policy - #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. - - S3CrudPolicy: - BucketName: !Ref SpillBucket - #VPCAccessPolicy allows our connector to run in a VPC so that it can access your data source. 
- - VPCAccessPolicy: {} + Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] VpcConfig: SecurityGroupIds: !Ref SecurityGroupIds - SubnetIds: !Ref SubnetIds \ No newline at end of file + SubnetIds: !Ref SubnetIds + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:*:*:secret:${SecretName}*' + - Action: + - glue:GetTableVersions + - glue:GetPartitions + - glue:GetTables + - glue:GetTableVersion + - glue:GetDatabases + - glue:GetTable + - glue:GetPartition + - glue:GetDatabase + - athena:GetQueryExecution + - s3:ListAllMyBuckets + Effect: Allow + Resource: '*' + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - !If + - HasGlueConnection + - Action: + - glue:GetConnection + 
Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + - !Ref "AWS::NoValue" + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-redis/pom.xml b/athena-redis/pom.xml index d1b63710d9..56a2ae7c00 100644 --- a/athena-redis/pom.xml +++ b/athena-redis/pom.xml @@ -3,11 +3,11 @@ aws-athena-query-federation com.amazonaws - 2022.47.1 + 2024.18.2 4.0.0 athena-redis - 2022.47.1 + 2024.18.2 com.amazonaws @@ -93,7 +93,7 @@ com.amazonaws aws-athena-federation-sdk - 2022.47.1 + 2024.18.2 withdep @@ -141,7 +141,7 @@ com.amazonaws athena-federation-integ-test - 2022.47.1 + 2024.18.2 test diff --git a/athena-redis/src/main/java/com/amazonaws/athena/connectors/redis/RedisCompositeHandler.java b/athena-redis/src/main/java/com/amazonaws/athena/connectors/redis/RedisCompositeHandler.java index 08d9982471..341516839b 100644 --- a/athena-redis/src/main/java/com/amazonaws/athena/connectors/redis/RedisCompositeHandler.java +++ b/athena-redis/src/main/java/com/amazonaws/athena/connectors/redis/RedisCompositeHandler.java @@ -19,6 +19,7 @@ */ package com.amazonaws.athena.connectors.redis; +import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -30,6 +31,6 @@ public class RedisCompositeHandler { public RedisCompositeHandler() { - super(new RedisMetadataHandler(System.getenv()), new RedisRecordHandler(System.getenv())); + super(new 
RedisMetadataHandler(GlueConnectionUtils.getGlueConnection()), new RedisRecordHandler(GlueConnectionUtils.getGlueConnection())); } } diff --git a/athena-redshift/athena-redshift.yaml b/athena-redshift/athena-redshift.yaml index c59c129c44..b933f6c8c7 100644 --- a/athena-redshift/athena-redshift.yaml +++ b/athena-redshift/athena-redshift.yaml @@ -10,7 +10,7 @@ Metadata: Labels: - athena-federation HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2022.47.1 + SemanticVersion: 2024.18.2 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: LambdaFunctionName: @@ -20,9 +20,10 @@ Parameters: DefaultConnectionString: Description: 'The default connection string is used when catalog is "lambda:${LambdaFunctionName}". Catalog specific Connection Strings can be added later. Format: ${DatabaseType}://${NativeJdbcConnectionString}.' Type: String - SecretNamePrefix: - Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena Redshift Federation secret names can be prefixed with "AthenaRedshiftFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaRedshiftFederation*". Parameter value in this case should be "AthenaRedshiftFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' - Type: String + Default: "" + SecretName: + Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena Redshift Federation secret names can be prefixed with "AthenaRedshiftFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaRedshiftFederation*". Parameter value in this case should be "AthenaRedshiftFederation". 
If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' + Type: String SpillBucket: Description: 'The name of the bucket where this function can spill data.' Type: String @@ -30,6 +31,10 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill + GlueConnection: + Description: "(Optional) Name of glue connection storing connection details for Federated Data source." + Type: String + Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -48,44 +53,47 @@ Parameters: SubnetIds: Description: 'One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' Type: 'List' - KMSKeyId: + KmsKeyId: Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
Type: String Default: "" - LambdaRole: + LambdaRoleArn: Description: "(Optional) A custom IAM role ARN to be used by the Connector lambda" Type: String Default: "" - PermissionsBoundaryARN: + PermissionsBoundaryArn: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String Conditions: - HasKMSKeyId: !Not [!Equals [!Ref KMSKeyId, ""]] - NotHasLambdaRole: !Equals [!Ref LambdaRole, ""] - NotHasLambdaRoleAndHasKMSKeyId: !And - - !Condition NotHasLambdaRole - - !Condition HasKMSKeyId - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] + HasKmsKeyId: !Not [!Equals [!Ref KmsKeyId, ""]] + NotHasLambdaRole: !Equals [!Ref LambdaRoleArn, ""] + CreateKmsPolicy: !And [!Condition NotHasLambdaRole, !Condition HasKmsKeyId] + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] + HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] + NotHasGlueConnectionAndHasKmsKeyId: !And + - !Not [ !Condition HasGlueConnection ] + - !Condition HasKmsKeyId Resources: JdbcConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !Ref DisableSpillEncryption - spill_bucket: !Ref SpillBucket - spill_prefix: !Ref SpillPrefix - default: !Ref DefaultConnectionString - kms_key_id: !If [HasKMSKeyId, !Ref KMSKeyId, !Ref "AWS::NoValue"] + disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] + spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] + spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] + kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] + glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] + default: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultConnectionString ] FunctionName: !Ref LambdaFunctionName 
Handler: "com.amazonaws.athena.connectors.redshift.RedshiftMuxCompositeHandler" - CodeUri: "./target/athena-redshift-2022.47.1.jar" + CodeUri: "./target/athena-redshift-2024.18.2.jar" Description: "Enables Amazon Athena to communicate with Redshift using JDBC" Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRole] + Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] VpcConfig: SecurityGroupIds: !Ref SecurityGroupIds SubnetIds: !Ref SubnetIds @@ -94,7 +102,7 @@ Resources: Condition: NotHasLambdaRole Type: AWS::IAM::Role Properties: - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] ManagedPolicyArns: - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" AssumeRolePolicyDocument: @@ -107,7 +115,6 @@ Resources: Action: - "sts:AssumeRole" - FunctionExecutionPolicy: Condition: NotHasLambdaRole Type: "AWS::IAM::Policy" @@ -119,12 +126,19 @@ Resources: - Effect: Allow Action: - secretsmanager:GetSecretValue - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' - Effect: Allow Action: - athena:GetQueryExecution - s3:ListAllMyBuckets Resource: '*' + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' - Effect: Allow Action: - s3:GetObject @@ -138,31 +152,40 @@ Resources: - s3:DeleteObject Resource: - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName} - - bucketName: - Ref: SpillBucket + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket - Fn::Sub: - - 
arn:${AWS::Partition}:s3:::${bucketName}/* - - bucketName: - Ref: SpillBucket + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - !If + - HasGlueConnection + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + - !Ref "AWS::NoValue" Roles: - !Ref FunctionRole - FunctionKMSPolicy: - Condition: NotHasLambdaRoleAndHasKMSKeyId + FunctionKmsPolicy: + Condition: CreateKmsPolicy Type: "AWS::IAM::Policy" Properties: - PolicyName: FunctionKMSPolicy + PolicyName: FunctionKmsPolicy PolicyDocument: Version: 2012-10-17 Statement: - Effect: Allow Action: - - kms:GenerateRandom + - kms:GenerateRandom Resource: '*' - Effect: Allow Action: - kms:GenerateDataKey - Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KMSKeyId}" + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" Roles: - !Ref FunctionRole diff --git a/athena-redshift/pom.xml b/athena-redshift/pom.xml index b629315c7a..85a7e9b008 100644 --- a/athena-redshift/pom.xml +++ b/athena-redshift/pom.xml @@ -3,21 +3,21 @@ aws-athena-query-federation com.amazonaws - 2022.47.1 + 2024.18.2 4.0.0 athena-redshift - 2022.47.1 + 2024.18.2 com.amazonaws athena-postgresql - 2022.47.1 + 2024.18.2 com.amazonaws athena-federation-integ-test - 2022.47.1 + 2024.18.2 test @@ -28,7 +28,7 @@ com.amazonaws athena-jdbc - 2022.47.1 + 2024.18.2 test-jar test diff --git a/athena-saphana/athena-saphana.yaml b/athena-saphana/athena-saphana.yaml index 7355137489..fc66a6fcb9 100644 --- a/athena-saphana/athena-saphana.yaml +++ b/athena-saphana/athena-saphana.yaml @@ -10,7 +10,7 @@ Metadata: Labels: - athena-federation HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2022.47.1 + SemanticVersion: 2024.18.2 SourceCodeUrl: 
'https://github.com/awslabs/aws-athena-query-federation' Parameters: LambdaFunctionName: @@ -20,7 +20,8 @@ Parameters: DefaultConnectionString: Description: 'The default connection string is used when catalog is "lambda:${LambdaFunctionName}". Catalog specific Connection Strings can be added later. Format: ${DatabaseType}://${NativeJdbcConnectionString}.' Type: String - SecretNamePrefix: + Default: "" + SecretName: Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' Type: String SpillBucket: @@ -30,6 +31,10 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill + GlueConnection: + Description: "(Optional) Name of glue connection storing connection details for Federated Data source." + Type: String + Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -50,24 +55,43 @@ Parameters: Description: '(Optional) One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' Type: CommaDelimitedList Default: "" - PermissionsBoundaryARN: + PermissionsBoundaryArn: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. 
Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." + Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" + Conditions: - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] HasSecurityGroups: !Not [ !Equals [ !Join ["", !Ref SecurityGroupIds], "" ] ] HasSubnets: !Not [ !Equals [ !Join ["", !Ref SubnetIds], "" ] ] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] + NotHasGlueConnectionAndHasKmsKeyId: !And + - !Not [ !Condition HasGlueConnection ] + - !Condition HasKmsKeyId + Resources: JdbcConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !Ref DisableSpillEncryption - spill_bucket: !Ref SpillBucket - spill_prefix: !Ref SpillPrefix - default: !Ref DefaultConnectionString + disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] + spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] + spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] + kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] + glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] + default: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultConnectionString ] FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.saphana.SaphanaMuxCompositeHandler" CodeUri: "./target/athena-saphana.zip" @@ -75,40 +99,108 @@ Resources: Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: 
!Ref LambdaMemory - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] - Policies: - - Statement: - - Action: - - secretsmanager:GetSecretValue - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' - Version: '2012-10-17' - - Statement: - - Action: - - logs:CreateLogGroup - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' - Version: '2012-10-17' - - Statement: - - Action: - - logs:CreateLogStream - - logs:PutLogEvents - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' - Version: '2012-10-17' - - Statement: - - Action: - - athena:GetQueryExecution - - s3:ListAllMyBuckets - Effect: Allow - Resource: '*' - Version: '2012-10-17' - #S3CrudPolicy allows our connector to spill large responses to S3. You can optionally replace this pre-made policy - #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. - - S3CrudPolicy: - BucketName: !Ref SpillBucket - #VPCAccessPolicy allows our connector to run in a VPC so that it can access your data source. 
- - VPCAccessPolicy: {} + Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] VpcConfig: SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' + - Action: + - logs:CreateLogGroup + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' + - Action: + - logs:CreateLogStream + - logs:PutLogEvents + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' + - Action: + - athena:GetQueryExecution + - s3:ListAllMyBuckets + Effect: Allow + Resource: '*' + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - 
arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - !If + - HasGlueConnection + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + - !Ref "AWS::NoValue" + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole diff --git a/athena-saphana/pom.xml b/athena-saphana/pom.xml index 1657d17d38..51b3fe1a98 100644 --- a/athena-saphana/pom.xml +++ b/athena-saphana/pom.xml @@ -3,27 +3,27 @@ aws-athena-query-federation com.amazonaws - 2022.47.1 + 2024.18.2 4.0.0 athena-saphana - 2022.47.1 + 2024.18.2 com.amazonaws athena-federation-integ-test - 2022.47.1 + 2024.18.2 test com.amazonaws athena-jdbc - 2022.47.1 + 2024.18.2 com.amazonaws athena-jdbc - 2022.47.1 + 2024.18.2 test-jar test diff --git a/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaCompositeHandler.java b/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaCompositeHandler.java index 911d762c41..df4f57b295 100644 --- a/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaCompositeHandler.java +++ b/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaCompositeHandler.java @@ -22,6 +22,7 @@ package com.amazonaws.athena.connectors.saphana; +import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import 
com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -35,6 +36,6 @@ public class SaphanaCompositeHandler { public SaphanaCompositeHandler() { - super(new SaphanaMetadataHandler(System.getenv()), new SaphanaRecordHandler(System.getenv())); + super(new SaphanaMetadataHandler(GlueConnectionUtils.getGlueConnection()), new SaphanaRecordHandler(GlueConnectionUtils.getGlueConnection())); } } diff --git a/athena-snowflake/athena-snowflake.yaml b/athena-snowflake/athena-snowflake.yaml index 279dfa8d6d..bf3e1b6fb1 100644 --- a/athena-snowflake/athena-snowflake.yaml +++ b/athena-snowflake/athena-snowflake.yaml @@ -10,7 +10,7 @@ Metadata: Labels: - athena-federation HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2022.47.1 + SemanticVersion: 2024.18.2 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: LambdaFunctionName: @@ -20,7 +20,8 @@ Parameters: DefaultConnectionString: Description: 'The default connection string is used when catalog is "lambda:${LambdaFunctionName}". Catalog specific Connection Strings can be added later. Format: ${DatabaseType}://${NativeJdbcConnectionString}.' Type: String - SecretNamePrefix: + Default: "" + SecretName: Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' Type: String SpillBucket: @@ -30,6 +31,10 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' 
Type: String Default: athena-spill + GlueConnection: + Description: "(Optional) Name of glue connection storing connection details for Federated Data source." + Type: String + Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -50,24 +55,43 @@ Parameters: Description: '(Optional) One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' Type: CommaDelimitedList Default: "" - PermissionsBoundaryARN: + PermissionsBoundaryArn: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." + Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" + Conditions: - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] HasSecurityGroups: !Not [ !Equals [ !Join ["", !Ref SecurityGroupIds], "" ] ] HasSubnets: !Not [ !Equals [ !Join ["", !Ref SubnetIds], "" ] ] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] + NotHasGlueConnectionAndHasKmsKeyId: !And + - !Not [ !Condition HasGlueConnection ] + - !Condition HasKmsKeyId + Resources: JdbcConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !Ref DisableSpillEncryption - spill_bucket: 
!Ref SpillBucket - spill_prefix: !Ref SpillPrefix - default: !Ref DefaultConnectionString + disable_spill_encryption: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption] + spill_bucket: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket] + spill_prefix: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix] + kms_key_id: !If [NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue"] + glue_connection: !If [HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue"] + default: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultConnectionString] FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.snowflake.SnowflakeMuxCompositeHandler" CodeUri: "./target/athena-snowflake.zip" @@ -75,40 +99,108 @@ Resources: Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] - Policies: - - Statement: - - Action: - - secretsmanager:GetSecretValue - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' - Version: '2012-10-17' - - Statement: - - Action: - - logs:CreateLogGroup - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' - Version: '2012-10-17' - - Statement: - - Action: - - logs:CreateLogStream - - logs:PutLogEvents - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' - Version: '2012-10-17' - - Statement: - - Action: - - athena:GetQueryExecution - - s3:ListAllMyBuckets - Effect: Allow - Resource: '*' - Version: '2012-10-17' - #S3CrudPolicy allows our connector to spill large responses to S3. You can optionally replace this pre-made policy - #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. 
- - S3CrudPolicy: - BucketName: !Ref SpillBucket - #VPCAccessPolicy allows our connector to run in a VPC so that it can access your data source. - - VPCAccessPolicy: {} + Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] VpcConfig: SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] - SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] \ No newline at end of file + SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' + - Action: + - logs:CreateLogGroup + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' + - Action: + - logs:CreateLogStream + - logs:PutLogEvents + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' + - Action: + - athena:GetQueryExecution + - s3:ListAllMyBuckets + Effect: Allow + Resource: '*' + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - 
s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - !If + - HasGlueConnection + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + - !Ref "AWS::NoValue" + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-snowflake/pom.xml b/athena-snowflake/pom.xml index ac2fbf7bcb..efac2dd7b6 100644 --- a/athena-snowflake/pom.xml +++ b/athena-snowflake/pom.xml @@ -3,27 +3,27 @@ aws-athena-query-federation com.amazonaws - 2022.47.1 + 2024.18.2 4.0.0 athena-snowflake - 2022.47.1 + 2024.18.2 com.amazonaws athena-federation-integ-test - 2022.47.1 + 2024.18.2 test com.amazonaws athena-jdbc - 2022.47.1 + 2024.18.2 com.amazonaws athena-jdbc - 2022.47.1 + 2024.18.2 test-jar test diff --git a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeCompositeHandler.java b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeCompositeHandler.java index 9e6b70ef31..87aa9af6b0 100644 --- a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeCompositeHandler.java +++ 
b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeCompositeHandler.java @@ -22,6 +22,7 @@ package com.amazonaws.athena.connectors.snowflake; +import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -35,6 +36,6 @@ public class SnowflakeCompositeHandler { public SnowflakeCompositeHandler() { - super(new SnowflakeMetadataHandler(System.getenv()), new SnowflakeRecordHandler(System.getenv())); + super(new SnowflakeMetadataHandler(GlueConnectionUtils.getGlueConnection()), new SnowflakeRecordHandler(GlueConnectionUtils.getGlueConnection())); } } diff --git a/athena-sqlserver/athena-sqlserver.yaml b/athena-sqlserver/athena-sqlserver.yaml index 9b3009c2a4..17f7f3c896 100644 --- a/athena-sqlserver/athena-sqlserver.yaml +++ b/athena-sqlserver/athena-sqlserver.yaml @@ -12,7 +12,7 @@ Metadata: - athena-federation - jdbc HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2022.47.1 + SemanticVersion: 2024.18.2 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: LambdaFunctionName: @@ -22,7 +22,8 @@ Parameters: DefaultConnectionString: Description: 'The default connection string is used when catalog is "lambda:${LambdaFunctionName}". Catalog specific Connection Strings can be added later. Format: ${DatabaseType}://${NativeJdbcConnectionString}.' Type: String - SecretNamePrefix: + Default: "" + SecretName: Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". 
If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' Type: String SpillBucket: @@ -32,6 +33,10 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill + GlueConnection: + Description: "(Optional) Name of glue connection storing connection details for Federated Data source." + Type: String + Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -40,7 +45,7 @@ Parameters: Description: 'Lambda memory in MB (min 128 - 3008 max).' Default: 3008 Type: Number - LambdaRoleARN: + LambdaRoleArn: Description: "(Optional) A custom role to be used by the Connector lambda" Type: String Default: "" @@ -56,34 +61,45 @@ Parameters: Description: '(Optional) One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' Type: CommaDelimitedList Default: "" - PermissionsBoundaryARN: + PermissionsBoundaryArn: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
+ Type: String + Default: "" Conditions: - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] - NotHasLambdaRole: !Equals [!Ref LambdaRoleARN, ""] - HasSecurityGroups: !Not [ !Equals [ !Join ["", !Ref SecurityGroupIds], "" ] ] - HasSubnets: !Not [ !Equals [ !Join ["", !Ref SubnetIds], "" ] ] + HasSecurityGroups: !Not [ !Equals [ !Join [ "", !Ref SecurityGroupIds ], "" ] ] + HasSubnets: !Not [ !Equals [ !Join [ "", !Ref SubnetIds ], "" ] ] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] + NotHasGlueConnectionAndHasKmsKeyId: !And + - !Not [ !Condition HasGlueConnection ] + - !Condition HasKmsKeyId Resources: JdbcConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !Ref DisableSpillEncryption - spill_bucket: !Ref SpillBucket - spill_prefix: !Ref SpillPrefix - default: !Ref DefaultConnectionString + disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] + spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] + spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] + kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] + glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] + default: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultConnectionString ] FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.sqlserver.SqlServerMuxCompositeHandler" - CodeUri: "./target/athena-sqlserver-2022.47.1.jar" + CodeUri: "./target/athena-sqlserver-2024.18.2.jar" Description: "Enables Amazon Athena to communicate with 
SQLSERVER using JDBC" Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] - Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleARN] + Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] VpcConfig: SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] @@ -91,6 +107,7 @@ Resources: Condition: NotHasLambdaRole Type: AWS::IAM::Role Properties: + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] ManagedPolicyArns: - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" AssumeRolePolicyDocument: @@ -101,7 +118,7 @@ Resources: Service: - lambda.amazonaws.com Action: - - "sts:AssumeRole" + - "sts:AssumeRole" FunctionExecutionPolicy: Condition: NotHasLambdaRole Type: "AWS::IAM::Policy" @@ -115,45 +132,73 @@ Resources: - Action: - secretsmanager:GetSecretValue Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' - Action: - logs:CreateLogGroup Effect: Allow Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' - Action: - - logs:CreateLogStream - - logs:PutLogEvents + - logs:CreateLogStream + - logs:PutLogEvents Effect: Allow Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' - Action: - - athena:GetQueryExecution - - s3:ListAllMyBuckets + - athena:GetQueryExecution + - s3:ListAllMyBuckets Effect: Allow Resource: '*' - Action: - - ec2:CreateNetworkInterface - - ec2:DeleteNetworkInterface - - ec2:DescribeNetworkInterfaces - - ec2:DetachNetworkInterface + - 
ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface Effect: Allow Resource: '*' - Action: - - s3:GetObject - - s3:ListBucket - - s3:GetBucketLocation - - s3:GetObjectVersion - - s3:PutObject - - s3:PutObjectAcl - - s3:GetLifecycleConfiguration - - s3:PutLifecycleConfiguration - - s3:DeleteObject + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject Effect: Allow Resource: - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName} - - bucketName: - Ref: SpillBucket + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName}/* - - bucketName: - Ref: SpillBucket + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - !If + - HasGlueConnection + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + - !Ref "AWS::NoValue" + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole diff --git a/athena-sqlserver/pom.xml b/athena-sqlserver/pom.xml index 6bfab70343..d482876c82 100644 --- a/athena-sqlserver/pom.xml +++ b/athena-sqlserver/pom.xml @@ -3,27 +3,27 @@ aws-athena-query-federation com.amazonaws - 2022.47.1 + 2024.18.2 4.0.0 athena-sqlserver - 2022.47.1 + 2024.18.2 com.amazonaws athena-federation-integ-test - 2022.47.1 + 
2024.18.2 test com.amazonaws athena-jdbc - 2022.47.1 + 2024.18.2 com.amazonaws athena-jdbc - 2022.47.1 + 2024.18.2 test-jar test diff --git a/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerCompositeHandler.java b/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerCompositeHandler.java index bc4b1b9077..1cb4873fcb 100644 --- a/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerCompositeHandler.java +++ b/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerCompositeHandler.java @@ -19,12 +19,13 @@ */ package com.amazonaws.athena.connectors.sqlserver; +import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; public class SqlServerCompositeHandler extends CompositeHandler { public SqlServerCompositeHandler() { - super(new SqlServerMetadataHandler(System.getenv()), new SqlServerRecordHandler(System.getenv())); + super(new SqlServerMetadataHandler(GlueConnectionUtils.getGlueConnection()), new SqlServerRecordHandler(GlueConnectionUtils.getGlueConnection())); } } diff --git a/athena-synapse/athena-synapse.yaml b/athena-synapse/athena-synapse.yaml index 5c3a5fc655..c14aece942 100644 --- a/athena-synapse/athena-synapse.yaml +++ b/athena-synapse/athena-synapse.yaml @@ -12,7 +12,7 @@ Metadata: - athena-federation - jdbc HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2022.47.1 + SemanticVersion: 2024.18.2 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: LambdaFunctionName: @@ -22,7 +22,8 @@ Parameters: DefaultConnectionString: Description: 'The default connection string is used when catalog is "lambda:${LambdaFunctionName}". Catalog specific Connection Strings can be added later. Format: ${DatabaseType}://${NativeJdbcConnectionString}.' 
Type: String - SecretNamePrefix: + Default: "" + SecretName: Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' Type: String SpillBucket: @@ -32,6 +33,10 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill + GlueConnection: + Description: "(Optional) Name of glue connection storing connection details for Federated Data source." + Type: String + Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -40,11 +45,11 @@ Parameters: Description: 'Lambda memory in MB (min 128 - 3008 max).' Default: 3008 Type: Number - LambdaRoleARN: + LambdaRoleArn: Description: "(Optional) A custom role to be used by the Connector lambda" Default: "" Type: String - PermissionsBoundaryARN: + PermissionsBoundaryArn: Description: "(Optional) A custom Permission Boundary to be used by the Connector lambda" Default: "" Type: String @@ -60,12 +65,22 @@ Parameters: Description: '(Optional) One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' Type: CommaDelimitedList Default: "" + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
+ Type: String + Default: "" Conditions: - NotHasLambdaRole: !Equals [!Ref LambdaRoleARN, ""] - HasPermissionsBoundary: !Not [!Equals [!Ref PermissionsBoundaryARN, ""]] HasSecurityGroups: !Not [ !Equals [ !Join ["", !Ref SecurityGroupIds], "" ] ] HasSubnets: !Not [ !Equals [ !Join ["", !Ref SubnetIds], "" ] ] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] + NotHasGlueConnectionAndHasKmsKeyId: !And + - !Not [ !Condition HasGlueConnection ] + - !Condition HasKmsKeyId Resources: JdbcConnectorConfig: @@ -73,18 +88,20 @@ Resources: Properties: Environment: Variables: - disable_spill_encryption: !Ref DisableSpillEncryption - spill_bucket: !Ref SpillBucket - spill_prefix: !Ref SpillPrefix - default: !Ref DefaultConnectionString + disable_spill_encryption: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption] + spill_bucket: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket] + spill_prefix: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix] + kms_key_id: !If [NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue"] + glue_connection: !If [HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue"] + default: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultConnectionString] FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.synapse.SynapseMuxCompositeHandler" - CodeUri: "./target/athena-synapse-2022.47.1.jar" + CodeUri: "./target/athena-synapse-2024.18.2.jar" Description: "Enables Amazon Athena to communicate with SYNPASE using JDBC" Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleARN] + Role: !If 
[NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] VpcConfig: SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] @@ -93,11 +110,7 @@ Resources: Condition: NotHasLambdaRole Type: AWS::IAM::Role Properties: - PermissionsBoundary: - Fn::If: - - HasPermissionsBoundary - - Ref: PermissionsBoundaryARN - - Ref: AWS::NoValue + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] ManagedPolicyArns: - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" AssumeRolePolicyDocument: @@ -121,7 +134,7 @@ Resources: - Action: - secretsmanager:GetSecretValue Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' - Action: - logs:CreateLogGroup Effect: Allow @@ -144,5 +157,53 @@ Resources: - autoscaling:CompleteLifecycleAction Effect: Allow Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - !If + - HasGlueConnection + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + - !Ref "AWS::NoValue" + Roles: + - !Ref FunctionRole + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: 
FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" Roles: - !Ref FunctionRole diff --git a/athena-synapse/pom.xml b/athena-synapse/pom.xml index 231460d39b..461c00ccfd 100644 --- a/athena-synapse/pom.xml +++ b/athena-synapse/pom.xml @@ -3,27 +3,27 @@ aws-athena-query-federation com.amazonaws - 2022.47.1 + 2024.18.2 4.0.0 athena-synapse - 2022.47.1 + 2024.18.2 com.amazonaws athena-federation-integ-test - 2022.47.1 + 2024.18.2 test com.amazonaws athena-jdbc - 2022.47.1 + 2024.18.2 com.amazonaws athena-jdbc - 2022.47.1 + 2024.18.2 test-jar test diff --git a/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseCompositeHandler.java b/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseCompositeHandler.java index e4499930f3..d8f6c1f623 100644 --- a/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseCompositeHandler.java +++ b/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseCompositeHandler.java @@ -19,12 +19,13 @@ */ package com.amazonaws.athena.connectors.synapse; +import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; public class SynapseCompositeHandler extends CompositeHandler { public SynapseCompositeHandler() { - super(new SynapseMetadataHandler(System.getenv()), new SynapseRecordHandler(System.getenv())); + super(new SynapseMetadataHandler(GlueConnectionUtils.getGlueConnection()), new SynapseRecordHandler(GlueConnectionUtils.getGlueConnection())); } } diff --git a/athena-teradata/athena-teradata.yaml b/athena-teradata/athena-teradata.yaml index 171adb844c..4c2254fd99 100644 --- a/athena-teradata/athena-teradata.yaml +++ b/athena-teradata/athena-teradata.yaml @@ -10,7 
+10,7 @@ Metadata: Labels: - athena-federation HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2022.47.1 + SemanticVersion: 2024.18.2 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: LambdaFunctionName: @@ -20,7 +20,8 @@ Parameters: DefaultConnectionString: Description: 'The default connection string is used when catalog is "lambda:${LambdaFunctionName}". Catalog specific Connection Strings can be added later. Format: ${DatabaseType}://${NativeJdbcConnectionString}.' Type: String - SecretNamePrefix: + Default: "" + SecretName: Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' Type: String SpillBucket: @@ -30,7 +31,11 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill - LambdaJDBCLayername: + GlueConnection: + Description: "(Optional) Name of glue connection storing connection details for Federated Data source." + Type: String + Default: "" + LambdaJdbcLayername: Description: 'Lambda JDBC layer Name. 
Must be ARN of layer' Type: String LambdaTimeout: @@ -57,68 +62,153 @@ Parameters: Description: 'Partition Count Limit' Type: Number Default: 500 - PermissionsBoundaryARN: + PermissionsBoundaryArn: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." + Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" Conditions: - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] HasSecurityGroups: !Not [ !Equals [ !Join ["", !Ref SecurityGroupIds], "" ] ] HasSubnets: !Not [ !Equals [ !Join ["", !Ref SubnetIds], "" ] ] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] + NotHasGlueConnectionAndHasKmsKeyId: !And + - !Not [ !Condition HasGlueConnection ] + - !Condition HasKmsKeyId Resources: JdbcConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !Ref DisableSpillEncryption - spill_bucket: !Ref SpillBucket - spill_prefix: !Ref SpillPrefix - default: !Ref DefaultConnectionString - partitioncount: !Ref PartitionCount + disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] + spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] + spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref 
SpillPrefix ] + kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] + glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] + default: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultConnectionString ] + partitioncount: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref PartitionCount ] FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.teradata.TeradataMuxCompositeHandler" Layers: - - !Ref LambdaJDBCLayername - CodeUri: "./target/athena-teradata-2022.47.1.jar" + - !Ref LambdaJdbcLayername + CodeUri: "./target/athena-teradata-2024.18.2.jar" Description: "Enables Amazon Athena to communicate with Teradata using JDBC" Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] - Policies: - - Statement: - - Action: - - secretsmanager:GetSecretValue - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' - Version: '2012-10-17' - - Statement: - - Action: - - logs:CreateLogGroup - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' - Version: '2012-10-17' - - Statement: - - Action: - - logs:CreateLogStream - - logs:PutLogEvents - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' - Version: '2012-10-17' - - Statement: - - Action: - - athena:GetQueryExecution - - s3:ListAllMyBuckets - Effect: Allow - Resource: '*' - Version: '2012-10-17' - #S3CrudPolicy allows our connector to spill large responses to S3. You can optionally replace this pre-made policy - #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. 
- - S3CrudPolicy: - BucketName: !Ref SpillBucket - #VPCAccessPolicy allows our connector to run in a VPC so that it can access your data source. - - VPCAccessPolicy: {} + Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] VpcConfig: SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] - SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] \ No newline at end of file + SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' + - Action: + - logs:CreateLogGroup + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' + - Action: + - logs:CreateLogStream + - logs:PutLogEvents + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' + - Action: + - athena:GetQueryExecution + - s3:ListAllMyBuckets + Effect: Allow + Resource: '*' + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - 
s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - !If + - HasGlueConnection + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + - !Ref "AWS::NoValue" + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole diff --git a/athena-teradata/pom.xml b/athena-teradata/pom.xml index f643b8709d..02ddace396 100644 --- a/athena-teradata/pom.xml +++ b/athena-teradata/pom.xml @@ -3,27 +3,27 @@ aws-athena-query-federation com.amazonaws - 2022.47.1 + 2024.18.2 4.0.0 athena-teradata - 2022.47.1 + 2024.18.2 com.amazonaws athena-federation-integ-test - 2022.47.1 + 2024.18.2 test com.amazonaws athena-jdbc - 2022.47.1 + 2024.18.2 com.amazonaws athena-jdbc - 2022.47.1 + 2024.18.2 test-jar test @@ -75,7 +75,7 @@ - + diff --git a/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataCompositeHandler.java b/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataCompositeHandler.java index 8aec45fc78..c39f7e8ca4 100644 --- a/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataCompositeHandler.java +++ 
b/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataCompositeHandler.java @@ -20,6 +20,8 @@ */ package com.amazonaws.athena.connectors.teradata; + +import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -33,6 +35,6 @@ public class TeradataCompositeHandler { public TeradataCompositeHandler() { - super(new TeradataMetadataHandler(System.getenv()), new TeradataRecordHandler(System.getenv())); + super(new TeradataMetadataHandler(GlueConnectionUtils.getGlueConnection()), new TeradataRecordHandler(GlueConnectionUtils.getGlueConnection())); } } diff --git a/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataMetadataHandler.java b/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataMetadataHandler.java index b7d7ffe201..7464cb31b6 100644 --- a/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataMetadataHandler.java +++ b/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataMetadataHandler.java @@ -244,7 +244,7 @@ private boolean useNonPartitionApproach(GetTableLayoutRequest getTableLayoutRequ { final String getPartitionsCountQuery = "Select count(distinct partition ) as partition_count FROM " + getTableLayoutRequest.getTableName().getSchemaName() + "." + getTableLayoutRequest.getTableName().getTableName() + " where 1= ?"; - String partitioncount = configOptions.get("partitioncount"); + String partitioncount = configOptions.containsKey("partition_count") ? 
configOptions.get("partition_count") : configOptions.getOrDefault("partitioncount", "500"); int totalPartitionCount = Integer.parseInt(partitioncount); int partitionCount = 0; boolean nonPartitionApproach = false; diff --git a/athena-timestream/athena-timestream.yaml b/athena-timestream/athena-timestream.yaml index efcb20daee..9c6296ba04 100644 --- a/athena-timestream/athena-timestream.yaml +++ b/athena-timestream/athena-timestream.yaml @@ -10,10 +10,10 @@ Metadata: Labels: - athena-federation HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2022.47.1 + SemanticVersion: 2024.18.2 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: - AthenaCatalogName: + LambdaFunctionName: Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' Type: String AllowedPattern: ^[a-z0-9-_]{1,64}$ @@ -24,6 +24,10 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill + GlueConnection: + Description: "(Optional) Name of glue connection storing connection details for Federated Data source." + Type: String + Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -36,49 +40,135 @@ Parameters: Description: "WARNING: If set to 'true' encryption for spilled data is disabled." Default: 'false' Type: String - PermissionsBoundaryARN: + PermissionsBoundaryArn: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
+ Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" Conditions: - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] + NotHasGlueConnectionAndHasKmsKeyId: !And + - !Not [ !Condition HasGlueConnection ] + - !Condition HasKmsKeyId Resources: ConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !Ref DisableSpillEncryption - spill_bucket: !Ref SpillBucket - spill_prefix: !Ref SpillPrefix - FunctionName: !Ref AthenaCatalogName + disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] + spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] + spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] + kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] + glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] + FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.timestream.TimestreamCompositeHandler" - CodeUri: "./target/athena-timestream-2022.47.1.jar" + CodeUri: "./target/athena-timestream-2024.18.2.jar" Description: "Enables Amazon Athena to communicate with Amazon Timestream, making your time series data accessible from Athena." 
Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] - Policies: - - Statement: + Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - glue:GetTableVersions + - glue:GetPartitions + - glue:GetTables + - glue:GetTableVersion + - glue:GetDatabases + - glue:GetTable + - glue:GetPartition + - glue:GetDatabase + - athena:GetQueryExecution + - s3:ListAllMyBuckets + - timestream:Describe* + - timestream:List* + - timestream:Select* + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - !If + - HasGlueConnection - Action: - - glue:GetTableVersions - - glue:GetPartitions - - glue:GetTables - - glue:GetTableVersion - - glue:GetDatabases - - glue:GetTable - - glue:GetPartition - - glue:GetDatabase - - athena:GetQueryExecution - - s3:ListAllMyBuckets - - 
timestream:Describe* - - timestream:List* - - timestream:Select* + - glue:GetConnection Effect: Allow - Resource: '*' - Version: '2012-10-17' - #S3CrudPolicy allows our connector to spill large responses to S3. You can optionally replace this pre-made policy - #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. - - S3CrudPolicy: - BucketName: !Ref SpillBucket + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + - !Ref "AWS::NoValue" + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-timestream/pom.xml b/athena-timestream/pom.xml index a58b2c13c0..93cc230573 100644 --- a/athena-timestream/pom.xml +++ b/athena-timestream/pom.xml @@ -3,16 +3,16 @@ aws-athena-query-federation com.amazonaws - 2022.47.1 + 2024.18.2 4.0.0 athena-timestream - 2022.47.1 + 2024.18.2 com.amazonaws aws-athena-federation-sdk - 2022.47.1 + 2024.18.2 withdep @@ -25,7 +25,7 @@ com.amazonaws athena-federation-integ-test - 2022.47.1 + 2024.18.2 withdep test diff --git a/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamCompositeHandler.java b/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamCompositeHandler.java index e85fc84532..502da41d72 100644 --- a/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamCompositeHandler.java +++ 
b/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamCompositeHandler.java @@ -19,6 +19,7 @@ */ package com.amazonaws.athena.connectors.timestream; +import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; public class TimestreamCompositeHandler @@ -26,6 +27,6 @@ public class TimestreamCompositeHandler { public TimestreamCompositeHandler() { - super(new TimestreamMetadataHandler(System.getenv()), new TimestreamRecordHandler(System.getenv())); + super(new TimestreamMetadataHandler(GlueConnectionUtils.getGlueConnection()), new TimestreamRecordHandler(GlueConnectionUtils.getGlueConnection())); } } diff --git a/athena-tpcds/athena-tpcds.yaml b/athena-tpcds/athena-tpcds.yaml index 69654b2136..ad406a7b26 100644 --- a/athena-tpcds/athena-tpcds.yaml +++ b/athena-tpcds/athena-tpcds.yaml @@ -10,10 +10,10 @@ Metadata: Labels: - athena-federation HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2022.47.1 + SemanticVersion: 2024.18.2 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: - AthenaCatalogName: + LambdaFunctionName: Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' Type: String AllowedPattern: ^[a-z0-9-_]{1,64}$ @@ -24,6 +24,10 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill + GlueConnection: + Description: "(Optional) Name of glue connection storing connection details for Federated Data source." + Type: String + Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -36,38 +40,126 @@ Parameters: Description: "WARNING: If set to 'true' encryption for spilled data is disabled." 
Default: 'false' Type: String - PermissionsBoundaryARN: + PermissionsBoundaryArn: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." + Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" + Conditions: - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] + NotHasGlueConnectionAndHasKmsKeyId: !And + - !Not [ !Condition HasGlueConnection ] + - !Condition HasKmsKeyId + Resources: ConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !Ref DisableSpillEncryption - spill_bucket: !Ref SpillBucket - spill_prefix: !Ref SpillPrefix - FunctionName: !Ref AthenaCatalogName + disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] + spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] + spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] + kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] + glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] + FunctionName: !Ref LambdaFunctionName Handler: 
"com.amazonaws.athena.connectors.tpcds.TPCDSCompositeHandler" - CodeUri: "./target/athena-tpcds-2022.47.1.jar" + CodeUri: "./target/athena-tpcds-2024.18.2.jar" Description: "This connector enables Amazon Athena to communicate with a randomly generated TPC-DS data source." Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] - Policies: - - Statement: + Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - athena:GetQueryExecution + - s3:ListAllMyBuckets + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - !If + - HasGlueConnection - Action: - - athena:GetQueryExecution - - s3:ListAllMyBuckets + - glue:GetConnection Effect: Allow - Resource: '*' - Version: '2012-10-17' - #S3CrudPolicy allows our connector to spill large responses to S3. 
You can optionally replace this pre-made policy - #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. - - S3CrudPolicy: - BucketName: !Ref SpillBucket \ No newline at end of file + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + - !Ref "AWS::NoValue" + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-tpcds/pom.xml b/athena-tpcds/pom.xml index 10490d0609..420188a85e 100644 --- a/athena-tpcds/pom.xml +++ b/athena-tpcds/pom.xml @@ -3,16 +3,16 @@ aws-athena-query-federation com.amazonaws - 2022.47.1 + 2024.18.2 4.0.0 athena-tpcds - 2022.47.1 + 2024.18.2 com.amazonaws aws-athena-federation-sdk - 2022.47.1 + 2024.18.2 withdep diff --git a/athena-tpcds/src/main/java/com/amazonaws/athena/connectors/tpcds/TPCDSCompositeHandler.java b/athena-tpcds/src/main/java/com/amazonaws/athena/connectors/tpcds/TPCDSCompositeHandler.java index baec56ea4c..420a623de8 100644 --- a/athena-tpcds/src/main/java/com/amazonaws/athena/connectors/tpcds/TPCDSCompositeHandler.java +++ b/athena-tpcds/src/main/java/com/amazonaws/athena/connectors/tpcds/TPCDSCompositeHandler.java @@ -19,6 +19,7 @@ */ package com.amazonaws.athena.connectors.tpcds; +import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; public class TPCDSCompositeHandler @@ -26,6 +27,6 @@ public class TPCDSCompositeHandler { public TPCDSCompositeHandler() { - super(new 
TPCDSMetadataHandler(System.getenv()), new TPCDSRecordHandler(System.getenv())); + super(new TPCDSMetadataHandler(GlueConnectionUtils.getGlueConnection()), new TPCDSRecordHandler(GlueConnectionUtils.getGlueConnection())); } } diff --git a/athena-udfs/athena-udfs.yaml b/athena-udfs/athena-udfs.yaml index 64fd2f54ef..12b5dcbdfd 100644 --- a/athena-udfs/athena-udfs.yaml +++ b/athena-udfs/athena-udfs.yaml @@ -10,7 +10,7 @@ Metadata: Labels: - athena-federation HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2022.47.1 + SemanticVersion: 2024.18.2 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: LambdaFunctionName: @@ -25,7 +25,7 @@ Parameters: Description: 'Lambda memory in MB (min 128 - 3008 max).' Default: 3008 Type: Number - SecretNameOrPrefix: + SecretName: Description: 'The name or prefix of a set of names within Secrets Manager that this function should have access to. (e.g. database-*).' Type: String PermissionsBoundaryARN: @@ -40,7 +40,7 @@ Resources: Properties: FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.udfs.AthenaUDFHandler" - CodeUri: "./target/athena-udfs-2022.47.1.jar" + CodeUri: "./target/athena-udfs-2024.18.2.jar" Description: "This connector enables Amazon Athena to leverage common UDFs made available via Lambda." 
Runtime: java11 Timeout: !Ref LambdaTimeout @@ -51,5 +51,5 @@ Resources: - Action: - secretsmanager:GetSecretValue Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:*:*:secret:${SecretNameOrPrefix}' + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:*:*:secret:${SecretName}' Version: '2012-10-17' diff --git a/athena-udfs/pom.xml b/athena-udfs/pom.xml index a5234ec6aa..abd39cafa6 100644 --- a/athena-udfs/pom.xml +++ b/athena-udfs/pom.xml @@ -3,16 +3,16 @@ aws-athena-query-federation com.amazonaws - 2022.47.1 + 2024.18.2 4.0.0 athena-udfs - 2022.47.1 + 2024.18.2 com.amazonaws aws-athena-federation-sdk - 2022.47.1 + 2024.18.2 withdep diff --git a/athena-vertica/athena-vertica.yaml b/athena-vertica/athena-vertica.yaml index 7914cb652e..57390f18ef 100644 --- a/athena-vertica/athena-vertica.yaml +++ b/athena-vertica/athena-vertica.yaml @@ -10,26 +10,30 @@ Metadata: ReadmeUrl: README.md Labels: ['athena-federation'] HomePageUrl: https://github.com/awslabs/aws-athena-query-federation - SemanticVersion: 2022.47.1 + SemanticVersion: 2024.18.2 SourceCodeUrl: https://github.com/awslabs/aws-athena-query-federation # Parameters are CloudFormation features to pass input # to your template when you create a stack Parameters: - AthenaCatalogName: + LambdaFunctionName: Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' Type: String AllowedPattern: ^[a-z0-9-_]{1,64}$ SpillBucket: Description: 'The name of the bucket where this function can spill data.' Type: String - VerticaExportBucket: + ExportBucket: Description: "The bucket where the Vertica Query results will be exported." Type: String SpillPrefix: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill + GlueConnection: + Description: "(Optional) Name of glue connection storing connection details for Federated Data source." 
+ Type: String + Default: "" LambdaTimeout: Description: "Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)" Default: 900 @@ -42,89 +46,170 @@ Parameters: Description: "WARNING: If set to 'true' encryption for spilled data is disabled." Default: "false" Type: String - VpcId: - Description: 'VPC ID' - Type: 'AWS::EC2::VPC::Id' SubnetIds: Description: 'One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' Type: 'List' - SecretNameOrPrefix: + SecurityGroupIds: + Description: 'One or more SecurityGroup IDs corresponding to the SecurityGroup that should be applied to the Lambda function. (e.g. sg1,sg2,sg3)' + Type: 'List' + SecretName: Description: 'The name or prefix of a set of names within Secrets Manager that this function should have access to. (e.g. vertica-*).' Type: String Default: "vertica-*" - VerticaConnectionString: + DefaultConnectionString: Description: 'The Vertica connection details to use by default if not catalog specific connection is defined and optionally using SecretsManager (e.g. ${secret_name}).' Type: String Default: "vertica://jdbc:vertica://:/?user=${vertica-username}&password=${vertica-password}" - PermissionsBoundaryARN: + PermissionsBoundaryArn: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
+ Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" Conditions: - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] + NotHasGlueConnectionAndHasKmsKeyId: !And + - !Not [ !Condition HasGlueConnection ] + - !Condition HasKmsKeyId Resources: - LambdaSecurityGroup: - Type: 'AWS::EC2::SecurityGroup' - Properties: - GroupDescription: 'Athena Vertica Connector Lambda VPC Security Group' - VpcId: !Ref VpcId ConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !Ref DisableSpillEncryption - spill_bucket: !Ref SpillBucket - spill_prefix: !Ref SpillPrefix - export_bucket: !Ref VerticaExportBucket - default: !Ref VerticaConnectionString - - FunctionName: !Sub "${AthenaCatalogName}" + disable_spill_encryption: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption] + spill_bucket: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket] + spill_prefix: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix] + kms_key_id: !If [NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue"] + glue_connection: !If [HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue"] + export_bucket: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref ExportBucket] + default: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultConnectionString] + FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.vertica.VerticaCompositeHandler" - CodeUri: "./target/athena-vertica-2022.47.1.jar" + CodeUri: 
"./target/athena-vertica-2024.18.2.jar" Description: "Amazon Athena Vertica Connector" Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] - Policies: - - Statement: - - Action: - - athena:GetQueryExecution - - s3:ListAllMyBuckets - Effect: Allow - Resource: '*' - Version: '2012-10-17' - - Statement: + Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] + VpcConfig: + SecurityGroupIds: !Ref SecurityGroupIds + SubnetIds: !Ref SubnetIds + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - athena:GetQueryExecution + - s3:ListAllMyBuckets + Effect: Allow + Resource: '*' + - Action: + - s3:ListBucket + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:s3:::${ExportBucket}' + - !Sub 'arn:${AWS::Partition}:s3:::${SpillBucket}' + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:*:*:secret:${SecretName}*' + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - 
s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${spillBucketName} + - spillBucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${spillBucketName}/* + - spillBucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${exportBucketName} + - exportBucketName: + Ref: ExportBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${exportBucketName}/* + - exportBucketName: + Ref: ExportBucket + - !If + - HasGlueConnection - Action: - - s3:ListBucket + - glue:GetConnection Effect: Allow Resource: - - !Sub 'arn:${AWS::Partition}:s3:::${VerticaExportBucket}' - - !Sub 'arn:${AWS::Partition}:s3:::${SpillBucket}' - Version: '2012-10-17' - - Statement: - - Action: - - secretsmanager:GetSecretValue - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:*:*:secret:${SecretNameOrPrefix}' - - S3ReadPolicy: - BucketName: - Ref: SpillBucket - - S3WritePolicy: - BucketName: - Ref: SpillBucket - - S3ReadPolicy: - BucketName: - Ref: VerticaExportBucket - - S3WritePolicy: - BucketName: - Ref: VerticaExportBucket - #VPCAccessPolicy allows our connector to run in a VPC so that it can access your data source. 
- VpcConfig: - SecurityGroupIds: - #SecurityGroup that should be applied to the Lambda function - - !Ref LambdaSecurityGroup - SubnetIds: !Ref SubnetIds + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + - !Ref "AWS::NoValue" + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-vertica/pom.xml b/athena-vertica/pom.xml index b5b086e449..145a86991a 100644 --- a/athena-vertica/pom.xml +++ b/athena-vertica/pom.xml @@ -3,16 +3,16 @@ aws-athena-query-federation com.amazonaws - 2022.47.1 + 2024.18.2 4.0.0 athena-vertica - 2022.47.1 + 2024.18.2 com.amazonaws aws-athena-federation-sdk - 2022.47.1 + 2024.18.2 withdep @@ -69,7 +69,8 @@ com.amazonaws athena-jdbc - 2022.47.1 + 2024.18.2 + compile diff --git a/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaCompositeHandler.java b/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaCompositeHandler.java index 7ccbb0f34d..2fac88d483 100644 --- a/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaCompositeHandler.java +++ b/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaCompositeHandler.java @@ -19,6 +19,7 @@ */ package com.amazonaws.athena.connectors.vertica; +import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -30,6 +31,6 @@ public class VerticaCompositeHandler { public VerticaCompositeHandler() { - super(new 
VerticaMetadataHandler(System.getenv()), new VerticaRecordHandler(System.getenv())); + super(new VerticaMetadataHandler(GlueConnectionUtils.getGlueConnection()), new VerticaRecordHandler(GlueConnectionUtils.getGlueConnection())); } } diff --git a/pom.xml b/pom.xml index 0525af382c..e8cd7eade8 100644 --- a/pom.xml +++ b/pom.xml @@ -4,7 +4,7 @@ com.amazonaws aws-athena-query-federation pom - 2022.47.1 + 2024.18.2 AWS Athena Query Federation The Amazon Athena Query Federation SDK allows you to customize Amazon Athena with your own code. https://github.com/awslabs/aws-athena-query-federation @@ -26,7 +26,7 @@ 1.8.4 3.25.3 7.10.2 - + 2.17.1 3.2.5 2.23.1 diff --git a/tools/validate_connector.sh b/tools/validate_connector.sh index ebbeb90668..93c0bde549 100755 --- a/tools/validate_connector.sh +++ b/tools/validate_connector.sh @@ -37,7 +37,7 @@ while true; do esac done -VERSION=2022.47.1 +VERSION=2024.18.2 dir=$(cd -P -- "$(dirname -- "$0")" && pwd -P) diff --git a/validation_testing/cdk_federation_infra_provisioning/app/lib/stacks/opensearch-stack.ts b/validation_testing/cdk_federation_infra_provisioning/app/lib/stacks/opensearch-stack.ts index 5b2378e815..58ac20ab52 100644 --- a/validation_testing/cdk_federation_infra_provisioning/app/lib/stacks/opensearch-stack.ts +++ b/validation_testing/cdk_federation_infra_provisioning/app/lib/stacks/opensearch-stack.ts @@ -168,7 +168,7 @@ export class OpenSearchStack extends cdk.Stack { parameters: { 'AthenaCatalogName': `opensearch-cdk-deployed`, 'IsVPCAccess': true, - 'SecretNamePrefix': 'asdf', + 'SecretName': 'asdf', 'AutoDiscoverEndpoint': false, 'DomainMapping': `default=${connectionString}`, 'SecurityGroupIds': [securityGroup.securityGroupId], diff --git a/validation_testing/cdk_federation_infra_provisioning/app/lib/stacks/rds-generic-stack.ts b/validation_testing/cdk_federation_infra_provisioning/app/lib/stacks/rds-generic-stack.ts index c6bde711fc..37502ba751 100644 --- 
a/validation_testing/cdk_federation_infra_provisioning/app/lib/stacks/rds-generic-stack.ts +++ b/validation_testing/cdk_federation_infra_provisioning/app/lib/stacks/rds-generic-stack.ts @@ -145,7 +145,7 @@ export class RdsGenericStack extends cdk.Stack { templateFile: cfn_template_file, parameters: { 'LambdaFunctionName': `${db_type}-cdk-deployed`, - 'SecretNamePrefix': 'asdf', + 'SecretName': 'asdf', 'DefaultConnectionString': `${connectionStringPrefix}://${connectionString}`, 'SecurityGroupIds': [securityGroup.securityGroupId], 'SubnetIds': [subnet.subnetId], diff --git a/validation_testing/cdk_federation_infra_provisioning/app/lib/stacks/redshift-stack.ts b/validation_testing/cdk_federation_infra_provisioning/app/lib/stacks/redshift-stack.ts index 8f70345b02..631a36ba5a 100644 --- a/validation_testing/cdk_federation_infra_provisioning/app/lib/stacks/redshift-stack.ts +++ b/validation_testing/cdk_federation_infra_provisioning/app/lib/stacks/redshift-stack.ts @@ -141,7 +141,7 @@ export class RedshiftStack extends cdk.Stack { templateFile: cfn_template_file, parameters: { 'LambdaFunctionName': 'redshift-cdk-deployed', - 'SecretNamePrefix': 'asdf', + 'SecretName': 'asdf', 'DefaultConnectionString': `${connectionStringPrefix}://${connectionString}`, 'SecurityGroupIds': [securityGroup.securityGroupId], 'SubnetIds': [subnet.subnetId], From b3bd92fb6be8125009945aaca662b88f6cb2c68d Mon Sep 17 00:00:00 2001 From: Jeffrey Lin Date: Mon, 26 Aug 2024 18:26:19 +0000 Subject: [PATCH 14/87] Patch issues from initial merge --- athena-aws-cmdb/pom.xml | 6 ++-- .../athena-cloudera-hive.yaml | 4 +-- athena-cloudera-hive/pom.xml | 10 +++--- .../athena-cloudera-impala.yaml | 4 +-- athena-cloudera-impala/pom.xml | 10 +++--- athena-cloudwatch-metrics/pom.xml | 6 ++-- athena-cloudwatch/athena-cloudwatch.yaml | 4 +-- athena-cloudwatch/pom.xml | 8 ++--- athena-datalakegen2/pom.xml | 10 +++--- athena-db2-as400/pom.xml | 10 +++--- athena-db2/pom.xml | 10 +++--- athena-docdb/pom.xml | 8 
++--- athena-elasticsearch/pom.xml | 10 +++--- athena-example/athena-example.yaml | 4 +-- athena-example/pom.xml | 6 ++-- athena-federation-integ-test/README.md | 2 +- athena-federation-sdk-tools/pom.xml | 6 ++-- .../athena-federation-sdk.yaml | 4 +-- .../connector/lambda/GlueConnectionUtils.java | 31 ++++++++++--------- athena-gcs/pom.xml | 8 ++--- athena-google-bigquery/pom.xml | 8 ++--- athena-hbase/pom.xml | 8 ++--- .../athena-hortonworks-hive.yaml | 4 +-- athena-hortonworks-hive/pom.xml | 10 +++--- athena-jdbc/pom.xml | 8 ++--- athena-kafka/pom.xml | 8 ++--- athena-msk/athena-msk.yaml | 4 +-- athena-msk/pom.xml | 8 ++--- athena-mysql/pom.xml | 10 +++--- athena-neptune/pom.xml | 6 ++-- athena-oracle/pom.xml | 10 +++--- athena-postgresql/pom.xml | 10 +++--- athena-redis/pom.xml | 8 ++--- athena-redshift/athena-redshift.yaml | 4 +-- athena-redshift/pom.xml | 10 +++--- athena-saphana/pom.xml | 10 +++--- athena-snowflake/pom.xml | 10 +++--- athena-sqlserver/pom.xml | 10 +++--- athena-synapse/athena-synapse.yaml | 4 +-- athena-synapse/pom.xml | 10 +++--- athena-teradata/pom.xml | 10 +++--- athena-timestream/pom.xml | 8 ++--- athena-tpcds/pom.xml | 6 ++-- athena-udfs/athena-udfs.yaml | 4 +-- athena-udfs/pom.xml | 6 ++-- athena-vertica/pom.xml | 8 ++--- tools/validate_connector.sh | 2 +- 47 files changed, 183 insertions(+), 182 deletions(-) diff --git a/athena-aws-cmdb/pom.xml b/athena-aws-cmdb/pom.xml index c9e6bdf137..90c1ad7b59 100644 --- a/athena-aws-cmdb/pom.xml +++ b/athena-aws-cmdb/pom.xml @@ -3,16 +3,16 @@ aws-athena-query-federation com.amazonaws - 2024.18.2 + 2022.47.1 4.0.0 athena-aws-cmdb - 2024.18.2 + 2022.47.1 com.amazonaws aws-athena-federation-sdk - 2024.18.2 + 2022.47.1 withdep diff --git a/athena-cloudera-hive/athena-cloudera-hive.yaml b/athena-cloudera-hive/athena-cloudera-hive.yaml index 1c6bbd9d3b..cffb40b8e6 100644 --- a/athena-cloudera-hive/athena-cloudera-hive.yaml +++ b/athena-cloudera-hive/athena-cloudera-hive.yaml @@ -10,7 +10,7 @@ Metadata: 
Labels: - athena-federation HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2024.18.2 + SemanticVersion: 2022.47.1 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: LambdaFunctionName: @@ -88,7 +88,7 @@ Resources: glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.cloudera.HiveMuxCompositeHandler" - CodeUri: "./target/athena-cloudera-hive-2024.18.2.jar" + CodeUri: "./target/athena-cloudera-hive-2022.47.1.jar" Description: "Enables Amazon Athena to communicate with Cloudera Hive using JDBC" Runtime: java11 Timeout: !Ref LambdaTimeout diff --git a/athena-cloudera-hive/pom.xml b/athena-cloudera-hive/pom.xml index 396f3061d8..7e4dfdbcc3 100644 --- a/athena-cloudera-hive/pom.xml +++ b/athena-cloudera-hive/pom.xml @@ -3,11 +3,11 @@ aws-athena-query-federation com.amazonaws - 2024.18.2 + 2022.47.1 4.0.0 athena-cloudera-hive - 2024.18.2 + 2022.47.1 2.6.23.1027 @@ -15,13 +15,13 @@ com.amazonaws athena-federation-integ-test - 2024.18.2 + 2022.47.1 test com.amazonaws athena-jdbc - 2024.18.2 + 2022.47.1 Hive @@ -31,7 +31,7 @@ com.amazonaws athena-jdbc - 2024.18.2 + 2022.47.1 test-jar test diff --git a/athena-cloudera-impala/athena-cloudera-impala.yaml b/athena-cloudera-impala/athena-cloudera-impala.yaml index 08fda94c09..c7649ccd23 100644 --- a/athena-cloudera-impala/athena-cloudera-impala.yaml +++ b/athena-cloudera-impala/athena-cloudera-impala.yaml @@ -10,7 +10,7 @@ Metadata: Labels: - athena-federation HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2024.18.2 + SemanticVersion: 2022.47.1 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: LambdaFunctionName: @@ -87,7 +87,7 @@ Resources: glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] FunctionName: !Ref LambdaFunctionName Handler: 
"com.amazonaws.athena.connectors.cloudera.ImpalaMuxCompositeHandler" - CodeUri: "./target/athena-cloudera-impala-2024.18.2.jar" + CodeUri: "./target/athena-cloudera-impala-2022.47.1.jar" Description: "Enables Amazon Athena to communicate with Cloudera Impala using JDBC" Runtime: java11 Timeout: !Ref LambdaTimeout diff --git a/athena-cloudera-impala/pom.xml b/athena-cloudera-impala/pom.xml index 6ac5089037..8a2e88076d 100644 --- a/athena-cloudera-impala/pom.xml +++ b/athena-cloudera-impala/pom.xml @@ -3,11 +3,11 @@ aws-athena-query-federation com.amazonaws - 2024.18.2 + 2022.47.1 4.0.0 athena-cloudera-impala - 2024.18.2 + 2022.47.1 2.6.32.1041 @@ -15,13 +15,13 @@ com.amazonaws athena-federation-integ-test - 2024.18.2 + 2022.47.1 test com.amazonaws athena-jdbc - 2024.18.2 + 2022.47.1 Impala @@ -31,7 +31,7 @@ com.amazonaws athena-jdbc - 2024.18.2 + 2022.47.1 test-jar test diff --git a/athena-cloudwatch-metrics/pom.xml b/athena-cloudwatch-metrics/pom.xml index 2ed910a861..6c8bff216e 100644 --- a/athena-cloudwatch-metrics/pom.xml +++ b/athena-cloudwatch-metrics/pom.xml @@ -3,16 +3,16 @@ aws-athena-query-federation com.amazonaws - 2024.18.2 + 2022.47.1 4.0.0 athena-cloudwatch-metrics - 2024.18.2 + 2022.47.1 com.amazonaws aws-athena-federation-sdk - 2024.18.2 + 2022.47.1 withdep diff --git a/athena-cloudwatch/athena-cloudwatch.yaml b/athena-cloudwatch/athena-cloudwatch.yaml index 8c4daa9180..968cadc2a0 100644 --- a/athena-cloudwatch/athena-cloudwatch.yaml +++ b/athena-cloudwatch/athena-cloudwatch.yaml @@ -10,7 +10,7 @@ Metadata: Labels: - athena-federation HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2024.18.2 + SemanticVersion: 2022.47.1 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: LambdaFunctionName: @@ -76,7 +76,7 @@ Resources: glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] FunctionName: !Ref LambdaFunctionName Handler: 
"com.amazonaws.athena.connectors.cloudwatch.CloudwatchCompositeHandler" - CodeUri: "./target/athena-cloudwatch-2024.18.2.jar" + CodeUri: "./target/athena-cloudwatch-2022.47.1.jar" Description: "Enables Amazon Athena to communicate with Cloudwatch, making your log accessible via SQL" Runtime: java11 Timeout: !Ref LambdaTimeout diff --git a/athena-cloudwatch/pom.xml b/athena-cloudwatch/pom.xml index 5b892aa5e1..79757d6167 100644 --- a/athena-cloudwatch/pom.xml +++ b/athena-cloudwatch/pom.xml @@ -3,22 +3,22 @@ aws-athena-query-federation com.amazonaws - 2024.18.2 + 2022.47.1 4.0.0 athena-cloudwatch - 2024.18.2 + 2022.47.1 com.amazonaws aws-athena-federation-sdk - 2024.18.2 + 2022.47.1 withdep com.amazonaws athena-federation-integ-test - 2024.18.2 + 2022.47.1 test diff --git a/athena-datalakegen2/pom.xml b/athena-datalakegen2/pom.xml index 043c1e4647..c72c6c4813 100644 --- a/athena-datalakegen2/pom.xml +++ b/athena-datalakegen2/pom.xml @@ -3,27 +3,27 @@ aws-athena-query-federation com.amazonaws - 2024.18.2 + 2022.47.1 4.0.0 athena-datalakegen2 - 2024.18.2 + 2022.47.1 com.amazonaws athena-federation-integ-test - 2024.18.2 + 2022.47.1 test com.amazonaws athena-jdbc - 2024.18.2 + 2022.47.1 com.amazonaws athena-jdbc - 2024.18.2 + 2022.47.1 test-jar test diff --git a/athena-db2-as400/pom.xml b/athena-db2-as400/pom.xml index 9bac277d2d..7c458b8caf 100644 --- a/athena-db2-as400/pom.xml +++ b/athena-db2-as400/pom.xml @@ -3,27 +3,27 @@ aws-athena-query-federation com.amazonaws - 2024.18.2 + 2022.47.1 4.0.0 athena-db2-as400 - 2024.18.2 + 2022.47.1 com.amazonaws athena-federation-integ-test - 2024.18.2 + 2022.47.1 test com.amazonaws athena-jdbc - 2024.18.2 + 2022.47.1 com.amazonaws athena-jdbc - 2024.18.2 + 2022.47.1 test-jar test diff --git a/athena-db2/pom.xml b/athena-db2/pom.xml index 25dac1feea..919f91b2b3 100644 --- a/athena-db2/pom.xml +++ b/athena-db2/pom.xml @@ -3,27 +3,27 @@ aws-athena-query-federation com.amazonaws - 2024.18.2 + 2022.47.1 4.0.0 athena-db2 - 2024.18.2 + 
2022.47.1 com.amazonaws athena-federation-integ-test - 2024.18.2 + 2022.47.1 test com.amazonaws athena-jdbc - 2024.18.2 + 2022.47.1 com.amazonaws athena-jdbc - 2024.18.2 + 2022.47.1 test-jar test diff --git a/athena-docdb/pom.xml b/athena-docdb/pom.xml index ca5f71c5fa..5dd645c20a 100644 --- a/athena-docdb/pom.xml +++ b/athena-docdb/pom.xml @@ -3,16 +3,16 @@ aws-athena-query-federation com.amazonaws - 2024.18.2 + 2022.47.1 4.0.0 athena-docdb - 2024.18.2 + 2022.47.1 com.amazonaws aws-athena-federation-sdk - 2024.18.2 + 2022.47.1 withdep @@ -25,7 +25,7 @@ com.amazonaws athena-federation-integ-test - 2024.18.2 + 2022.47.1 test diff --git a/athena-elasticsearch/pom.xml b/athena-elasticsearch/pom.xml index 0735a8d2f2..7d62694419 100644 --- a/athena-elasticsearch/pom.xml +++ b/athena-elasticsearch/pom.xml @@ -3,16 +3,16 @@ aws-athena-query-federation com.amazonaws - 2024.18.2 + 2022.47.1 4.0.0 athena-elasticsearch - 2024.18.2 + 2022.47.1 com.amazonaws aws-athena-federation-sdk - 2024.18.2 + 2022.47.1 withdep @@ -25,7 +25,7 @@ com.amazonaws athena-federation-integ-test - 2024.18.2 + 2022.47.1 test @@ -137,7 +137,7 @@ com.amazonaws aws-athena-federation-sdk - 2024.18.2 + 2022.47.1 test-jar test diff --git a/athena-example/athena-example.yaml b/athena-example/athena-example.yaml index 07ed3e318b..8f019191ea 100644 --- a/athena-example/athena-example.yaml +++ b/athena-example/athena-example.yaml @@ -10,7 +10,7 @@ Metadata: ReadmeUrl: README.md Labels: ['athena-federation'] HomePageUrl: https://github.com/awslabs/aws-athena-query-federation - SemanticVersion: 2024.18.2 + SemanticVersion: 2022.47.1 SourceCodeUrl: https://github.com/awslabs/aws-athena-query-federation # Parameters are CloudFormation features to pass input @@ -55,7 +55,7 @@ Resources: data_bucket: !Ref DataBucket FunctionName: !Sub "${AthenaCatalogName}" Handler: "com.amazonaws.athena.connectors.example.ExampleCompositeHandler" - CodeUri: "./target/athena-example-2024.18.2.jar" + CodeUri: 
"./target/athena-example-2022.47.1.jar" Description: "A guided example for writing and deploying your own federated Amazon Athena connector for a custom source." Runtime: java11 Timeout: !Ref LambdaTimeout diff --git a/athena-example/pom.xml b/athena-example/pom.xml index bcd3ad0ef3..e3a84da25e 100644 --- a/athena-example/pom.xml +++ b/athena-example/pom.xml @@ -3,16 +3,16 @@ aws-athena-query-federation com.amazonaws - 2024.18.2 + 2022.47.1 4.0.0 athena-example - 2024.18.2 + 2022.47.1 com.amazonaws aws-athena-federation-sdk - 2024.18.2 + 2022.47.1 withdep diff --git a/athena-federation-integ-test/README.md b/athena-federation-integ-test/README.md index 6b9655b235..5440cb091d 100644 --- a/athena-federation-integ-test/README.md +++ b/athena-federation-integ-test/README.md @@ -36,7 +36,7 @@ in most **pom.xml** files (e.g. com.amazonaws athena-federation-integ-test - Current version of the SDK (e.g. 2024.18.2) + Current version of the SDK (e.g. 2022.47.1) test ``` diff --git a/athena-federation-sdk-tools/pom.xml b/athena-federation-sdk-tools/pom.xml index b407c4c4b0..30eb909a1c 100644 --- a/athena-federation-sdk-tools/pom.xml +++ b/athena-federation-sdk-tools/pom.xml @@ -3,18 +3,18 @@ aws-athena-query-federation com.amazonaws - 2024.18.2 + 2022.47.1 4.0.0 athena-federation-sdk-tools jar Amazon Athena Query Federation SDK Tools - 2024.18.2 + 2022.47.1 com.amazonaws aws-athena-federation-sdk - 2024.18.2 + 2022.47.1 withdep diff --git a/athena-federation-sdk/athena-federation-sdk.yaml b/athena-federation-sdk/athena-federation-sdk.yaml index b4ece64a79..559e6830bc 100644 --- a/athena-federation-sdk/athena-federation-sdk.yaml +++ b/athena-federation-sdk/athena-federation-sdk.yaml @@ -10,7 +10,7 @@ Metadata: Labels: - athena-federation HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2024.18.2 + SemanticVersion: 2022.47.1 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: AthenaCatalogName: @@ -47,7 
+47,7 @@ Resources: spill_prefix: !Ref SpillPrefix FunctionName: !Ref AthenaCatalogName Handler: "com.amazonaws.athena.connector.lambda.examples.ExampleCompositeHandler" - CodeUri: "./target/aws-athena-federation-sdk-2024.18.2-withdep.jar" + CodeUri: "./target/aws-athena-federation-sdk-2022.47.1-withdep.jar" Description: "This connector enables Amazon Athena to communicate with a randomly generated data source." Runtime: java11 Timeout: !Ref LambdaTimeout diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/GlueConnectionUtils.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/GlueConnectionUtils.java index 7062b96f0c..f799805450 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/GlueConnectionUtils.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/GlueConnectionUtils.java @@ -19,19 +19,19 @@ */ package com.amazonaws.athena.connector.lambda; -import com.amazonaws.ClientConfiguration; -import com.amazonaws.client.builder.AwsClientBuilder; -import com.amazonaws.services.glue.AWSGlue; -import com.amazonaws.services.glue.AWSGlueClientBuilder; -import com.amazonaws.services.glue.model.Connection; -import com.amazonaws.services.glue.model.GetConnectionRequest; -import com.amazonaws.services.glue.model.GetConnectionResult; import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.http.apache.ApacheHttpClient; +import software.amazon.awssdk.services.glue.GlueClient; +import software.amazon.awssdk.services.glue.model.Connection; +import software.amazon.awssdk.services.glue.model.GetConnectionRequest; +import software.amazon.awssdk.services.glue.model.GetConnectionResponse; +import java.net.URI; +import java.time.Duration; import java.util.HashMap; import 
java.util.Map; import java.util.stream.Collectors; @@ -65,15 +65,16 @@ public static Map getGlueConnection() try { HashMap> athenaPropertiesToMap = new HashMap>(); - AWSGlue awsGlue = AWSGlueClientBuilder.standard() - .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration( - "https://glue-gamma.ap-south-1.amazonaws.com", "ap-south-1" - )) - .withClientConfiguration(new ClientConfiguration().withConnectionTimeout(CONNECT_TIMEOUT)).build(); - GetConnectionResult glueConnection = awsGlue.getConnection(new GetConnectionRequest().withName(glueConnectionName)); + GlueClient awsGlue = GlueClient.builder() + .endpointOverride(new URI("https://glue-gamma.ap-south-1.amazonaws.com")) + .httpClientBuilder(ApacheHttpClient + .builder() + .connectionTimeout(Duration.ofMillis(CONNECT_TIMEOUT))) + .build(); + GetConnectionResponse glueConnection = awsGlue.getConnection(GetConnectionRequest.builder().name(glueConnectionName).build()); logger.debug("Successfully retrieved connection {}", glueConnectionName); - Connection connection = glueConnection.getConnection(); - String athenaPropertiesAsString = connection.getConnectionProperties().get(GLUE_CONNECTION_ATHENA_PROPERTIES); + Connection connection = glueConnection.connection(); + String athenaPropertiesAsString = connection.connectionProperties().get(GLUE_CONNECTION_ATHENA_PROPERTIES); try { ObjectMapper mapper = new ObjectMapper(); athenaPropertiesToMap = mapper.readValue(athenaPropertiesAsString, new TypeReference(){}); diff --git a/athena-gcs/pom.xml b/athena-gcs/pom.xml index 72fb105d82..5e1605a0a3 100644 --- a/athena-gcs/pom.xml +++ b/athena-gcs/pom.xml @@ -3,16 +3,16 @@ aws-athena-query-federation com.amazonaws - 2024.18.2 + 2022.47.1 4.0.0 athena-gcs - 2024.18.2 + 2022.47.1 com.amazonaws aws-athena-federation-sdk - 2024.18.2 + 2022.47.1 @@ -81,7 +81,7 @@ com.amazonaws athena-federation-integ-test - 2024.18.2 + 2022.47.1 com.google.guava diff --git a/athena-google-bigquery/pom.xml 
b/athena-google-bigquery/pom.xml index 19deaa61a2..c68fc6d1a8 100644 --- a/athena-google-bigquery/pom.xml +++ b/athena-google-bigquery/pom.xml @@ -3,22 +3,22 @@ aws-athena-query-federation com.amazonaws - 2024.18.2 + 2022.47.1 4.0.0 athena-google-bigquery - 2024.18.2 + 2022.47.1 com.amazonaws athena-federation-integ-test - 2024.18.2 + 2022.47.1 test com.amazonaws athena-jdbc - 2024.18.2 + 2022.47.1 net.java.dev.jna diff --git a/athena-hbase/pom.xml b/athena-hbase/pom.xml index 61d395ab73..6b6d2d04d3 100644 --- a/athena-hbase/pom.xml +++ b/athena-hbase/pom.xml @@ -3,11 +3,11 @@ aws-athena-query-federation com.amazonaws - 2024.18.2 + 2022.47.1 4.0.0 athena-hbase - 2024.18.2 + 2022.47.1 11.0.16 2.6.0-hadoop3 @@ -16,7 +16,7 @@ com.amazonaws aws-athena-federation-sdk - 2024.18.2 + 2022.47.1 withdep @@ -29,7 +29,7 @@ com.amazonaws athena-federation-integ-test - 2024.18.2 + 2022.47.1 withdep test diff --git a/athena-hortonworks-hive/athena-hortonworks-hive.yaml b/athena-hortonworks-hive/athena-hortonworks-hive.yaml index 076e3fa225..a506e540ce 100644 --- a/athena-hortonworks-hive/athena-hortonworks-hive.yaml +++ b/athena-hortonworks-hive/athena-hortonworks-hive.yaml @@ -10,7 +10,7 @@ Metadata: Labels: - athena-federation HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2024.18.2 + SemanticVersion: 2022.47.1 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: LambdaFunctionName: @@ -94,7 +94,7 @@ Resources: default: [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultConnectionString ] FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.hortonworks.HiveMuxCompositeHandler" - CodeUri: "./target/athena-hortonworks-hive-2024.18.2.jar" + CodeUri: "./target/athena-hortonworks-hive-2022.47.1.jar" Description: "Enables Amazon Athena to communicate with Hortonworks Hive using JDBC" Runtime: java11 Timeout: !Ref LambdaTimeout diff --git a/athena-hortonworks-hive/pom.xml 
b/athena-hortonworks-hive/pom.xml index e33ef02199..14a2ca89c3 100644 --- a/athena-hortonworks-hive/pom.xml +++ b/athena-hortonworks-hive/pom.xml @@ -3,11 +3,11 @@ aws-athena-query-federation com.amazonaws - 2024.18.2 + 2022.47.1 4.0.0 athena-hortonworks-hive - 2024.18.2 + 2022.47.1 2.6.23.1027 @@ -15,13 +15,13 @@ com.amazonaws athena-federation-integ-test - 2024.18.2 + 2022.47.1 test com.amazonaws athena-jdbc - 2024.18.2 + 2022.47.1 Hive @@ -31,7 +31,7 @@ com.amazonaws athena-jdbc - 2024.18.2 + 2022.47.1 test-jar test diff --git a/athena-jdbc/pom.xml b/athena-jdbc/pom.xml index 302fd185bd..a5dadefca8 100644 --- a/athena-jdbc/pom.xml +++ b/athena-jdbc/pom.xml @@ -3,11 +3,11 @@ aws-athena-query-federation com.amazonaws - 2024.18.2 + 2022.47.1 4.0.0 athena-jdbc - 2024.18.2 + 2022.47.1 software.amazon.jsii @@ -39,7 +39,7 @@ com.amazonaws aws-athena-federation-sdk - 2024.18.2 + 2022.47.1 withdep @@ -52,7 +52,7 @@ com.amazonaws athena-federation-integ-test - 2024.18.2 + 2022.47.1 test diff --git a/athena-kafka/pom.xml b/athena-kafka/pom.xml index c2b1738493..9d0b983d66 100644 --- a/athena-kafka/pom.xml +++ b/athena-kafka/pom.xml @@ -3,12 +3,12 @@ aws-athena-query-federation com.amazonaws - 2024.18.2 + 2022.47.1 4.0.0 athena-kafka Athena Kafka Connector - 2024.18.2 + 2022.47.1 11 11 @@ -83,7 +83,7 @@ com.amazonaws athena-federation-integ-test - 2024.18.2 + 2022.47.1 withdep test @@ -114,7 +114,7 @@ com.amazonaws aws-athena-federation-sdk - 2024.18.2 + 2022.47.1 withdep diff --git a/athena-msk/athena-msk.yaml b/athena-msk/athena-msk.yaml index 4d0e82e15f..e821044898 100644 --- a/athena-msk/athena-msk.yaml +++ b/athena-msk/athena-msk.yaml @@ -11,7 +11,7 @@ Metadata: - msk - athena-federation HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2024.18.2 + SemanticVersion: 2022.47.1 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: AuthType: @@ -114,7 +114,7 @@ Resources: auth_type: !If [ 
HasGlueConnection, !Ref "AWS::NoValue", !Ref AuthType ] FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.msk.AmazonMskCompositeHandler" - CodeUri: "./target/athena-msk-2024.18.2.jar" + CodeUri: "./target/athena-msk-2022.47.1.jar" Description: "Enables Amazon Athena to communicate with MSK clusters" Runtime: java11 Timeout: !Ref LambdaTimeout diff --git a/athena-msk/pom.xml b/athena-msk/pom.xml index e67c611017..81881d0156 100644 --- a/athena-msk/pom.xml +++ b/athena-msk/pom.xml @@ -3,12 +3,12 @@ aws-athena-query-federation com.amazonaws - 2024.18.2 + 2022.47.1 4.0.0 athena-msk Athena MSK Connector - 2024.18.2 + 2022.47.1 11 11 @@ -145,7 +145,7 @@ com.amazonaws athena-federation-integ-test - 2024.18.2 + 2022.47.1 withdep test @@ -181,7 +181,7 @@ com.amazonaws aws-athena-federation-sdk - 2024.18.2 + 2022.47.1 withdep diff --git a/athena-mysql/pom.xml b/athena-mysql/pom.xml index 06aee27e5c..b980b85291 100644 --- a/athena-mysql/pom.xml +++ b/athena-mysql/pom.xml @@ -3,27 +3,27 @@ aws-athena-query-federation com.amazonaws - 2024.18.2 + 2022.47.1 4.0.0 athena-mysql - 2024.18.2 + 2022.47.1 com.amazonaws athena-federation-integ-test - 2024.18.2 + 2022.47.1 test com.amazonaws athena-jdbc - 2024.18.2 + 2022.47.1 com.amazonaws athena-jdbc - 2024.18.2 + 2022.47.1 test-jar test diff --git a/athena-neptune/pom.xml b/athena-neptune/pom.xml index 7bf70c7b03..8726e7b0e7 100644 --- a/athena-neptune/pom.xml +++ b/athena-neptune/pom.xml @@ -3,11 +3,11 @@ aws-athena-query-federation com.amazonaws - 2024.18.2 + 2022.47.1 4.0.0 athena-neptune - 2024.18.2 + 2022.47.1 3.7.2 @@ -17,7 +17,7 @@ com.amazonaws aws-athena-federation-sdk - 2024.18.2 + 2022.47.1 withdep diff --git a/athena-oracle/pom.xml b/athena-oracle/pom.xml index 8b686ca51d..217a194e29 100644 --- a/athena-oracle/pom.xml +++ b/athena-oracle/pom.xml @@ -3,27 +3,27 @@ aws-athena-query-federation com.amazonaws - 2024.18.2 + 2022.47.1 4.0.0 athena-oracle - 2024.18.2 + 2022.47.1 com.amazonaws 
athena-federation-integ-test - 2024.18.2 + 2022.47.1 test com.amazonaws athena-jdbc - 2024.18.2 + 2022.47.1 com.amazonaws athena-jdbc - 2024.18.2 + 2022.47.1 test-jar test diff --git a/athena-postgresql/pom.xml b/athena-postgresql/pom.xml index edd1c9f126..847b089a93 100644 --- a/athena-postgresql/pom.xml +++ b/athena-postgresql/pom.xml @@ -3,28 +3,28 @@ aws-athena-query-federation com.amazonaws - 2024.18.2 + 2022.47.1 4.0.0 athena-postgresql - 2024.18.2 + 2022.47.1 jar com.amazonaws athena-federation-integ-test - 2024.18.2 + 2022.47.1 test com.amazonaws athena-jdbc - 2024.18.2 + 2022.47.1 com.amazonaws athena-jdbc - 2024.18.2 + 2022.47.1 test-jar test diff --git a/athena-redis/pom.xml b/athena-redis/pom.xml index af3237bb1c..29e356b733 100644 --- a/athena-redis/pom.xml +++ b/athena-redis/pom.xml @@ -3,11 +3,11 @@ aws-athena-query-federation com.amazonaws - 2024.18.2 + 2022.47.1 4.0.0 athena-redis - 2024.18.2 + 2022.47.1 software.amazon.jsii @@ -39,7 +39,7 @@ com.amazonaws aws-athena-federation-sdk - 2024.18.2 + 2022.47.1 withdep @@ -87,7 +87,7 @@ com.amazonaws athena-federation-integ-test - 2024.18.2 + 2022.47.1 test diff --git a/athena-redshift/athena-redshift.yaml b/athena-redshift/athena-redshift.yaml index 907c3424d3..8c2b32060c 100644 --- a/athena-redshift/athena-redshift.yaml +++ b/athena-redshift/athena-redshift.yaml @@ -10,7 +10,7 @@ Metadata: Labels: - athena-federation HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2024.18.2 + SemanticVersion: 2022.47.1 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: LambdaFunctionName: @@ -88,7 +88,7 @@ Resources: default: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultConnectionString ] FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.redshift.RedshiftMuxCompositeHandler" - CodeUri: "./target/athena-redshift-2024.18.2.jar" + CodeUri: "./target/athena-redshift-2022.47.1.jar" Description: "Enables 
Amazon Athena to communicate with Redshift using JDBC" Runtime: java11 Timeout: !Ref LambdaTimeout diff --git a/athena-redshift/pom.xml b/athena-redshift/pom.xml index 2b47e94dcc..7119660c3e 100644 --- a/athena-redshift/pom.xml +++ b/athena-redshift/pom.xml @@ -3,21 +3,21 @@ aws-athena-query-federation com.amazonaws - 2024.18.2 + 2022.47.1 4.0.0 athena-redshift - 2024.18.2 + 2022.47.1 com.amazonaws athena-postgresql - 2024.18.2 + 2022.47.1 com.amazonaws athena-federation-integ-test - 2024.18.2 + 2022.47.1 test @@ -34,7 +34,7 @@ com.amazonaws athena-jdbc - 2024.18.2 + 2022.47.1 test-jar test diff --git a/athena-saphana/pom.xml b/athena-saphana/pom.xml index aa393177fd..85ca8aa644 100644 --- a/athena-saphana/pom.xml +++ b/athena-saphana/pom.xml @@ -3,27 +3,27 @@ aws-athena-query-federation com.amazonaws - 2024.18.2 + 2022.47.1 4.0.0 athena-saphana - 2024.18.2 + 2022.47.1 com.amazonaws athena-federation-integ-test - 2024.18.2 + 2022.47.1 test com.amazonaws athena-jdbc - 2024.18.2 + 2022.47.1 com.amazonaws athena-jdbc - 2024.18.2 + 2022.47.1 test-jar test diff --git a/athena-snowflake/pom.xml b/athena-snowflake/pom.xml index 42ea59eef9..4e0ea9926a 100644 --- a/athena-snowflake/pom.xml +++ b/athena-snowflake/pom.xml @@ -3,27 +3,27 @@ aws-athena-query-federation com.amazonaws - 2024.18.2 + 2022.47.1 4.0.0 athena-snowflake - 2024.18.2 + 2022.47.1 com.amazonaws athena-federation-integ-test - 2024.18.2 + 2022.47.1 test com.amazonaws athena-jdbc - 2024.18.2 + 2022.47.1 com.amazonaws athena-jdbc - 2024.18.2 + 2022.47.1 test-jar test diff --git a/athena-sqlserver/pom.xml b/athena-sqlserver/pom.xml index d482876c82..6bfab70343 100644 --- a/athena-sqlserver/pom.xml +++ b/athena-sqlserver/pom.xml @@ -3,27 +3,27 @@ aws-athena-query-federation com.amazonaws - 2024.18.2 + 2022.47.1 4.0.0 athena-sqlserver - 2024.18.2 + 2022.47.1 com.amazonaws athena-federation-integ-test - 2024.18.2 + 2022.47.1 test com.amazonaws athena-jdbc - 2024.18.2 + 2022.47.1 com.amazonaws athena-jdbc - 
2024.18.2 + 2022.47.1 test-jar test diff --git a/athena-synapse/athena-synapse.yaml b/athena-synapse/athena-synapse.yaml index 47cff6e4a6..60f510cb09 100644 --- a/athena-synapse/athena-synapse.yaml +++ b/athena-synapse/athena-synapse.yaml @@ -12,7 +12,7 @@ Metadata: - athena-federation - jdbc HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2024.18.2 + SemanticVersion: 2022.47.1 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: LambdaFunctionName: @@ -96,7 +96,7 @@ Resources: default: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultConnectionString] FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.synapse.SynapseMuxCompositeHandler" - CodeUri: "./target/athena-synapse-2024.18.2.jar" + CodeUri: "./target/athena-synapse-2022.47.1.jar" Description: "Enables Amazon Athena to communicate with SYNPASE using JDBC" Runtime: java11 Timeout: !Ref LambdaTimeout diff --git a/athena-synapse/pom.xml b/athena-synapse/pom.xml index fc2cf6eb5a..92dbfaf0da 100644 --- a/athena-synapse/pom.xml +++ b/athena-synapse/pom.xml @@ -3,27 +3,27 @@ aws-athena-query-federation com.amazonaws - 2024.18.2 + 2022.47.1 4.0.0 athena-synapse - 2024.18.2 + 2022.47.1 com.amazonaws athena-federation-integ-test - 2024.18.2 + 2022.47.1 test com.amazonaws athena-jdbc - 2024.18.2 + 2022.47.1 com.amazonaws athena-jdbc - 2024.18.2 + 2022.47.1 test-jar test diff --git a/athena-teradata/pom.xml b/athena-teradata/pom.xml index 02ddace396..81d977c211 100644 --- a/athena-teradata/pom.xml +++ b/athena-teradata/pom.xml @@ -3,27 +3,27 @@ aws-athena-query-federation com.amazonaws - 2024.18.2 + 2022.47.1 4.0.0 athena-teradata - 2024.18.2 + 2022.47.1 com.amazonaws athena-federation-integ-test - 2024.18.2 + 2022.47.1 test com.amazonaws athena-jdbc - 2024.18.2 + 2022.47.1 com.amazonaws athena-jdbc - 2024.18.2 + 2022.47.1 test-jar test diff --git a/athena-timestream/pom.xml b/athena-timestream/pom.xml index 
93cc230573..a58b2c13c0 100644 --- a/athena-timestream/pom.xml +++ b/athena-timestream/pom.xml @@ -3,16 +3,16 @@ aws-athena-query-federation com.amazonaws - 2024.18.2 + 2022.47.1 4.0.0 athena-timestream - 2024.18.2 + 2022.47.1 com.amazonaws aws-athena-federation-sdk - 2024.18.2 + 2022.47.1 withdep @@ -25,7 +25,7 @@ com.amazonaws athena-federation-integ-test - 2024.18.2 + 2022.47.1 withdep test diff --git a/athena-tpcds/pom.xml b/athena-tpcds/pom.xml index 420188a85e..10490d0609 100644 --- a/athena-tpcds/pom.xml +++ b/athena-tpcds/pom.xml @@ -3,16 +3,16 @@ aws-athena-query-federation com.amazonaws - 2024.18.2 + 2022.47.1 4.0.0 athena-tpcds - 2024.18.2 + 2022.47.1 com.amazonaws aws-athena-federation-sdk - 2024.18.2 + 2022.47.1 withdep diff --git a/athena-udfs/athena-udfs.yaml b/athena-udfs/athena-udfs.yaml index 12b5dcbdfd..99e8e7d0c7 100644 --- a/athena-udfs/athena-udfs.yaml +++ b/athena-udfs/athena-udfs.yaml @@ -10,7 +10,7 @@ Metadata: Labels: - athena-federation HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' - SemanticVersion: 2024.18.2 + SemanticVersion: 2022.47.1 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: LambdaFunctionName: @@ -40,7 +40,7 @@ Resources: Properties: FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.udfs.AthenaUDFHandler" - CodeUri: "./target/athena-udfs-2024.18.2.jar" + CodeUri: "./target/athena-udfs-2022.47.1.jar" Description: "This connector enables Amazon Athena to leverage common UDFs made available via Lambda." 
Runtime: java11 Timeout: !Ref LambdaTimeout diff --git a/athena-udfs/pom.xml b/athena-udfs/pom.xml index abd39cafa6..a5234ec6aa 100644 --- a/athena-udfs/pom.xml +++ b/athena-udfs/pom.xml @@ -3,16 +3,16 @@ aws-athena-query-federation com.amazonaws - 2024.18.2 + 2022.47.1 4.0.0 athena-udfs - 2024.18.2 + 2022.47.1 com.amazonaws aws-athena-federation-sdk - 2024.18.2 + 2022.47.1 withdep diff --git a/athena-vertica/pom.xml b/athena-vertica/pom.xml index 09c9c82e17..e537e847fb 100644 --- a/athena-vertica/pom.xml +++ b/athena-vertica/pom.xml @@ -3,16 +3,16 @@ aws-athena-query-federation com.amazonaws - 2024.18.2 + 2022.47.1 4.0.0 athena-vertica - 2024.18.2 + 2022.47.1 com.amazonaws aws-athena-federation-sdk - 2024.18.2 + 2022.47.1 withdep @@ -79,7 +79,7 @@ com.amazonaws athena-jdbc - 2024.18.2 + 2022.47.1 compile diff --git a/tools/validate_connector.sh b/tools/validate_connector.sh index 93c0bde549..ebbeb90668 100755 --- a/tools/validate_connector.sh +++ b/tools/validate_connector.sh @@ -37,7 +37,7 @@ while true; do esac done -VERSION=2024.18.2 +VERSION=2022.47.1 dir=$(cd -P -- "$(dirname -- "$0")" && pwd -P) From fa64d0b50d9f9cde8566946fad1e32492315e1de Mon Sep 17 00:00:00 2001 From: ejeffrli Date: Wed, 28 Aug 2024 13:02:08 -0400 Subject: [PATCH 15/87] Add GDCv2 properties to environment --- .../connector/lambda/GlueConnectionUtils.java | 43 ++++++------------- 1 file changed, 13 insertions(+), 30 deletions(-) diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/GlueConnectionUtils.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/GlueConnectionUtils.java index f799805450..f2c729b1d6 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/GlueConnectionUtils.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/GlueConnectionUtils.java @@ -19,13 +19,12 @@ */ package com.amazonaws.athena.connector.lambda; -import 
com.fasterxml.jackson.core.type.TypeReference; -import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.http.apache.ApacheHttpClient; import software.amazon.awssdk.services.glue.GlueClient; +import software.amazon.awssdk.services.glue.model.AuthenticationConfiguration; import software.amazon.awssdk.services.glue.model.Connection; import software.amazon.awssdk.services.glue.model.GetConnectionRequest; import software.amazon.awssdk.services.glue.model.GetConnectionResponse; @@ -34,17 +33,11 @@ import java.time.Duration; import java.util.HashMap; import java.util.Map; -import java.util.stream.Collectors; public class GlueConnectionUtils { // config property to store glue connection reference public static final String DEFAULT_GLUE_CONNECTION = "glue_connection"; - // Connection properties storing athena specific connection details - public static final String GLUE_CONNECTION_ATHENA_PROPERTIES = "AthenaProperties"; - public static final String GLUE_CONNECTION_ATHENA_CONNECTOR_PROPERTIES = "connectorProperties"; - public static final String GLUE_CONNECTION_ATHENA_DRIVER_PROPERTIES = "driverProperties"; - public static final String[] propertySubsets = {GLUE_CONNECTION_ATHENA_CONNECTOR_PROPERTIES, GLUE_CONNECTION_ATHENA_DRIVER_PROPERTIES}; private static final int CONNECT_TIMEOUT = 250; private static final Logger logger = LoggerFactory.getLogger(GlueConnectionUtils.class); @@ -63,8 +56,6 @@ public static Map getGlueConnection() HashMap cachedConfig = connectionNameCache.get(glueConnectionName); if (cachedConfig == null) { try { - HashMap> athenaPropertiesToMap = new HashMap>(); - GlueClient awsGlue = GlueClient.builder() .endpointOverride(new URI("https://glue-gamma.ap-south-1.amazonaws.com")) .httpClientBuilder(ApacheHttpClient @@ -74,26 +65,9 @@ public static Map getGlueConnection() GetConnectionResponse glueConnection = 
awsGlue.getConnection(GetConnectionRequest.builder().name(glueConnectionName).build()); logger.debug("Successfully retrieved connection {}", glueConnectionName); Connection connection = glueConnection.connection(); - String athenaPropertiesAsString = connection.connectionProperties().get(GLUE_CONNECTION_ATHENA_PROPERTIES); - try { - ObjectMapper mapper = new ObjectMapper(); - athenaPropertiesToMap = mapper.readValue(athenaPropertiesAsString, new TypeReference(){}); - logger.debug("Successfully parsed connection properties"); - } - catch (Exception err) { - logger.error("Error Parsing AthenaDriverProperties JSON to Map", err.toString()); - } - for (String subset : propertySubsets) { - if (athenaPropertiesToMap.containsKey(subset)) { - logger.debug("Adding {} subset from Glue Connection config.", subset); - Map properties = athenaPropertiesToMap.get(subset).entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, element -> String.valueOf(element.getValue()))); - logger.debug("Adding the following set of properties to config: {}", properties); - envConfig.putAll(properties); - } - else { - logger.debug("{} properties not included in Glue Connnection config.", subset); - } - } + envConfig.putAll(connection.athenaProperties()); + envConfig.putAll(connection.connectionPropertiesAsStrings()); + envConfig.putAll(authenticationConfigurationToMap(connection.authenticationConfiguration())); connectionNameCache.put(glueConnectionName, envConfig); } catch (Exception err) { @@ -110,4 +84,13 @@ public static Map getGlueConnection() } return envConfig; } + + private static Map authenticationConfigurationToMap(AuthenticationConfiguration auth) + { + Map authMap = new HashMap<>(); + + String[] splitArn = auth.secretArn().split(":"); + authMap.put("secret_name", splitArn[splitArn.length - 1]); + return authMap; + } } From c3968c510e009c23b78b44e015b45fd9268c4274 Mon Sep 17 00:00:00 2001 From: Aimery Methena <159072740+aimethed@users.noreply.github.com> Date: Fri, 30 Aug 2024 
12:57:08 -0400 Subject: [PATCH 16/87] Migrate Cloudwatch Metrics connector to v2 (#2182) --- athena-cloudwatch-metrics/pom.xml | 6 +- .../cloudwatch/metrics/MetricStatSerDe.java | 31 ++---- .../cloudwatch/metrics/MetricUtils.java | 62 ++++++----- .../metrics/MetricsExceptionFilter.java | 8 +- .../metrics/MetricsMetadataHandler.java | 50 ++++----- .../metrics/MetricsRecordHandler.java | 102 +++++++++--------- .../metrics/MetricStatSerDeTest.java | 30 +++--- .../cloudwatch/metrics/MetricUtilsTest.java | 97 ++++++++--------- .../metrics/MetricsMetadataHandlerTest.java | 30 +++--- .../metrics/MetricsRecordHandlerTest.java | 98 +++++++++-------- 10 files changed, 254 insertions(+), 260 deletions(-) diff --git a/athena-cloudwatch-metrics/pom.xml b/athena-cloudwatch-metrics/pom.xml index 6c8bff216e..b249525238 100644 --- a/athena-cloudwatch-metrics/pom.xml +++ b/athena-cloudwatch-metrics/pom.xml @@ -16,9 +16,9 @@ withdep - com.amazonaws - aws-java-sdk-cloudwatch - ${aws-sdk.version} + software.amazon.awssdk + cloudwatch + ${aws-sdk-v2.version} diff --git a/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricStatSerDe.java b/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricStatSerDe.java index 44bfcef8e0..e44c66e7f1 100644 --- a/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricStatSerDe.java +++ b/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricStatSerDe.java @@ -19,14 +19,14 @@ */ package com.amazonaws.athena.connectors.cloudwatch.metrics; -import com.amazonaws.services.cloudwatch.model.MetricStat; -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.type.CollectionType; +import 
software.amazon.awssdk.services.cloudwatch.model.MetricStat; import java.io.IOException; import java.util.List; +import java.util.stream.Collectors; /** * Used to serialize and deserialize Cloudwatch Metrics MetricStat objects. This is used @@ -48,7 +48,7 @@ private MetricStatSerDe() {} public static String serialize(List metricStats) { try { - return mapper.writeValueAsString(new MetricStatHolder(metricStats)); + return mapper.writeValueAsString(metricStats.stream().map(stat -> stat.toBuilder()).collect(Collectors.toList())); } catch (JsonProcessingException ex) { throw new RuntimeException(ex); @@ -64,30 +64,11 @@ public static String serialize(List metricStats) public static List deserialize(String serializedMetricStats) { try { - return mapper.readValue(serializedMetricStats, MetricStatHolder.class).getMetricStats(); + CollectionType metricStatBuilderCollection = mapper.getTypeFactory().constructCollectionType(List.class, MetricStat.serializableBuilderClass()); + return ((List) mapper.readValue(serializedMetricStats, metricStatBuilderCollection)).stream().map(stat -> stat.build()).collect(Collectors.toList()); } catch (IOException ex) { throw new RuntimeException(ex); } } - - /** - * Helper which allows us to use Jackson's Object Mapper to serialize a List of MetricStats. 
- */ - private static class MetricStatHolder - { - private final List metricStats; - - @JsonCreator - public MetricStatHolder(@JsonProperty("metricStats") List metricStats) - { - this.metricStats = metricStats; - } - - @JsonProperty - public List getMetricStats() - { - return metricStats; - } - } } diff --git a/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricUtils.java b/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricUtils.java index 40ebeacaeb..7c8b97aa90 100644 --- a/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricUtils.java +++ b/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricUtils.java @@ -26,15 +26,15 @@ import com.amazonaws.athena.connector.lambda.domain.predicate.SortedRangeSet; import com.amazonaws.athena.connector.lambda.domain.predicate.ValueSet; import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest; -import com.amazonaws.services.cloudwatch.model.Dimension; -import com.amazonaws.services.cloudwatch.model.DimensionFilter; -import com.amazonaws.services.cloudwatch.model.GetMetricDataRequest; -import com.amazonaws.services.cloudwatch.model.ListMetricsRequest; -import com.amazonaws.services.cloudwatch.model.Metric; -import com.amazonaws.services.cloudwatch.model.MetricDataQuery; -import com.amazonaws.services.cloudwatch.model.MetricStat; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.cloudwatch.model.Dimension; +import software.amazon.awssdk.services.cloudwatch.model.DimensionFilter; +import software.amazon.awssdk.services.cloudwatch.model.GetMetricDataRequest; +import software.amazon.awssdk.services.cloudwatch.model.ListMetricsRequest; +import software.amazon.awssdk.services.cloudwatch.model.Metric; +import software.amazon.awssdk.services.cloudwatch.model.MetricDataQuery; +import 
software.amazon.awssdk.services.cloudwatch.model.MetricStat; import java.util.ArrayList; import java.util.Collections; @@ -70,11 +70,11 @@ private MetricUtils() {} */ protected static boolean applyMetricConstraints(ConstraintEvaluator evaluator, Metric metric, String statistic) { - if (!evaluator.apply(NAMESPACE_FIELD, metric.getNamespace())) { + if (!evaluator.apply(NAMESPACE_FIELD, metric.namespace())) { return false; } - if (!evaluator.apply(METRIC_NAME_FIELD, metric.getMetricName())) { + if (!evaluator.apply(METRIC_NAME_FIELD, metric.metricName())) { return false; } @@ -82,13 +82,13 @@ protected static boolean applyMetricConstraints(ConstraintEvaluator evaluator, M return false; } - for (Dimension next : metric.getDimensions()) { - if (evaluator.apply(DIMENSION_NAME_FIELD, next.getName()) && evaluator.apply(DIMENSION_VALUE_FIELD, next.getValue())) { + for (Dimension next : metric.dimensions()) { + if (evaluator.apply(DIMENSION_NAME_FIELD, next.name()) && evaluator.apply(DIMENSION_VALUE_FIELD, next.value())) { return true; } } - if (metric.getDimensions().isEmpty() && + if (metric.dimensions().isEmpty() && evaluator.apply(DIMENSION_NAME_FIELD, null) && evaluator.apply(DIMENSION_VALUE_FIELD, null)) { return true; @@ -100,28 +100,29 @@ protected static boolean applyMetricConstraints(ConstraintEvaluator evaluator, M /** * Attempts to push the supplied predicate constraints onto the Cloudwatch Metrics request. 
*/ - protected static void pushDownPredicate(Constraints constraints, ListMetricsRequest listMetricsRequest) + protected static void pushDownPredicate(Constraints constraints, ListMetricsRequest.Builder listMetricsRequest) { Map summary = constraints.getSummary(); ValueSet namespaceConstraint = summary.get(NAMESPACE_FIELD); if (namespaceConstraint != null && namespaceConstraint.isSingleValue()) { - listMetricsRequest.setNamespace(namespaceConstraint.getSingleValue().toString()); + listMetricsRequest.namespace(namespaceConstraint.getSingleValue().toString()); } ValueSet metricConstraint = summary.get(METRIC_NAME_FIELD); if (metricConstraint != null && metricConstraint.isSingleValue()) { - listMetricsRequest.setMetricName(metricConstraint.getSingleValue().toString()); + listMetricsRequest.metricName(metricConstraint.getSingleValue().toString()); } ValueSet dimensionNameConstraint = summary.get(DIMENSION_NAME_FIELD); ValueSet dimensionValueConstraint = summary.get(DIMENSION_VALUE_FIELD); if (dimensionNameConstraint != null && dimensionNameConstraint.isSingleValue() && dimensionValueConstraint != null && dimensionValueConstraint.isSingleValue()) { - DimensionFilter filter = new DimensionFilter() - .withName(dimensionNameConstraint.getSingleValue().toString()) - .withValue(dimensionValueConstraint.getSingleValue().toString()); - listMetricsRequest.setDimensions(Collections.singletonList(filter)); + DimensionFilter filter = DimensionFilter.builder() + .name(dimensionNameConstraint.getSingleValue().toString()) + .value(dimensionValueConstraint.getSingleValue().toString()) + .build(); + listMetricsRequest.dimensions(Collections.singletonList(filter)); } } @@ -136,18 +137,15 @@ protected static GetMetricDataRequest makeGetMetricDataRequest(ReadRecordsReques Split split = readRecordsRequest.getSplit(); String serializedMetricStats = split.getProperty(MetricStatSerDe.SERIALIZED_METRIC_STATS_FIELD_NAME); List metricStats = MetricStatSerDe.deserialize(serializedMetricStats); - 
GetMetricDataRequest dataRequest = new GetMetricDataRequest(); - com.amazonaws.services.cloudwatch.model.Metric metric = new com.amazonaws.services.cloudwatch.model.Metric(); - metric.setNamespace(split.getProperty(NAMESPACE_FIELD)); - metric.setMetricName(split.getProperty(METRIC_NAME_FIELD)); + GetMetricDataRequest.Builder dataRequestBuilder = GetMetricDataRequest.builder(); List metricDataQueries = new ArrayList<>(); int metricId = 1; for (MetricStat nextMetricStat : metricStats) { - metricDataQueries.add(new MetricDataQuery().withMetricStat(nextMetricStat).withId("m" + metricId++)); + metricDataQueries.add(MetricDataQuery.builder().metricStat(nextMetricStat).id("m" + metricId++).build()); } - dataRequest.withMetricDataQueries(metricDataQueries); + dataRequestBuilder.metricDataQueries(metricDataQueries); ValueSet timeConstraint = readRecordsRequest.getConstraints().getSummary().get(TIMESTAMP_FIELD); if (timeConstraint instanceof SortedRangeSet && !timeConstraint.isNullAllowed()) { @@ -162,30 +160,30 @@ protected static GetMetricDataRequest makeGetMetricDataRequest(ReadRecordsReques Long lowerBound = (Long) basicPredicate.getLow().getValue(); //TODO: confirm timezone handling logger.info("makeGetMetricsRequest: with startTime " + (lowerBound * 1000) + " " + new Date(lowerBound * 1000)); - dataRequest.withStartTime(new Date(lowerBound * 1000)); + dataRequestBuilder.startTime(new Date(lowerBound * 1000).toInstant()); } else { //TODO: confirm timezone handling - dataRequest.withStartTime(new Date(0)); + dataRequestBuilder.startTime(new Date(0).toInstant()); } if (!basicPredicate.getHigh().isNullValue()) { Long upperBound = (Long) basicPredicate.getHigh().getValue(); //TODO: confirm timezone handling logger.info("makeGetMetricsRequest: with endTime " + (upperBound * 1000) + " " + new Date(upperBound * 1000)); - dataRequest.withEndTime(new Date(upperBound * 1000)); + dataRequestBuilder.endTime(new Date(upperBound * 1000).toInstant()); } else { //TODO: confirm timezone 
handling - dataRequest.withEndTime(new Date(System.currentTimeMillis())); + dataRequestBuilder.endTime(new Date(System.currentTimeMillis()).toInstant()); } } else { //TODO: confirm timezone handling - dataRequest.withStartTime(new Date(0)); - dataRequest.withEndTime(new Date(System.currentTimeMillis())); + dataRequestBuilder.startTime(new Date(0).toInstant()); + dataRequestBuilder.endTime(new Date(System.currentTimeMillis()).toInstant()); } - return dataRequest; + return dataRequestBuilder.build(); } } diff --git a/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsExceptionFilter.java b/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsExceptionFilter.java index 4810c6a017..1efb757f46 100644 --- a/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsExceptionFilter.java +++ b/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsExceptionFilter.java @@ -20,8 +20,8 @@ package com.amazonaws.athena.connectors.cloudwatch.metrics; import com.amazonaws.athena.connector.lambda.ThrottlingInvoker; -import com.amazonaws.services.cloudwatch.model.AmazonCloudWatchException; -import com.amazonaws.services.cloudwatch.model.LimitExceededException; +import software.amazon.awssdk.services.cloudwatch.model.CloudWatchException; +import software.amazon.awssdk.services.cloudwatch.model.LimitExceededException; /** * Used to identify Exceptions that are related to Cloudwatch Metrics throttling events. 
@@ -36,11 +36,11 @@ private MetricsExceptionFilter() {} @Override public boolean isMatch(Exception ex) { - if (ex instanceof AmazonCloudWatchException && ex.getMessage().startsWith("Rate exceeded")) { + if (ex instanceof CloudWatchException && ex.getMessage().startsWith("Rate exceeded")) { return true; } - if (ex instanceof AmazonCloudWatchException && ex.getMessage().startsWith("Request has been throttled")) { + if (ex instanceof CloudWatchException && ex.getMessage().startsWith("Request has been throttled")) { return true; } diff --git a/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsMetadataHandler.java b/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsMetadataHandler.java index 3d1f3747de..1efc065bf9 100644 --- a/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsMetadataHandler.java +++ b/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsMetadataHandler.java @@ -42,18 +42,17 @@ import com.amazonaws.athena.connectors.cloudwatch.metrics.tables.MetricSamplesTable; import com.amazonaws.athena.connectors.cloudwatch.metrics.tables.MetricsTable; import com.amazonaws.athena.connectors.cloudwatch.metrics.tables.Table; -import com.amazonaws.services.cloudwatch.AmazonCloudWatch; -import com.amazonaws.services.cloudwatch.AmazonCloudWatchClientBuilder; -import com.amazonaws.services.cloudwatch.model.ListMetricsRequest; -import com.amazonaws.services.cloudwatch.model.ListMetricsResult; -import com.amazonaws.services.cloudwatch.model.Metric; -import com.amazonaws.services.cloudwatch.model.MetricStat; import com.amazonaws.util.CollectionUtils; import com.google.common.collect.Lists; import org.apache.arrow.util.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import 
software.amazon.awssdk.services.cloudwatch.CloudWatchClient; +import software.amazon.awssdk.services.cloudwatch.model.ListMetricsRequest; +import software.amazon.awssdk.services.cloudwatch.model.ListMetricsResponse; +import software.amazon.awssdk.services.cloudwatch.model.Metric; +import software.amazon.awssdk.services.cloudwatch.model.MetricStat; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.ArrayList; @@ -107,7 +106,7 @@ public class MetricsMetadataHandler //Used to handle throttling events by applying AIMD congestion control private final ThrottlingInvoker invoker; - private final AmazonCloudWatch metrics; + private final CloudWatchClient metrics; static { //The statistics supported by Cloudwatch Metrics by default @@ -133,13 +132,13 @@ public class MetricsMetadataHandler public MetricsMetadataHandler(java.util.Map configOptions) { super(SOURCE_TYPE, configOptions); - this.metrics = AmazonCloudWatchClientBuilder.standard().build(); + this.metrics = CloudWatchClient.create(); this.invoker = ThrottlingInvoker.newDefaultBuilder(EXCEPTION_FILTER, configOptions).build(); } @VisibleForTesting protected MetricsMetadataHandler( - AmazonCloudWatch metrics, + CloudWatchClient metrics, EncryptionKeyFactory keyFactory, SecretsManagerClient secretsManager, AthenaClient athena, @@ -235,33 +234,36 @@ public GetSplitsResponse doGetSplits(BlockAllocator blockAllocator, GetSplitsReq try (ConstraintEvaluator constraintEvaluator = new ConstraintEvaluator(blockAllocator, METRIC_DATA_TABLE.getSchema(), getSplitsRequest.getConstraints())) { - ListMetricsRequest listMetricsRequest = new ListMetricsRequest(); - MetricUtils.pushDownPredicate(getSplitsRequest.getConstraints(), listMetricsRequest); - listMetricsRequest.setNextToken(getSplitsRequest.getContinuationToken()); + ListMetricsRequest.Builder listMetricsRequestBuilder = ListMetricsRequest.builder(); + MetricUtils.pushDownPredicate(getSplitsRequest.getConstraints(), 
listMetricsRequestBuilder); + listMetricsRequestBuilder.nextToken(getSplitsRequest.getContinuationToken()); String period = getPeriodFromConstraint(getSplitsRequest.getConstraints()); Set splits = new HashSet<>(); - ListMetricsResult result = invoker.invoke(() -> metrics.listMetrics(listMetricsRequest)); + ListMetricsRequest listMetricsRequest = listMetricsRequestBuilder.build(); + ListMetricsResponse result = invoker.invoke(() -> metrics.listMetrics(listMetricsRequest)); List metricStats = new ArrayList<>(100); - for (Metric nextMetric : result.getMetrics()) { + for (Metric nextMetric : result.metrics()) { for (String nextStatistic : STATISTICS) { if (MetricUtils.applyMetricConstraints(constraintEvaluator, nextMetric, nextStatistic)) { - metricStats.add(new MetricStat() - .withMetric(new Metric() - .withNamespace(nextMetric.getNamespace()) - .withMetricName(nextMetric.getMetricName()) - .withDimensions(nextMetric.getDimensions())) - .withPeriod(Integer.valueOf(period)) - .withStat(nextStatistic)); + metricStats.add(MetricStat.builder() + .metric(Metric.builder() + .namespace(nextMetric.namespace()) + .metricName(nextMetric.metricName()) + .dimensions(nextMetric.dimensions()) + .build()) + .period(Integer.valueOf(period)) + .stat(nextStatistic) + .build()); } } } String continuationToken = null; - if (result.getNextToken() != null && - !result.getNextToken().equalsIgnoreCase(listMetricsRequest.getNextToken())) { - continuationToken = result.getNextToken(); + if (result.nextToken() != null && + !result.nextToken().equalsIgnoreCase(listMetricsRequest.nextToken())) { + continuationToken = result.nextToken(); } if (CollectionUtils.isNullOrEmpty(metricStats)) { diff --git a/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsRecordHandler.java b/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsRecordHandler.java index 5560b39f85..3ca9219f96 100644 --- 
a/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsRecordHandler.java +++ b/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsRecordHandler.java @@ -29,26 +29,25 @@ import com.amazonaws.athena.connectors.cloudwatch.metrics.tables.MetricSamplesTable; import com.amazonaws.athena.connectors.cloudwatch.metrics.tables.MetricsTable; import com.amazonaws.athena.connectors.cloudwatch.metrics.tables.Table; -import com.amazonaws.services.cloudwatch.AmazonCloudWatch; -import com.amazonaws.services.cloudwatch.AmazonCloudWatchClientBuilder; -import com.amazonaws.services.cloudwatch.model.Dimension; -import com.amazonaws.services.cloudwatch.model.GetMetricDataRequest; -import com.amazonaws.services.cloudwatch.model.GetMetricDataResult; -import com.amazonaws.services.cloudwatch.model.ListMetricsRequest; -import com.amazonaws.services.cloudwatch.model.ListMetricsResult; -import com.amazonaws.services.cloudwatch.model.Metric; -import com.amazonaws.services.cloudwatch.model.MetricDataQuery; -import com.amazonaws.services.cloudwatch.model.MetricDataResult; -import com.amazonaws.services.cloudwatch.model.MetricStat; import org.apache.arrow.util.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Field; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.cloudwatch.CloudWatchClient; +import software.amazon.awssdk.services.cloudwatch.model.Dimension; +import software.amazon.awssdk.services.cloudwatch.model.GetMetricDataRequest; +import software.amazon.awssdk.services.cloudwatch.model.GetMetricDataResponse; +import software.amazon.awssdk.services.cloudwatch.model.ListMetricsRequest; +import software.amazon.awssdk.services.cloudwatch.model.ListMetricsResponse; +import software.amazon.awssdk.services.cloudwatch.model.Metric; +import 
software.amazon.awssdk.services.cloudwatch.model.MetricDataQuery; +import software.amazon.awssdk.services.cloudwatch.model.MetricDataResult; +import software.amazon.awssdk.services.cloudwatch.model.MetricStat; import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; -import java.util.Date; +import java.time.Instant; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -97,22 +96,22 @@ public class MetricsRecordHandler private final ThrottlingInvoker invoker; private final S3Client amazonS3; - private final AmazonCloudWatch metrics; + private final CloudWatchClient cloudwatchClient; public MetricsRecordHandler(java.util.Map configOptions) { this(S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(), - AmazonCloudWatchClientBuilder.standard().build(), configOptions); + CloudWatchClient.create(), configOptions); } @VisibleForTesting - protected MetricsRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, AmazonCloudWatch metrics, java.util.Map configOptions) + protected MetricsRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, CloudWatchClient metrics, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, SOURCE_TYPE, configOptions); this.amazonS3 = amazonS3; - this.metrics = metrics; + this.cloudwatchClient = metrics; this.invoker = ThrottlingInvoker.newDefaultBuilder(EXCEPTION_FILTER, configOptions) .withInitialDelayMs(THROTTLING_INITIAL_DELAY) .withIncrease(THROTTLING_INCREMENTAL_INCREASE) @@ -143,37 +142,39 @@ else if (readRecordsRequest.getTableName().getTableName().equalsIgnoreCase(METRI private void readMetricsWithConstraint(BlockSpiller blockSpiller, ReadRecordsRequest request, QueryStatusChecker queryStatusChecker) throws TimeoutException { - ListMetricsRequest listMetricsRequest = new ListMetricsRequest(); - 
MetricUtils.pushDownPredicate(request.getConstraints(), listMetricsRequest); + ListMetricsRequest.Builder listMetricsRequestBuilder = ListMetricsRequest.builder(); + MetricUtils.pushDownPredicate(request.getConstraints(), listMetricsRequestBuilder); String prevToken; + String nextToken; Set requiredFields = new HashSet<>(); request.getSchema().getFields().stream().forEach(next -> requiredFields.add(next.getName())); ValueSet dimensionNameConstraint = request.getConstraints().getSummary().get(DIMENSION_NAME_FIELD); ValueSet dimensionValueConstraint = request.getConstraints().getSummary().get(DIMENSION_VALUE_FIELD); do { - prevToken = listMetricsRequest.getNextToken(); - ListMetricsResult result = invoker.invoke(() -> metrics.listMetrics(listMetricsRequest)); - for (Metric nextMetric : result.getMetrics()) { + ListMetricsRequest listMetricsRequest = listMetricsRequestBuilder.build(); + prevToken = listMetricsRequest.nextToken(); + ListMetricsResponse result = invoker.invoke(() -> cloudwatchClient.listMetrics(listMetricsRequest)); + for (Metric nextMetric : result.metrics()) { blockSpiller.writeRows((Block block, int row) -> { boolean matches = MetricUtils.applyMetricConstraints(blockSpiller.getConstraintEvaluator(), nextMetric, null); if (matches) { - matches &= block.offerValue(METRIC_NAME_FIELD, row, nextMetric.getMetricName()); - matches &= block.offerValue(NAMESPACE_FIELD, row, nextMetric.getNamespace()); + matches &= block.offerValue(METRIC_NAME_FIELD, row, nextMetric.metricName()); + matches &= block.offerValue(NAMESPACE_FIELD, row, nextMetric.namespace()); matches &= block.offerComplexValue(STATISTIC_FIELD, row, DEFAULT, STATISTICS); matches &= block.offerComplexValue(DIMENSIONS_FIELD, row, (Field field, Object val) -> { if (field.getName().equals(DIMENSION_NAME_FIELD)) { - return ((Dimension) val).getName(); + return ((Dimension) val).name(); } else if (field.getName().equals(DIMENSION_VALUE_FIELD)) { - return ((Dimension) val).getValue(); + return 
((Dimension) val).value(); } throw new RuntimeException("Unexpected field " + field.getName()); }, - nextMetric.getDimensions()); + nextMetric.dimensions()); //This field is 'faked' in that we just use it as a convenient way to filter single dimensions. As such //we always populate it with the value of the filter if the constraint passed and the filter was singleValue @@ -190,9 +191,10 @@ else if (field.getName().equals(DIMENSION_VALUE_FIELD)) { return matches ? 1 : 0; }); } - listMetricsRequest.setNextToken(result.getNextToken()); + nextToken = result.nextToken(); + listMetricsRequestBuilder.nextToken(nextToken); } - while (listMetricsRequest.getNextToken() != null && !listMetricsRequest.getNextToken().equalsIgnoreCase(prevToken) && queryStatusChecker.isQueryRunning()); + while (nextToken != null && !nextToken.equalsIgnoreCase(prevToken) && queryStatusChecker.isQueryRunning()); } /** @@ -201,46 +203,49 @@ else if (field.getName().equals(DIMENSION_VALUE_FIELD)) { private void readMetricSamplesWithConstraint(BlockSpiller blockSpiller, ReadRecordsRequest request, QueryStatusChecker queryStatusChecker) throws TimeoutException { - GetMetricDataRequest dataRequest = MetricUtils.makeGetMetricDataRequest(request); + GetMetricDataRequest originalDataRequest = MetricUtils.makeGetMetricDataRequest(request); Map queries = new HashMap<>(); - for (MetricDataQuery query : dataRequest.getMetricDataQueries()) { - queries.put(query.getId(), query); + for (MetricDataQuery query : originalDataRequest.metricDataQueries()) { + queries.put(query.id(), query); } + GetMetricDataRequest.Builder dataRequestBuilder = originalDataRequest.toBuilder(); String prevToken; + String nextToken; ValueSet dimensionNameConstraint = request.getConstraints().getSummary().get(DIMENSION_NAME_FIELD); ValueSet dimensionValueConstraint = request.getConstraints().getSummary().get(DIMENSION_VALUE_FIELD); do { - prevToken = dataRequest.getNextToken(); - GetMetricDataResult result = invoker.invoke(() -> 
metrics.getMetricData(dataRequest)); - for (MetricDataResult nextMetric : result.getMetricDataResults()) { - MetricStat metricStat = queries.get(nextMetric.getId()).getMetricStat(); - List timestamps = nextMetric.getTimestamps(); - List values = nextMetric.getValues(); - for (int i = 0; i < nextMetric.getValues().size(); i++) { + GetMetricDataRequest dataRequest = dataRequestBuilder.build(); + prevToken = dataRequest.nextToken(); + GetMetricDataResponse result = invoker.invoke(() -> cloudwatchClient.getMetricData(dataRequest)); + for (MetricDataResult nextMetric : result.metricDataResults()) { + MetricStat metricStat = queries.get(nextMetric.id()).metricStat(); + List timestamps = nextMetric.timestamps(); + List values = nextMetric.values(); + for (int i = 0; i < nextMetric.values().size(); i++) { int sampleNum = i; blockSpiller.writeRows((Block block, int row) -> { /** * Most constraints were already applied at split generation so we only need to apply * a subset. */ - block.offerValue(METRIC_NAME_FIELD, row, metricStat.getMetric().getMetricName()); - block.offerValue(NAMESPACE_FIELD, row, metricStat.getMetric().getNamespace()); - block.offerValue(STATISTIC_FIELD, row, metricStat.getStat()); + block.offerValue(METRIC_NAME_FIELD, row, metricStat.metric().metricName()); + block.offerValue(NAMESPACE_FIELD, row, metricStat.metric().namespace()); + block.offerValue(STATISTIC_FIELD, row, metricStat.stat()); block.offerComplexValue(DIMENSIONS_FIELD, row, (Field field, Object val) -> { if (field.getName().equals(DIMENSION_NAME_FIELD)) { - return ((Dimension) val).getName(); + return ((Dimension) val).name(); } else if (field.getName().equals(DIMENSION_VALUE_FIELD)) { - return ((Dimension) val).getValue(); + return ((Dimension) val).value(); } throw new RuntimeException("Unexpected field " + field.getName()); }, - metricStat.getMetric().getDimensions()); + metricStat.metric().dimensions()); //This field is 'faked' in that we just use it as a convenient way to filter single 
dimensions. As such //we always populate it with the value of the filter if the constraint passed and the filter was singleValue @@ -254,19 +259,20 @@ else if (field.getName().equals(DIMENSION_VALUE_FIELD)) { ? null : dimensionValueConstraint.getSingleValue().toString(); block.offerValue(DIMENSION_VALUE_FIELD, row, dimVal); - block.offerValue(PERIOD_FIELD, row, metricStat.getPeriod()); + block.offerValue(PERIOD_FIELD, row, metricStat.period()); boolean matches = true; block.offerValue(VALUE_FIELD, row, values.get(sampleNum)); - long timestamp = timestamps.get(sampleNum).getTime() / 1000; + long timestamp = timestamps.get(sampleNum).getEpochSecond(); block.offerValue(TIMESTAMP_FIELD, row, timestamp); return matches ? 1 : 0; }); } } - dataRequest.setNextToken(result.getNextToken()); + nextToken = result.nextToken(); + dataRequestBuilder.nextToken(result.nextToken()); } - while (dataRequest.getNextToken() != null && !dataRequest.getNextToken().equalsIgnoreCase(prevToken) && queryStatusChecker.isQueryRunning()); + while (nextToken != null && !nextToken.equalsIgnoreCase(prevToken) && queryStatusChecker.isQueryRunning()); } } diff --git a/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricStatSerDeTest.java b/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricStatSerDeTest.java index 63d15023bc..bfde6ac296 100644 --- a/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricStatSerDeTest.java +++ b/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricStatSerDeTest.java @@ -19,12 +19,12 @@ */ package com.amazonaws.athena.connectors.cloudwatch.metrics; -import com.amazonaws.services.cloudwatch.model.Dimension; -import com.amazonaws.services.cloudwatch.model.Metric; -import com.amazonaws.services.cloudwatch.model.MetricStat; import org.junit.Test; import org.slf4j.Logger; import
org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.cloudwatch.model.Dimension; +import software.amazon.awssdk.services.cloudwatch.model.Metric; +import software.amazon.awssdk.services.cloudwatch.model.MetricStat; import java.util.ArrayList; import java.util.List; @@ -34,8 +34,8 @@ public class MetricStatSerDeTest { private static final Logger logger = LoggerFactory.getLogger(MetricStatSerDeTest.class); - private static final String EXPECTED_SERIALIZATION = "{\"metricStats\":[{\"metric\":{\"namespace\":\"namespace\",\"metricName\":\"metricName\",\"dimensions\":[" + - "{\"name\":\"dim_name1\",\"value\":\"dim_value1\"},{\"name\":\"dim_name2\",\"value\":\"dim_value2\"}]},\"period\":60,\"stat\":\"p90\",\"unit\":null}]}"; + private static final String EXPECTED_SERIALIZATION = "[{\"metric\":{\"namespace\":\"namespace\",\"metricName\":\"metricName\",\"dimensions\":[" + + "{\"name\":\"dim_name1\",\"value\":\"dim_value1\"},{\"name\":\"dim_name2\",\"value\":\"dim_value2\"}]},\"period\":60,\"stat\":\"p90\",\"unit\":null}]"; @Test public void serializeTest() @@ -48,17 +48,19 @@ public void serializeTest() String namespace = "namespace"; List dimensions = new ArrayList<>(); - dimensions.add(new Dimension().withName("dim_name1").withValue("dim_value1")); - dimensions.add(new Dimension().withName("dim_name2").withValue("dim_value2")); + dimensions.add(Dimension.builder().name("dim_name1").value("dim_value1").build()); + dimensions.add(Dimension.builder().name("dim_name2").value("dim_value2").build()); List metricStats = new ArrayList<>(); - metricStats.add(new MetricStat() - .withMetric(new Metric() - .withNamespace(namespace) - .withMetricName(metricName) - .withDimensions(dimensions)) - .withPeriod(60) - .withStat(statistic)); + metricStats.add(MetricStat.builder() + .metric(Metric.builder() + .namespace(namespace) + .metricName(metricName) + .dimensions(dimensions) + .build()) + .period(60) + .stat(statistic) + .build()); String actualSerialization = 
MetricStatSerDe.serialize(metricStats); logger.info("serializeTest: {}", actualSerialization); List actual = MetricStatSerDe.deserialize(actualSerialization); diff --git a/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricUtilsTest.java b/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricUtilsTest.java index 7929635f31..c32cd6cd5c 100644 --- a/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricUtilsTest.java +++ b/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricUtilsTest.java @@ -31,18 +31,18 @@ import com.amazonaws.athena.connector.lambda.domain.predicate.ValueSet; import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest; import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; -import com.amazonaws.services.cloudwatch.model.Dimension; -import com.amazonaws.services.cloudwatch.model.DimensionFilter; -import com.amazonaws.services.cloudwatch.model.GetMetricDataRequest; -import com.amazonaws.services.cloudwatch.model.ListMetricsRequest; -import com.amazonaws.services.cloudwatch.model.Metric; -import com.amazonaws.services.cloudwatch.model.MetricStat; import org.apache.arrow.vector.types.pojo.Schema; import com.google.common.collect.ImmutableList; import org.apache.arrow.vector.types.Types; import org.junit.After; import org.junit.Before; import org.junit.Test; +import software.amazon.awssdk.services.cloudwatch.model.Dimension; +import software.amazon.awssdk.services.cloudwatch.model.DimensionFilter; +import software.amazon.awssdk.services.cloudwatch.model.GetMetricDataRequest; +import software.amazon.awssdk.services.cloudwatch.model.ListMetricsRequest; +import software.amazon.awssdk.services.cloudwatch.model.Metric; +import software.amazon.awssdk.services.cloudwatch.model.MetricStat; import java.util.ArrayList; import java.util.Collections; @@ 
-100,33 +100,21 @@ public void applyMetricConstraints() ConstraintEvaluator constraintEvaluator = new ConstraintEvaluator(allocator, schema, new Constraints(constraintsMap, Collections.emptyList(), Collections.emptyList(), DEFAULT_NO_LIMIT)); - Metric metric = new Metric() - .withNamespace("match1") - .withMetricName("match2") - .withDimensions(new Dimension().withName("match4").withValue("match5")); + Metric metric = Metric.builder() + .namespace("match1") + .metricName("match2") + .dimensions(Dimension.builder().name("match4").value("match5").build()) + .build(); String statistic = "match3"; assertTrue(MetricUtils.applyMetricConstraints(constraintEvaluator, metric, statistic)); - assertFalse(MetricUtils.applyMetricConstraints(constraintEvaluator, copyMetric(metric).withNamespace("no_match"), statistic)); - assertFalse(MetricUtils.applyMetricConstraints(constraintEvaluator, copyMetric(metric).withMetricName("no_match"), statistic)); + assertFalse(MetricUtils.applyMetricConstraints(constraintEvaluator, metric.toBuilder().namespace("no_match").build(), statistic)); + assertFalse(MetricUtils.applyMetricConstraints(constraintEvaluator, metric.toBuilder().metricName("no_match").build(), statistic)); assertFalse(MetricUtils.applyMetricConstraints(constraintEvaluator, - copyMetric(metric).withDimensions(Collections.singletonList(new Dimension().withName("no_match").withValue("match5"))), statistic)); + metric.toBuilder().dimensions(Collections.singletonList(Dimension.builder().name("no_match").value("match5").build())).build(), statistic)); assertFalse(MetricUtils.applyMetricConstraints(constraintEvaluator, - copyMetric(metric).withDimensions(Collections.singletonList(new Dimension().withName("match4").withValue("no_match"))), statistic)); - assertFalse(MetricUtils.applyMetricConstraints(constraintEvaluator, copyMetric(metric), "no_match")); - } - - private Metric copyMetric(Metric metric) - { - Metric newMetric = new Metric() - .withNamespace(metric.getNamespace()) - 
.withMetricName(metric.getMetricName()); - - List dims = new ArrayList<>(); - for (Dimension next : metric.getDimensions()) { - dims.add(new Dimension().withName(next.getName()).withValue(next.getValue())); - } - return newMetric.withDimensions(dims); + metric.toBuilder().dimensions(Collections.singletonList(Dimension.builder().name("match4").value("no_match").build())).build(), statistic)); + assertFalse(MetricUtils.applyMetricConstraints(constraintEvaluator, metric, "no_match")); } @Test @@ -139,13 +127,14 @@ public void pushDownPredicate() constraintsMap.put(DIMENSION_NAME_FIELD, makeStringEquals(allocator, "match4")); constraintsMap.put(DIMENSION_VALUE_FIELD, makeStringEquals(allocator, "match5")); - ListMetricsRequest request = new ListMetricsRequest(); - MetricUtils.pushDownPredicate(new Constraints(constraintsMap, Collections.emptyList(), Collections.emptyList(), DEFAULT_NO_LIMIT), request); + ListMetricsRequest.Builder requestBuilder = ListMetricsRequest.builder(); + MetricUtils.pushDownPredicate(new Constraints(constraintsMap, Collections.emptyList(), Collections.emptyList(), DEFAULT_NO_LIMIT), requestBuilder); + ListMetricsRequest request = requestBuilder.build(); - assertEquals("match1", request.getNamespace()); - assertEquals("match2", request.getMetricName()); - assertEquals(1, request.getDimensions().size()); - assertEquals(new DimensionFilter().withName("match4").withValue("match5"), request.getDimensions().get(0)); + assertEquals("match1", request.namespace()); + assertEquals("match2", request.metricName()); + assertEquals(1, request.dimensions().size()); + assertEquals(DimensionFilter.builder().name("match4").value("match5").build(), request.dimensions().get(0)); } @Test @@ -159,17 +148,19 @@ public void makeGetMetricDataRequest() String namespace = "namespace"; List dimensions = new ArrayList<>(); - dimensions.add(new Dimension().withName("dim_name1").withValue("dim_value1")); - dimensions.add(new 
Dimension().withName("dim_name2").withValue("dim_value2")); + dimensions.add(Dimension.builder().name("dim_name1").value("dim_value1").build()); + dimensions.add(Dimension.builder().name("dim_name2").value("dim_value2").build()); List metricStats = new ArrayList<>(); - metricStats.add(new MetricStat() - .withMetric(new Metric() - .withNamespace(namespace) - .withMetricName(metricName) - .withDimensions(dimensions)) - .withPeriod(60) - .withStat(statistic)); + metricStats.add(MetricStat.builder() + .metric(Metric.builder() + .namespace(namespace) + .metricName(metricName) + .dimensions(dimensions) + .build()) + .period(60) + .stat(statistic) + .build()); Split split = Split.newBuilder(null, null) .add(NAMESPACE_FIELD, namespace) @@ -198,16 +189,16 @@ public void makeGetMetricDataRequest() ); GetMetricDataRequest actual = MetricUtils.makeGetMetricDataRequest(request); - assertEquals(1, actual.getMetricDataQueries().size()); - assertNotNull(actual.getMetricDataQueries().get(0).getId()); - MetricStat metricStat = actual.getMetricDataQueries().get(0).getMetricStat(); + assertEquals(1, actual.metricDataQueries().size()); + assertNotNull(actual.metricDataQueries().get(0).id()); + MetricStat metricStat = actual.metricDataQueries().get(0).metricStat(); assertNotNull(metricStat); - assertEquals(metricName, metricStat.getMetric().getMetricName()); - assertEquals(namespace, metricStat.getMetric().getNamespace()); - assertEquals(statistic, metricStat.getStat()); - assertEquals(period, metricStat.getPeriod()); - assertEquals(2, metricStat.getMetric().getDimensions().size()); - assertEquals(1000L, actual.getStartTime().getTime()); - assertTrue(actual.getStartTime().getTime() <= System.currentTimeMillis() + 1_000); + assertEquals(metricName, metricStat.metric().metricName()); + assertEquals(namespace, metricStat.metric().namespace()); + assertEquals(statistic, metricStat.stat()); + assertEquals(period, metricStat.period()); + assertEquals(2, 
metricStat.metric().dimensions().size()); + assertEquals(1000L, actual.startTime().toEpochMilli()); + assertTrue(actual.startTime().toEpochMilli() <= System.currentTimeMillis() + 1_000); } } diff --git a/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsMetadataHandlerTest.java b/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsMetadataHandlerTest.java index 0b1aeef7cd..a194c74185 100644 --- a/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsMetadataHandlerTest.java +++ b/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsMetadataHandlerTest.java @@ -43,10 +43,6 @@ import com.amazonaws.athena.connector.lambda.metadata.MetadataResponse; import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; -import com.amazonaws.services.cloudwatch.AmazonCloudWatch; -import com.amazonaws.services.cloudwatch.model.ListMetricsRequest; -import com.amazonaws.services.cloudwatch.model.ListMetricsResult; -import com.amazonaws.services.cloudwatch.model.Metric; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.After; @@ -59,6 +55,10 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.cloudwatch.CloudWatchClient; +import software.amazon.awssdk.services.cloudwatch.model.ListMetricsRequest; +import software.amazon.awssdk.services.cloudwatch.model.ListMetricsResponse; +import software.amazon.awssdk.services.cloudwatch.model.Metric; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.ArrayList; @@ -92,7 +92,7 @@ public class MetricsMetadataHandlerTest private BlockAllocator allocator; @Mock - private 
AmazonCloudWatch mockMetrics; + private CloudWatchClient mockMetrics; @Mock private SecretsManagerClient mockSecretsManager; @@ -273,17 +273,20 @@ public void doGetMetricSamplesSplits() ListMetricsRequest request = invocation.getArgument(0, ListMetricsRequest.class); //assert that the namespace filter was indeed pushed down - assertEquals(namespaceFilter, request.getNamespace()); - String nextToken = (request.getNextToken() == null) ? "valid" : null; + assertEquals(namespaceFilter, request.namespace()); + String nextToken = (request.nextToken() == null) ? "valid" : null; List metrics = new ArrayList<>(); for (int i = 0; i < numMetrics; i++) { //first page does not match constraints, but second page should - String mockNamespace = (request.getNextToken() == null) ? "NotMyNameSpace" : namespaceFilter; - metrics.add(new Metric().withNamespace(mockNamespace).withMetricName("metric-" + i)); + String mockNamespace = (request.nextToken() == null) ? "NotMyNameSpace" : namespaceFilter; + metrics.add(Metric.builder() + .namespace(mockNamespace) + .metricName("metric-" + i) + .build()); } - return new ListMetricsResult().withNextToken(nextToken).withMetrics(metrics); + return ListMetricsResponse.builder().nextToken(nextToken).metrics(metrics).build(); }); Schema schema = SchemaBuilder.newBuilder().addIntField("partitionId").build(); @@ -356,9 +359,12 @@ public void doGetMetricSamplesSplitsEmptyMetrics() when(mockMetrics.listMetrics(nullable(ListMetricsRequest.class))).thenAnswer((InvocationOnMock invocation) -> { List metrics = new ArrayList<>(); for (int i = 0; i < numMetrics; i++) { - metrics.add(new Metric().withNamespace(namespace).withMetricName("metric-" + i)); + metrics.add(Metric.builder() + .namespace(namespace) + .metricName("metric-" + i) + .build()); } - return new ListMetricsResult().withNextToken(null).withMetrics(metrics); + return ListMetricsResponse.builder().nextToken(null).metrics(metrics).build(); }); Schema schema = 
SchemaBuilder.newBuilder().addIntField("partitionId").build(); diff --git a/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsRecordHandlerTest.java b/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsRecordHandlerTest.java index ae25003e62..8b50b97881 100644 --- a/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsRecordHandlerTest.java +++ b/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsRecordHandlerTest.java @@ -37,16 +37,6 @@ import com.amazonaws.athena.connectors.cloudwatch.metrics.tables.MetricSamplesTable; import com.amazonaws.athena.connectors.cloudwatch.metrics.tables.MetricsTable; import com.amazonaws.athena.connectors.cloudwatch.metrics.tables.Table; -import com.amazonaws.services.cloudwatch.AmazonCloudWatch; -import com.amazonaws.services.cloudwatch.model.Dimension; -import com.amazonaws.services.cloudwatch.model.GetMetricDataRequest; -import com.amazonaws.services.cloudwatch.model.GetMetricDataResult; -import com.amazonaws.services.cloudwatch.model.ListMetricsRequest; -import com.amazonaws.services.cloudwatch.model.ListMetricsResult; -import com.amazonaws.services.cloudwatch.model.Metric; -import com.amazonaws.services.cloudwatch.model.MetricDataQuery; -import com.amazonaws.services.cloudwatch.model.MetricDataResult; -import com.amazonaws.services.cloudwatch.model.MetricStat; import com.google.common.io.ByteStreams; import org.junit.After; import org.junit.Before; @@ -58,19 +48,29 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import software.amazon.awssdk.services.athena.AthenaClient; -import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; - import software.amazon.awssdk.core.ResponseInputStream; import software.amazon.awssdk.core.sync.RequestBody; +import 
software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.cloudwatch.CloudWatchClient; +import software.amazon.awssdk.services.cloudwatch.model.Dimension; +import software.amazon.awssdk.services.cloudwatch.model.GetMetricDataRequest; +import software.amazon.awssdk.services.cloudwatch.model.GetMetricDataResponse; +import software.amazon.awssdk.services.cloudwatch.model.ListMetricsRequest; +import software.amazon.awssdk.services.cloudwatch.model.ListMetricsResponse; +import software.amazon.awssdk.services.cloudwatch.model.Metric; +import software.amazon.awssdk.services.cloudwatch.model.MetricDataQuery; +import software.amazon.awssdk.services.cloudwatch.model.MetricDataResult; +import software.amazon.awssdk.services.cloudwatch.model.MetricStat; import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.model.GetObjectRequest; import software.amazon.awssdk.services.s3.model.GetObjectResponse; import software.amazon.awssdk.services.s3.model.PutObjectRequest; import software.amazon.awssdk.services.s3.model.PutObjectResponse; +import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.io.ByteArrayInputStream; import java.io.InputStream; +import java.time.Instant; import java.util.ArrayList; import java.util.Collections; import java.util.Date; @@ -115,7 +115,7 @@ public class MetricsRecordHandlerTest private EncryptionKeyFactory keyFactory = new LocalKeyFactory(); @Mock - private AmazonCloudWatch mockMetrics; + private CloudWatchClient mockMetrics; @Mock private S3Client mockS3; @@ -182,17 +182,23 @@ public void readMetricsWithConstraint() ListMetricsRequest request = invocation.getArgument(0, ListMetricsRequest.class); numCalls.incrementAndGet(); //assert that the namespace filter was indeed pushed down - assertEquals(namespace, request.getNamespace()); - String nextToken = (request.getNextToken() == null) ? 
"valid" : null; + assertEquals(namespace, request.namespace()); + String nextToken = (request.nextToken() == null) ? "valid" : null; List metrics = new ArrayList<>(); for (int i = 0; i < numMetrics; i++) { - metrics.add(new Metric().withNamespace(namespace).withMetricName("metric-" + i) - .withDimensions(new Dimension().withName(dimName).withValue(dimValue))); - metrics.add(new Metric().withNamespace(namespace + i).withMetricName("metric-" + i)); + metrics.add(Metric.builder() + .namespace(namespace) + .metricName("metric-" + i) + .dimensions(Dimension.builder() + .name(dimName) + .value(dimValue) + .build()) + .build()); + metrics.add(Metric.builder().namespace(namespace + i).metricName("metric-" + i).build()); } - return new ListMetricsResult().withNextToken(nextToken).withMetrics(metrics); + return ListMetricsResponse.builder().nextToken(nextToken).metrics(metrics).build(); }); Map constraintsMap = new HashMap<>(); @@ -245,7 +251,7 @@ public void readMetricSamplesWithConstraint() String period = "60"; String dimName = "dimName"; String dimValue = "dimValue"; - List dimensions = Collections.singletonList(new Dimension().withName(dimName).withValue(dimValue)); + List dimensions = Collections.singletonList(Dimension.builder().name(dimName).value(dimValue).build()); int numMetrics = 10; int numSamples = 10; @@ -269,13 +275,15 @@ public void readMetricSamplesWithConstraint() .build(); List metricStats = new ArrayList<>(); - metricStats.add(new MetricStat() - .withMetric(new Metric() - .withNamespace(namespace) - .withMetricName(metricName) - .withDimensions(dimensions)) - .withPeriod(60) - .withStat(statistic)); + metricStats.add(MetricStat.builder() + .metric(Metric.builder() + .namespace(namespace) + .metricName(metricName) + .dimensions(dimensions) + .build()) + .period(60) + .stat(statistic) + .build()); Split split = Split.newBuilder(spillLocation, keyFactory.create()) .add(MetricStatSerDe.SERIALIZED_METRIC_STATS_FIELD_NAME, 
MetricStatSerDe.serialize(metricStats)) @@ -309,40 +317,40 @@ public void readMetricSamplesWithConstraint() logger.info("readMetricSamplesWithConstraint: exit"); } - private GetMetricDataResult mockMetricData(InvocationOnMock invocation, int numMetrics, int numSamples) + private GetMetricDataResponse mockMetricData(InvocationOnMock invocation, int numMetrics, int numSamples) { GetMetricDataRequest request = invocation.getArgument(0, GetMetricDataRequest.class); /** * Confirm that all available criteria were pushed down into Cloudwatch Metrics */ - List queries = request.getMetricDataQueries(); + List queries = request.metricDataQueries(); assertEquals(1, queries.size()); MetricDataQuery query = queries.get(0); - MetricStat stat = query.getMetricStat(); - assertEquals("m1", query.getId()); - assertNotNull(stat.getPeriod()); - assertNotNull(stat.getMetric()); - assertNotNull(stat.getStat()); - assertNotNull(stat.getMetric().getMetricName()); - assertNotNull(stat.getMetric().getNamespace()); - assertNotNull(stat.getMetric().getDimensions()); - assertEquals(1, stat.getMetric().getDimensions().size()); - - String nextToken = (request.getNextToken() == null) ? "valid" : null; + MetricStat stat = query.metricStat(); + assertEquals("m1", query.id()); + assertNotNull(stat.period()); + assertNotNull(stat.metric()); + assertNotNull(stat.stat()); + assertNotNull(stat.metric().metricName()); + assertNotNull(stat.metric().namespace()); + assertNotNull(stat.metric().dimensions()); + assertEquals(1, stat.metric().dimensions().size()); + + String nextToken = (request.nextToken() == null) ? 
"valid" : null; List samples = new ArrayList<>(); for (int i = 0; i < numMetrics; i++) { List values = new ArrayList<>(); - List timestamps = new ArrayList<>(); + List timestamps = new ArrayList<>(); for (double j = 0; j < numSamples; j++) { values.add(j); - timestamps.add(new Date(System.currentTimeMillis() + (int) j)); + timestamps.add(new Date(System.currentTimeMillis() + (int) j).toInstant()); } - samples.add(new MetricDataResult().withValues(values).withTimestamps(timestamps).withId("m1")); + samples.add(MetricDataResult.builder().values(values).timestamps(timestamps).id("m1").build()); } - return new GetMetricDataResult().withNextToken(nextToken).withMetricDataResults(samples); + return GetMetricDataResponse.builder().nextToken(nextToken).metricDataResults(samples).build(); } private class ByteHolder From 6786fe67ff19140cd5b57d5f06dfde91829df048 Mon Sep 17 00:00:00 2001 From: ejeffrli <144148373+ejeffrli@users.noreply.github.com> Date: Tue, 3 Sep 2024 15:40:31 -0400 Subject: [PATCH 17/87] Refactor glue connection logic to be more abstract (#9) * Refactor glue connection logic to be reusable * Refactor glue connection logic to be more abstract * Add more jdbc environment properties * Change 'spill_kms_key_id' to map to 'kms_key_id' --- .../connector/lambda/GlueConnectionUtils.java | 96 -------------- .../connection/EnvironmentProperties.java | 119 ++++++++++++++++++ .../connection/JdbcEnvironmentProperties.java | 62 +++++++++ .../MySqlEnvironmentProperties.java | 31 +++++ .../OracleEnvironmentProperties.java | 42 +++++++ .../PostGreSqlEnvironmentProperties.java | 31 +++++ .../SqlServerEnvironmentProperties.java | 49 ++++++++ .../SynapseEnvironmentProperties.java | 31 +++++ .../MultiplexingJdbcCompositeHandler.java | 9 +- .../mysql/MySqlCompositeHandler.java | 4 +- .../oracle/OracleCompositeHandler.java | 4 +- .../PostGreSqlCompositeHandler.java | 4 +- .../sqlserver/SqlServerCompositeHandler.java | 4 +- .../synapse/SynapseCompositeHandler.java | 4 +- 14 files 
changed, 379 insertions(+), 111 deletions(-) delete mode 100644 athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/GlueConnectionUtils.java create mode 100644 athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java create mode 100644 athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/JdbcEnvironmentProperties.java create mode 100644 athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/MySqlEnvironmentProperties.java create mode 100644 athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/OracleEnvironmentProperties.java create mode 100644 athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/PostGreSqlEnvironmentProperties.java create mode 100644 athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/SqlServerEnvironmentProperties.java create mode 100644 athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/SynapseEnvironmentProperties.java diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/GlueConnectionUtils.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/GlueConnectionUtils.java deleted file mode 100644 index f2c729b1d6..0000000000 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/GlueConnectionUtils.java +++ /dev/null @@ -1,96 +0,0 @@ -/*- - * #%L - * Amazon Athena Query Federation SDK - * %% - * Copyright (C) 2019 - 2023 Amazon Web Services - * %% - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * #L% - */ -package com.amazonaws.athena.connector.lambda; - -import org.apache.commons.lang3.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import software.amazon.awssdk.http.apache.ApacheHttpClient; -import software.amazon.awssdk.services.glue.GlueClient; -import software.amazon.awssdk.services.glue.model.AuthenticationConfiguration; -import software.amazon.awssdk.services.glue.model.Connection; -import software.amazon.awssdk.services.glue.model.GetConnectionRequest; -import software.amazon.awssdk.services.glue.model.GetConnectionResponse; - -import java.net.URI; -import java.time.Duration; -import java.util.HashMap; -import java.util.Map; - -public class GlueConnectionUtils -{ - // config property to store glue connection reference - public static final String DEFAULT_GLUE_CONNECTION = "glue_connection"; - - private static final int CONNECT_TIMEOUT = 250; - private static final Logger logger = LoggerFactory.getLogger(GlueConnectionUtils.class); - private static HashMap> connectionNameCache = new HashMap<>(); - - private GlueConnectionUtils() - { - } - - public static Map getGlueConnection() - { - HashMap envConfig = new HashMap<>(System.getenv()); - - String glueConnectionName = envConfig.get(DEFAULT_GLUE_CONNECTION); - if (StringUtils.isNotBlank(glueConnectionName)) { - HashMap cachedConfig = connectionNameCache.get(glueConnectionName); - if (cachedConfig == null) { - try { - GlueClient awsGlue = GlueClient.builder() - .endpointOverride(new URI("https://glue-gamma.ap-south-1.amazonaws.com")) - 
.httpClientBuilder(ApacheHttpClient - .builder() - .connectionTimeout(Duration.ofMillis(CONNECT_TIMEOUT))) - .build(); - GetConnectionResponse glueConnection = awsGlue.getConnection(GetConnectionRequest.builder().name(glueConnectionName).build()); - logger.debug("Successfully retrieved connection {}", glueConnectionName); - Connection connection = glueConnection.connection(); - envConfig.putAll(connection.athenaProperties()); - envConfig.putAll(connection.connectionPropertiesAsStrings()); - envConfig.putAll(authenticationConfigurationToMap(connection.authenticationConfiguration())); - connectionNameCache.put(glueConnectionName, envConfig); - } - catch (Exception err) { - logger.error("Failed to retrieve connection: {}, and parse the connection properties!", glueConnectionName); - throw new RuntimeException(err.toString()); - } - } - else { - return cachedConfig; - } - } - else { - logger.debug("No Glue Connection name was defined in Environment Variables."); - } - return envConfig; - } - - private static Map authenticationConfigurationToMap(AuthenticationConfiguration auth) - { - Map authMap = new HashMap<>(); - - String[] splitArn = auth.secretArn().split(":"); - authMap.put("secret_name", splitArn[splitArn.length - 1]); - return authMap; - } -} diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java new file mode 100644 index 0000000000..144c065abf --- /dev/null +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java @@ -0,0 +1,119 @@ +/*- + * #%L + * Amazon Athena Query Federation SDK + * %% + * Copyright (C) 2019 - 2024 Amazon Web Services + * %% + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * #L% + */ +package com.amazonaws.athena.connector.lambda.connection; + +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import software.amazon.awssdk.http.apache.ApacheHttpClient; +import software.amazon.awssdk.services.glue.GlueClient; +import software.amazon.awssdk.services.glue.model.AuthenticationConfiguration; +import software.amazon.awssdk.services.glue.model.Connection; +import software.amazon.awssdk.services.glue.model.GetConnectionRequest; +import software.amazon.awssdk.services.glue.model.GetConnectionResponse; + +import java.net.URI; +import java.time.Duration; +import java.util.HashMap; +import java.util.Map; + +public class EnvironmentProperties +{ + public static final String DEFAULT_GLUE_CONNECTION = "glue_connection"; + private static final int CONNECT_TIMEOUT = 250; + protected static final String SECRET_NAME = "secret_name"; + protected static final String SPILL_KMS_KEY_ID = "spill_kms_key_id"; + protected static final String KMS_KEY_ID = "kms_key_id"; + private static final Logger logger = LoggerFactory.getLogger(EnvironmentProperties.class); + + public Map createEnvironment() throws RuntimeException + { + HashMap lambdaEnvironment = new HashMap<>(System.getenv()); + String glueConnectionName = lambdaEnvironment.get(DEFAULT_GLUE_CONNECTION); + + HashMap connectionEnvironment = new HashMap<>(); + if (StringUtils.isNotBlank(glueConnectionName)) { + Connection connection = getGlueConnection(glueConnectionName); + Map connectionProperties = 
connection.connectionPropertiesAsStrings(); + connectionProperties.putAll(authenticationConfigurationToMap(connection.authenticationConfiguration())); + + connectionEnvironment.putAll(connectionPropertiesToEnvironment(connectionProperties)); + connectionEnvironment.putAll(athenaPropertiesToEnvironment(connection.athenaProperties())); + } + + connectionEnvironment.putAll(lambdaEnvironment); // Overwrite connection environment variables with lambda environment variables + return connectionEnvironment; + } + + public Connection getGlueConnection(String glueConnectionName) throws RuntimeException + { + try { + GlueClient awsGlue = GlueClient.builder() + .endpointOverride(new URI("https://glue-gamma.us-west-2.amazonaws.com")) + .httpClientBuilder(ApacheHttpClient + .builder() + .connectionTimeout(Duration.ofMillis(CONNECT_TIMEOUT))) + .build(); + GetConnectionResponse glueConnection = awsGlue.getConnection(GetConnectionRequest.builder().name(glueConnectionName).build()); + logger.debug("Successfully retrieved connection {}", glueConnectionName); + return glueConnection.connection(); + } + catch (Exception err) { + logger.error("Failed to retrieve connection: {}, and parse the connection properties!", glueConnectionName); + throw new RuntimeException(err.toString()); + } + } + + private Map authenticationConfigurationToMap(AuthenticationConfiguration auth) + { + Map authMap = new HashMap<>(); + + if (StringUtils.isNotBlank(auth.secretArn())) { + String[] splitArn = auth.secretArn().split(":"); + authMap.put(SECRET_NAME, splitArn[splitArn.length - 1]); + } + return authMap; + } + + /** + * Maps glue athena properties to environment properties like 'kms_key_id' + * + * @param athenaProperties contains athena specific properties + * */ + public Map athenaPropertiesToEnvironment(Map athenaProperties) + { + if (athenaProperties.containsKey(SPILL_KMS_KEY_ID)) { + String kmsKeyId = athenaProperties.remove(SPILL_KMS_KEY_ID); + athenaProperties.put(KMS_KEY_ID, kmsKeyId); + } + 
return athenaProperties; + } + + /** + * Maps glue connection properties to environment properties like 'default' and 'secret_manager_gcp_creds_name' + * Default behavior is to not populate environment with these properties + * + * @param connectionProperties contains secret_name and connection properties + */ + public Map connectionPropertiesToEnvironment(Map connectionProperties) + { + return new HashMap<>(); + } +} diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/JdbcEnvironmentProperties.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/JdbcEnvironmentProperties.java new file mode 100644 index 0000000000..aa6227176c --- /dev/null +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/JdbcEnvironmentProperties.java @@ -0,0 +1,62 @@ +/*- + * #%L + * Amazon Athena Query Federation SDK + * %% + * Copyright (C) 2019 - 2024 Amazon Web Services + * %% + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * #L% + */ +package com.amazonaws.athena.connector.lambda.connection; + +import java.util.HashMap; +import java.util.Map; + +public abstract class JdbcEnvironmentProperties extends EnvironmentProperties +{ + protected static final String DEFAULT = "default"; + protected static final String JDBC_PARAMS = "JDBC_PARAMS"; + protected static final String DATABASE = "DATABASE"; + + @Override + public Map connectionPropertiesToEnvironment(Map connectionProperties) + { + HashMap environment = new HashMap<>(); + + // now construct jdbc string + String connectionString = getConnectionStringPrefix(connectionProperties) + connectionProperties.get("HOST") + + ":" + connectionProperties.get("PORT") + getConnectionStringSuffix(connectionProperties); + + environment.put(DEFAULT, connectionString); + return environment; + } + + protected abstract String getConnectionStringPrefix(Map connectionProperties); + + protected String getConnectionStringSuffix(Map connectionProperties) + { + String suffix = "/" + connectionProperties.get(DATABASE) + "?" 
+ + connectionProperties.getOrDefault(JDBC_PARAMS, ""); + + if (connectionProperties.containsKey(SECRET_NAME)) { + if (connectionProperties.containsKey(JDBC_PARAMS)) { // need to add delimiter + suffix = suffix + "&${" + connectionProperties.get(SECRET_NAME) + "}"; + } + else { + suffix = suffix + "${" + connectionProperties.get(SECRET_NAME) + "}"; + } + } + + return suffix; + } +} diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/MySqlEnvironmentProperties.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/MySqlEnvironmentProperties.java new file mode 100644 index 0000000000..bce78dbe45 --- /dev/null +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/MySqlEnvironmentProperties.java @@ -0,0 +1,31 @@ +/*- + * #%L + * Amazon Athena Query Federation SDK + * %% + * Copyright (C) 2019 - 2024 Amazon Web Services + * %% + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * #L% + */ +package com.amazonaws.athena.connector.lambda.connection; + +import java.util.Map; + +public class MySqlEnvironmentProperties extends JdbcEnvironmentProperties +{ + @Override + protected String getConnectionStringPrefix(Map connectionProperties) + { + return "mysql://jdbc:mysql://"; + } +} diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/OracleEnvironmentProperties.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/OracleEnvironmentProperties.java new file mode 100644 index 0000000000..5447aa3df3 --- /dev/null +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/OracleEnvironmentProperties.java @@ -0,0 +1,42 @@ +/*- + * #%L + * Amazon Athena Query Federation SDK + * %% + * Copyright (C) 2019 - 2024 Amazon Web Services + * %% + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * #L% + */ +package com.amazonaws.athena.connector.lambda.connection; + +import java.util.Map; + +public class OracleEnvironmentProperties extends JdbcEnvironmentProperties +{ + @Override + protected String getConnectionStringPrefix(Map connectionProperties) + { + String prefix = "oracle://jdbc:oracle:thin:"; + if (connectionProperties.containsKey(SECRET_NAME)) { + prefix = prefix + "${" + connectionProperties.get(SECRET_NAME) + "}"; + } + + return prefix; + } + + @Override + protected String getConnectionStringSuffix(Map connectionProperties) + { + return "/" + connectionProperties.get(DATABASE); + } +} diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/PostGreSqlEnvironmentProperties.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/PostGreSqlEnvironmentProperties.java new file mode 100644 index 0000000000..fbd0e16487 --- /dev/null +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/PostGreSqlEnvironmentProperties.java @@ -0,0 +1,31 @@ +/*- + * #%L + * Amazon Athena Query Federation SDK + * %% + * Copyright (C) 2019 - 2024 Amazon Web Services + * %% + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * #L% + */ +package com.amazonaws.athena.connector.lambda.connection; + +import java.util.Map; + +public class PostGreSqlEnvironmentProperties extends JdbcEnvironmentProperties +{ + @Override + protected String getConnectionStringPrefix(Map connectionProperties) + { + return "postgres://jdbc:postgresql://"; + } +} diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/SqlServerEnvironmentProperties.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/SqlServerEnvironmentProperties.java new file mode 100644 index 0000000000..8962545e8c --- /dev/null +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/SqlServerEnvironmentProperties.java @@ -0,0 +1,49 @@ +/*- + * #%L + * Amazon Athena Query Federation SDK + * %% + * Copyright (C) 2019 - 2024 Amazon Web Services + * %% + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * #L% + */ +package com.amazonaws.athena.connector.lambda.connection; + +import java.util.Map; + +public class SqlServerEnvironmentProperties extends JdbcEnvironmentProperties +{ + @Override + protected String getConnectionStringPrefix(Map connectionProperties) + { + return "sqlserver://jdbc:sqlserver://"; + } + + @Override + protected String getConnectionStringSuffix(Map connectionProperties) + { + String suffix = ";databaseName=" + connectionProperties.get(DATABASE) + ";" + + connectionProperties.getOrDefault(JDBC_PARAMS, ""); + + if (connectionProperties.containsKey(SECRET_NAME)) { + if (connectionProperties.containsKey(JDBC_PARAMS)) { // need to add delimiter + suffix = suffix + ";${" + connectionProperties.get(SECRET_NAME) + "}"; + } + else { + suffix = suffix + "${" + connectionProperties.get(SECRET_NAME) + "}"; + } + } + + return suffix; + } +} diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/SynapseEnvironmentProperties.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/SynapseEnvironmentProperties.java new file mode 100644 index 0000000000..149c5ec056 --- /dev/null +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/SynapseEnvironmentProperties.java @@ -0,0 +1,31 @@ +/*- + * #%L + * Amazon Athena Query Federation SDK + * %% + * Copyright (C) 2019 - 2024 Amazon Web Services + * %% + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * #L% + */ +package com.amazonaws.athena.connector.lambda.connection; + +import java.util.Map; + +public class SynapseEnvironmentProperties extends SqlServerEnvironmentProperties +{ + @Override + protected String getConnectionStringPrefix(Map connectionProperties) + { + return "synapse://jdbc:synapse://"; + } +} diff --git a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcCompositeHandler.java b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcCompositeHandler.java index d2a216a674..254fdf81ba 100644 --- a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcCompositeHandler.java +++ b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/MultiplexingJdbcCompositeHandler.java @@ -19,7 +19,6 @@ */ package com.amazonaws.athena.connectors.jdbc; -import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler; import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler; @@ -44,10 +43,10 @@ public MultiplexingJdbcCompositeHandler( { super( hasCatalogConnections ? - muxMetadataHandlerClass.getConstructor(java.util.Map.class).newInstance(GlueConnectionUtils.getGlueConnection()) : - metadataHandlerClass.getConstructor(java.util.Map.class).newInstance(GlueConnectionUtils.getGlueConnection()), + muxMetadataHandlerClass.getConstructor(java.util.Map.class).newInstance(System.getenv()) : + metadataHandlerClass.getConstructor(java.util.Map.class).newInstance(System.getenv()), hasCatalogConnections ? 
- muxRecordHandlerClass.getConstructor(java.util.Map.class).newInstance(GlueConnectionUtils.getGlueConnection()) : - recordHandlerClass.getConstructor(java.util.Map.class).newInstance(GlueConnectionUtils.getGlueConnection())); + muxRecordHandlerClass.getConstructor(java.util.Map.class).newInstance(System.getenv()) : + recordHandlerClass.getConstructor(java.util.Map.class).newInstance(System.getenv())); } } diff --git a/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlCompositeHandler.java b/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlCompositeHandler.java index 05776c3a2f..0b5b878167 100644 --- a/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlCompositeHandler.java +++ b/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlCompositeHandler.java @@ -19,7 +19,7 @@ */ package com.amazonaws.athena.connectors.mysql; -import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; +import com.amazonaws.athena.connector.lambda.connection.MySqlEnvironmentProperties; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -33,6 +33,6 @@ public class MySqlCompositeHandler { public MySqlCompositeHandler() { - super(new MySqlMetadataHandler(GlueConnectionUtils.getGlueConnection()), new MySqlRecordHandler(GlueConnectionUtils.getGlueConnection())); + super(new MySqlMetadataHandler(new MySqlEnvironmentProperties().createEnvironment()), new MySqlRecordHandler(new MySqlEnvironmentProperties().createEnvironment())); } } diff --git a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleCompositeHandler.java b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleCompositeHandler.java index 0842d73933..86a399601e 100644 --- a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleCompositeHandler.java +++ b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleCompositeHandler.java @@ -20,7 +20,7 @@ 
*/ package com.amazonaws.athena.connectors.oracle; -import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; +import com.amazonaws.athena.connector.lambda.connection.OracleEnvironmentProperties; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -34,6 +34,6 @@ public class OracleCompositeHandler { public OracleCompositeHandler() { - super(new OracleMetadataHandler(GlueConnectionUtils.getGlueConnection()), new OracleRecordHandler(GlueConnectionUtils.getGlueConnection())); + super(new OracleMetadataHandler(new OracleEnvironmentProperties().createEnvironment()), new OracleRecordHandler(new OracleEnvironmentProperties().createEnvironment())); } } diff --git a/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlCompositeHandler.java b/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlCompositeHandler.java index d493516957..16fe622869 100644 --- a/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlCompositeHandler.java +++ b/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlCompositeHandler.java @@ -19,7 +19,7 @@ */ package com.amazonaws.athena.connectors.postgresql; -import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; +import com.amazonaws.athena.connector.lambda.connection.PostGreSqlEnvironmentProperties; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -33,6 +33,6 @@ public class PostGreSqlCompositeHandler { public PostGreSqlCompositeHandler() { - super(new PostGreSqlMetadataHandler(GlueConnectionUtils.getGlueConnection()), new PostGreSqlRecordHandler(GlueConnectionUtils.getGlueConnection())); + super(new PostGreSqlMetadataHandler(new PostGreSqlEnvironmentProperties().createEnvironment()), new PostGreSqlRecordHandler(new PostGreSqlEnvironmentProperties().createEnvironment())); } } diff --git 
a/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerCompositeHandler.java b/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerCompositeHandler.java index 1cb4873fcb..86789e4cc2 100644 --- a/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerCompositeHandler.java +++ b/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerCompositeHandler.java @@ -19,13 +19,13 @@ */ package com.amazonaws.athena.connectors.sqlserver; -import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; +import com.amazonaws.athena.connector.lambda.connection.SqlServerEnvironmentProperties; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; public class SqlServerCompositeHandler extends CompositeHandler { public SqlServerCompositeHandler() { - super(new SqlServerMetadataHandler(GlueConnectionUtils.getGlueConnection()), new SqlServerRecordHandler(GlueConnectionUtils.getGlueConnection())); + super(new SqlServerMetadataHandler(new SqlServerEnvironmentProperties().createEnvironment()), new SqlServerRecordHandler(new SqlServerEnvironmentProperties().createEnvironment())); } } diff --git a/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseCompositeHandler.java b/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseCompositeHandler.java index d8f6c1f623..4c319877f4 100644 --- a/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseCompositeHandler.java +++ b/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseCompositeHandler.java @@ -19,13 +19,13 @@ */ package com.amazonaws.athena.connectors.synapse; -import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; +import com.amazonaws.athena.connector.lambda.connection.SynapseEnvironmentProperties; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; public class SynapseCompositeHandler 
extends CompositeHandler { public SynapseCompositeHandler() { - super(new SynapseMetadataHandler(GlueConnectionUtils.getGlueConnection()), new SynapseRecordHandler(GlueConnectionUtils.getGlueConnection())); + super(new SynapseMetadataHandler(new SynapseEnvironmentProperties().createEnvironment()), new SynapseRecordHandler(new SynapseEnvironmentProperties().createEnvironment())); } } From 39551b4d5487c47a6bb88967943662b071fc56d6 Mon Sep 17 00:00:00 2001 From: Aimery Methena <159072740+aimethed@users.noreply.github.com> Date: Thu, 5 Sep 2024 15:05:45 -0400 Subject: [PATCH 18/87] V2 merge jsii (#2240) --- athena-clickhouse/pom.xml | 2 +- athena-cloudwatch/pom.xml | 1 + athena-elasticsearch/pom.xml | 27 ----------------------- athena-federation-integ-test/pom.xml | 29 +----------------------- athena-federation-sdk/pom.xml | 31 ++------------------------ athena-jdbc/pom.xml | 27 ----------------------- athena-neptune/pom.xml | 2 +- athena-redis/pom.xml | 27 ----------------------- athena-snowflake/pom.xml | 2 +- athena-synapse/pom.xml | 2 +- pom.xml | 33 +++++++++++++++++++++++++--- 11 files changed, 38 insertions(+), 145 deletions(-) diff --git a/athena-clickhouse/pom.xml b/athena-clickhouse/pom.xml index 4179333a5e..f909864105 100644 --- a/athena-clickhouse/pom.xml +++ b/athena-clickhouse/pom.xml @@ -22,7 +22,7 @@ com.clickhouse clickhouse-jdbc - 0.6.4 + 0.6.5 all diff --git a/athena-cloudwatch/pom.xml b/athena-cloudwatch/pom.xml index 79757d6167..bd2dad00d8 100644 --- a/athena-cloudwatch/pom.xml +++ b/athena-cloudwatch/pom.xml @@ -26,6 +26,7 @@ software.amazon.awscdk logs ${aws-cdk.version} + test com.amazonaws diff --git a/athena-elasticsearch/pom.xml b/athena-elasticsearch/pom.xml index 7d62694419..56c6d3f956 100644 --- a/athena-elasticsearch/pom.xml +++ b/athena-elasticsearch/pom.xml @@ -62,33 +62,6 @@ ${log4j2Version} runtime - - software.amazon.jsii - jsii-runtime - ${jsii.version} - - - com.fasterxml.jackson.datatype - jackson-datatype-jsr310 - - - 
com.fasterxml.jackson.dataformat - jackson-dataformat-cbor - - - com.fasterxml.jackson.core - jackson-core - - - com.fasterxml.jackson.core - jackson-databind - - - com.fasterxml.jackson.core - jackson-annotations - - - software.amazon.awscdk diff --git a/athena-federation-integ-test/pom.xml b/athena-federation-integ-test/pom.xml index daa91f1927..59df6b8e0f 100644 --- a/athena-federation-integ-test/pom.xml +++ b/athena-federation-integ-test/pom.xml @@ -38,33 +38,6 @@ - - software.amazon.jsii - jsii-runtime - ${jsii.version} - - - com.fasterxml.jackson.datatype - jackson-datatype-jsr310 - - - com.fasterxml.jackson.dataformat - jackson-dataformat-cbor - - - com.fasterxml.jackson.core - jackson-core - - - com.fasterxml.jackson.core - jackson-databind - - - com.fasterxml.jackson.core - jackson-annotations - - - commons-cli commons-cli @@ -171,7 +144,7 @@ org.apache.commons commons-lang3 - 3.16.0 + 3.17.0 diff --git a/athena-federation-sdk/pom.xml b/athena-federation-sdk/pom.xml index 28a00969df..273530c5f2 100644 --- a/athena-federation-sdk/pom.xml +++ b/athena-federation-sdk/pom.xml @@ -53,33 +53,6 @@ - - software.amazon.jsii - jsii-runtime - ${jsii.version} - - - com.fasterxml.jackson.datatype - jackson-datatype-jsr310 - - - com.fasterxml.jackson.dataformat - jackson-dataformat-cbor - - - com.fasterxml.jackson.core - jackson-core - - - com.fasterxml.jackson.core - jackson-databind - - - com.fasterxml.jackson.core - jackson-annotations - - - software.amazon.awssdk apache-client @@ -303,8 +276,8 @@ org.apache.commons commons-lang3 - - 3.16.0 + + 3.17.0 net.jqwik diff --git a/athena-jdbc/pom.xml b/athena-jdbc/pom.xml index a5dadefca8..53cb6df19b 100644 --- a/athena-jdbc/pom.xml +++ b/athena-jdbc/pom.xml @@ -9,33 +9,6 @@ athena-jdbc 2022.47.1 - - software.amazon.jsii - jsii-runtime - ${jsii.version} - - - com.fasterxml.jackson.datatype - jackson-datatype-jsr310 - - - com.fasterxml.jackson.dataformat - jackson-dataformat-cbor - - - com.fasterxml.jackson.core - 
jackson-core - - - com.fasterxml.jackson.core - jackson-databind - - - com.fasterxml.jackson.core - jackson-annotations - - - com.amazonaws aws-athena-federation-sdk diff --git a/athena-neptune/pom.xml b/athena-neptune/pom.xml index 8726e7b0e7..e792f13948 100644 --- a/athena-neptune/pom.xml +++ b/athena-neptune/pom.xml @@ -78,7 +78,7 @@ org.yaml snakeyaml - 2.2 + 2.3 org.apache.ivy diff --git a/athena-redis/pom.xml b/athena-redis/pom.xml index 29e356b733..7fec21cf3d 100644 --- a/athena-redis/pom.xml +++ b/athena-redis/pom.xml @@ -9,33 +9,6 @@ athena-redis 2022.47.1 - - software.amazon.jsii - jsii-runtime - ${jsii.version} - - - com.fasterxml.jackson.datatype - jackson-datatype-jsr310 - - - com.fasterxml.jackson.dataformat - jackson-dataformat-cbor - - - com.fasterxml.jackson.core - jackson-core - - - com.fasterxml.jackson.core - jackson-databind - - - com.fasterxml.jackson.core - jackson-annotations - - - com.amazonaws aws-athena-federation-sdk diff --git a/athena-snowflake/pom.xml b/athena-snowflake/pom.xml index 4e0ea9926a..fcb730044b 100644 --- a/athena-snowflake/pom.xml +++ b/athena-snowflake/pom.xml @@ -30,7 +30,7 @@ net.snowflake snowflake-jdbc - 3.18.0 + 3.19.0 diff --git a/athena-synapse/pom.xml b/athena-synapse/pom.xml index 92dbfaf0da..21fb490ca8 100644 --- a/athena-synapse/pom.xml +++ b/athena-synapse/pom.xml @@ -35,7 +35,7 @@ com.microsoft.azure msal4j - 1.17.0 + 1.17.1 com.fasterxml.jackson.datatype diff --git a/pom.xml b/pom.xml index 90d23cd472..8e8e833541 100644 --- a/pom.xml +++ b/pom.xml @@ -19,7 +19,7 @@ 1.2.2 1.6.0 1.204.0 - 1.102.0 + 1.103.1 2.0.16 4.11.0 @@ -29,7 +29,7 @@ 7.10.2 2.17.2 - 3.4.0 + 3.5.0 2.23.1 13.0.0 33.3.0-jre @@ -45,7 +45,7 @@ 3.5.0 3.6.0 3.3.1 - 3.8.0 + 3.10.0 3.4.2 none @@ -58,6 +58,33 @@ pom import + + software.amazon.jsii + jsii-runtime + ${jsii.version} + + + com.fasterxml.jackson.datatype + jackson-datatype-jsr310 + + + com.fasterxml.jackson.dataformat + jackson-dataformat-cbor + + + com.fasterxml.jackson.core + 
jackson-core + + + com.fasterxml.jackson.core + jackson-databind + + + com.fasterxml.jackson.core + jackson-annotations + + + From c207bf9d22dea59ae50008f0844be5ac5630871f Mon Sep 17 00:00:00 2001 From: Mia Schoening Date: Wed, 4 Sep 2024 14:41:21 -0400 Subject: [PATCH 19/87] Split CFN templates into original and glue connections --- .../athena-aws-cmdb-connection.yaml | 139 ++++++++++++ athena-aws-cmdb/athena-aws-cmdb.yaml | 144 +++--------- .../athena-cloudera-hive-connection.yaml | 160 ++++++++++++++ .../athena-cloudera-hive.yaml | 174 ++++----------- .../athena-cloudera-impala-connection.yaml | 160 ++++++++++++++ .../athena-cloudera-impala.yaml | 173 ++++----------- .../athena-cloudwatch-metrics-connection.yaml | 133 +++++++++++ .../athena-cloudwatch-metrics.yaml | 133 ++--------- .../athena-cloudwatch-connection.yaml | 137 ++++++++++++ athena-cloudwatch/athena-cloudwatch.yaml | 52 ++--- .../athena-datalakegen2-connection.yaml | 168 ++++++++++++++ athena-datalakegen2/athena-datalakegen2.yaml | 175 ++++----------- .../athena-db2-as400-connection.yaml | 169 ++++++++++++++ athena-db2-as400/athena-db2-as400.yaml | 174 ++++----------- athena-db2/athena-db2-connection.yaml | 169 ++++++++++++++ athena-db2/athena-db2.yaml | 174 ++++----------- athena-docdb/athena-docdb-connection.yaml | 161 ++++++++++++++ athena-docdb/athena-docdb.yaml | 174 ++++----------- .../athena-dynamodb-connection.yaml | 144 ++++++++++++ athena-dynamodb/athena-dynamodb.yaml | 59 ++--- .../athena-elasticsearch-connection.yaml | 172 +++++++++++++++ .../athena-elasticsearch.yaml | 208 ++++++------------ athena-gcs/athena-gcs-connection.yaml | 151 +++++++++++++ athena-gcs/athena-gcs.yaml | 160 ++++---------- .../athena-google-bigquery-connection.yaml | 170 ++++++++++++++ .../athena-google-bigquery.yaml | 183 ++++----------- athena-hbase/athena-hbase-connection.yaml | 161 ++++++++++++++ athena-hbase/athena-hbase.yaml | 189 +++++----------- .../athena-hortonworks-hive-connection.yaml | 166 
++++++++++++++ .../athena-hortonworks-hive.yaml | 174 ++++----------- athena-kafka/athena-kafka.yaml | 98 ++------- athena-msk/athena-msk-connection.yaml | 160 ++++++++++++++ athena-msk/athena-msk.yaml | 77 ++----- athena-mysql/athena-mysql-connection.yaml | 162 ++++++++++++++ athena-mysql/athena-mysql.yaml | 75 ++----- athena-neptune/athena-neptune-connection.yaml | 165 ++++++++++++++ athena-neptune/athena-neptune.yaml | 196 +++++------------ athena-oracle/athena-oracle-connection.yaml | 163 ++++++++++++++ athena-oracle/athena-oracle.yaml | 92 ++------ .../athena-postgresql-connection.yaml | 171 ++++++++++++++ athena-postgresql/athena-postgresql.yaml | 74 ++----- athena-redis/athena-redis-connection.yaml | 159 +++++++++++++ athena-redis/athena-redis.yaml | 176 ++++----------- .../athena-redshift-connection.yaml | 151 +++++++++++++ athena-redshift/athena-redshift.yaml | 71 ++---- athena-saphana/athena-saphana-connection.yaml | 166 ++++++++++++++ athena-saphana/athena-saphana.yaml | 175 ++++----------- .../athena-snowflake-connection.yaml | 166 ++++++++++++++ athena-snowflake/athena-snowflake.yaml | 175 ++++----------- .../athena-sqlserver-connection.yaml | 164 ++++++++++++++ athena-sqlserver/athena-sqlserver.yaml | 75 ++----- athena-synapse/athena-synapse-connection.yaml | 169 ++++++++++++++ athena-synapse/athena-synapse.yaml | 95 ++------ .../athena-teradata-connection.yaml | 164 ++++++++++++++ athena-teradata/athena-teradata.yaml | 181 ++++----------- .../athena-timestream-connection.yaml | 139 ++++++++++++ athena-timestream/athena-timestream.yaml | 146 +++--------- athena-tpcds/athena-tpcds-connection.yaml | 130 +++++++++++ athena-tpcds/athena-tpcds.yaml | 127 ++--------- athena-vertica/athena-vertica-connection.yaml | 174 +++++++++++++++ athena-vertica/athena-vertica.yaml | 199 +++++------------ 61 files changed, 5787 insertions(+), 3354 deletions(-) create mode 100644 athena-aws-cmdb/athena-aws-cmdb-connection.yaml create mode 100644 
athena-cloudera-hive/athena-cloudera-hive-connection.yaml create mode 100644 athena-cloudera-impala/athena-cloudera-impala-connection.yaml create mode 100644 athena-cloudwatch-metrics/athena-cloudwatch-metrics-connection.yaml create mode 100644 athena-cloudwatch/athena-cloudwatch-connection.yaml create mode 100644 athena-datalakegen2/athena-datalakegen2-connection.yaml create mode 100644 athena-db2-as400/athena-db2-as400-connection.yaml create mode 100644 athena-db2/athena-db2-connection.yaml create mode 100644 athena-docdb/athena-docdb-connection.yaml create mode 100644 athena-dynamodb/athena-dynamodb-connection.yaml create mode 100644 athena-elasticsearch/athena-elasticsearch-connection.yaml create mode 100644 athena-gcs/athena-gcs-connection.yaml create mode 100644 athena-google-bigquery/athena-google-bigquery-connection.yaml create mode 100644 athena-hbase/athena-hbase-connection.yaml create mode 100644 athena-hortonworks-hive/athena-hortonworks-hive-connection.yaml create mode 100644 athena-msk/athena-msk-connection.yaml create mode 100644 athena-mysql/athena-mysql-connection.yaml create mode 100644 athena-neptune/athena-neptune-connection.yaml create mode 100644 athena-oracle/athena-oracle-connection.yaml create mode 100644 athena-postgresql/athena-postgresql-connection.yaml create mode 100644 athena-redis/athena-redis-connection.yaml create mode 100644 athena-redshift/athena-redshift-connection.yaml create mode 100644 athena-saphana/athena-saphana-connection.yaml create mode 100644 athena-snowflake/athena-snowflake-connection.yaml create mode 100644 athena-sqlserver/athena-sqlserver-connection.yaml create mode 100644 athena-synapse/athena-synapse-connection.yaml create mode 100644 athena-teradata/athena-teradata-connection.yaml create mode 100644 athena-timestream/athena-timestream-connection.yaml create mode 100644 athena-tpcds/athena-tpcds-connection.yaml create mode 100644 athena-vertica/athena-vertica-connection.yaml diff --git 
a/athena-aws-cmdb/athena-aws-cmdb-connection.yaml b/athena-aws-cmdb/athena-aws-cmdb-connection.yaml new file mode 100644 index 0000000000..454119caa7 --- /dev/null +++ b/athena-aws-cmdb/athena-aws-cmdb-connection.yaml @@ -0,0 +1,139 @@ +Transform: 'AWS::Serverless-2016-10-31' +Metadata: + 'AWS::ServerlessRepo::Application': + Name: AthenaAwsCmdbConnectorWithGlueConnection + Description: 'This connector enables Amazon Athena to communicate with various AWS Services, making your resource inventories accessible via SQL.' + Author: 'default author' + SpdxLicenseId: Apache-2.0 + LicenseUrl: LICENSE.txt + ReadmeUrl: README.md + Labels: + - athena-federation + HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' + SemanticVersion: 2022.47.1 + SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' +Parameters: + LambdaFunctionName: + Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' + Type: String + AllowedPattern: ^[a-z0-9-_]{1,64}$ + SpillBucket: + Description: 'The name of the bucket where this function can spill data.' + Type: String + GlueConnection: + Description: "Name of glue connection storing connection details for Federated Data source." + Type: String + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
+ Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" + +Conditions: + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + +Resources: + ConnectorConfig: + Type: 'AWS::Serverless::Function' + Properties: + Environment: + Variables: + glue_connection: !Ref GlueConnection + FunctionName: !Ref LambdaFunctionName + Handler: "com.amazonaws.athena.connectors.aws.cmdb.AwsCmdbCompositeHandler" + CodeUri: "./target/athena-aws-cmdb-2022.47.1.jar" + Description: "Enables Amazon Athena to communicate with various AWS Services, making your resource inventories accessible via SQL." + Runtime: java11 + Timeout: 900 + MemorySize: 3008 + Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - autoscaling:Describe* + - elasticloadbalancing:Describe* + - ec2:Describe* + - elasticmapreduce:Describe* + - elasticmapreduce:List* + - rds:Describe* + - rds:ListTagsForResource + - athena:GetQueryExecution + - s3:ListBucket + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: 
Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + Roles: + - !Ref FunctionRole + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-aws-cmdb/athena-aws-cmdb.yaml b/athena-aws-cmdb/athena-aws-cmdb.yaml index 92e23d416f..b3265cd1eb 100644 --- a/athena-aws-cmdb/athena-aws-cmdb.yaml +++ b/athena-aws-cmdb/athena-aws-cmdb.yaml @@ -13,7 +13,7 @@ Metadata: SemanticVersion: 2022.47.1 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: - LambdaFunctionName: + AthenaCatalogName: Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' Type: String AllowedPattern: ^[a-z0-9-_]{1,64}$ @@ -24,18 +24,6 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill - GlueConnection: - Description: "(Optional) Name of glue connection storing connection details for Federated Data source." - Type: String - Default: "" - KmsKeyId: - Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. 
Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." - Type: String - Default: "" - LambdaRoleArn: - Description: "(Optional) A custom role to be used by the Connector lambda" - Type: String - Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -48,126 +36,46 @@ Parameters: Description: "WARNING: If set to 'true' encryption for spilled data is disabled." Default: 'false' Type: String - PermissionsBoundaryArn: + PermissionsBoundaryARN: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String - Conditions: - HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] - NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] - CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] - HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] - NotHasGlueConnectionAndHasKmsKeyId: !And - - !Not [ !Condition HasGlueConnection ] - - !Condition HasKmsKeyId - + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] Resources: ConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption] - spill_bucket: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket] - spill_prefix: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix] - kms_key_id: !If [NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue"] - glue_connection: !If [HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue"] - FunctionName: !Ref LambdaFunctionName + disable_spill_encryption: !Ref DisableSpillEncryption + spill_bucket: !Ref SpillBucket + spill_prefix: !Ref SpillPrefix + FunctionName: !Ref 
AthenaCatalogName Handler: "com.amazonaws.athena.connectors.aws.cmdb.AwsCmdbCompositeHandler" CodeUri: "./target/athena-aws-cmdb-2022.47.1.jar" Description: "Enables Amazon Athena to communicate with various AWS Services, making your resource inventories accessible via SQL." Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] - - FunctionRole: - Condition: NotHasLambdaRole - Type: AWS::IAM::Role - Properties: - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] - ManagedPolicyArns: - - "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" - AssumeRolePolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Principal: - Service: - - lambda.amazonaws.com - Action: - - "sts:AssumeRole" - - FunctionExecutionPolicy: - Condition: NotHasLambdaRole - Type: "AWS::IAM::Policy" - Properties: - PolicyName: FunctionExecutionPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Action: - - autoscaling:Describe* - - elasticloadbalancing:Describe* - - ec2:Describe* - - elasticmapreduce:Describe* - - elasticmapreduce:List* - - rds:Describe* - - rds:ListTagsForResource - - athena:GetQueryExecution - - s3:ListBucket - - athena:GetQueryExecution - Effect: Allow - Resource: '*' - - Action: - - s3:GetObject - - s3:ListBucket - - s3:GetBucketLocation - - s3:GetObjectVersion - - s3:PutObject - - s3:PutObjectAcl - - s3:GetLifecycleConfiguration - - s3:PutLifecycleConfiguration - - s3:DeleteObject - Effect: Allow - Resource: - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName} - - bucketName: - Ref: SpillBucket - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName}/* - - bucketName: - Ref: SpillBucket - - !If - - HasGlueConnection + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] + Policies: + - Statement: - Action: - - glue:GetConnection + - 
autoscaling:Describe* + - elasticloadbalancing:Describe* + - ec2:Describe* + - elasticmapreduce:Describe* + - elasticmapreduce:List* + - rds:Describe* + - rds:ListTagsForResource + - athena:GetQueryExecution + - s3:ListBucket + - athena:GetQueryExecution Effect: Allow - Resource: - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' - - !Ref "AWS::NoValue" - Roles: - - !Ref FunctionRole - - FunctionKmsPolicy: - Condition: CreateKmsPolicy - Type: "AWS::IAM::Policy" - Properties: - PolicyName: FunctionKmsPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Action: - - kms:GenerateRandom - Resource: '*' - - Effect: Allow - Action: - - kms:GenerateDataKey - Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" - Roles: - - !Ref FunctionRole \ No newline at end of file + Resource: '*' + Version: '2012-10-17' + #S3CrudPolicy allows our connector to spill large responses to S3. You can optionally replace this pre-made policy + #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. + - S3CrudPolicy: + BucketName: !Ref SpillBucket \ No newline at end of file diff --git a/athena-cloudera-hive/athena-cloudera-hive-connection.yaml b/athena-cloudera-hive/athena-cloudera-hive-connection.yaml new file mode 100644 index 0000000000..75343bd08a --- /dev/null +++ b/athena-cloudera-hive/athena-cloudera-hive-connection.yaml @@ -0,0 +1,160 @@ +Transform: 'AWS::Serverless-2016-10-31' +Metadata: + 'AWS::ServerlessRepo::Application': + Name: AthenaClouderaHiveConnectorWithGlueConnection + Description: 'This connector enables Amazon Athena to communicate with your Cloudera Hive instance(s) using JDBC driver.' 
+ Author: 'default author' + SpdxLicenseId: Apache-2.0 + LicenseUrl: LICENSE.txt + ReadmeUrl: README.md + Labels: + - athena-federation + HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' + SemanticVersion: 2022.47.1 + SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' +Parameters: + LambdaFunctionName: + Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' + Type: String + AllowedPattern: ^[a-z0-9-_]{1,64}$ + SecretName: + Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' + Type: String + SpillBucket: + Description: 'The name of the bucket where this function can spill data.' + Type: String + GlueConnection: + Description: "Name of glue connection storing connection details for Federated Data source." + Type: String + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." + Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" + SecurityGroupIds: + Description: 'One or more SecurityGroup IDs corresponding to the SecurityGroup that should be applied to the Lambda function. (e.g. 
sg1,sg2,sg3)' + Type: 'List' + SubnetIds: + Description: 'One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' + Type: 'List' +Conditions: + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + CreateKmsPolicy: !And [!Condition NotHasLambdaRole, !Condition HasKmsKeyId] +Resources: + JdbcConnectorConfig: + Type: 'AWS::Serverless::Function' + Properties: + Environment: + Variables: + glue_connection: !Ref GlueConnection + FunctionName: !Ref LambdaFunctionName + Handler: "com.amazonaws.athena.connectors.cloudera.HiveMuxCompositeHandler" + CodeUri: "./target/athena-cloudera-hive-2022.47.1.jar" + Description: "Enables Amazon Athena to communicate with Cloudera Hive using JDBC" + Runtime: java11 + Timeout: 900 + MemorySize: 3008 + Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] + VpcConfig: + SecurityGroupIds: !Ref SecurityGroupIds + SubnetIds: !Ref SubnetIds + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' + - Action: + - logs:CreateLogGroup + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' + - Action: + - logs:CreateLogStream + - logs:PutLogEvents + Effect: Allow + Resource: !Sub 
'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' + - Action: + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-cloudera-hive/athena-cloudera-hive.yaml b/athena-cloudera-hive/athena-cloudera-hive.yaml index cffb40b8e6..fd56ac05f7 100644 --- a/athena-cloudera-hive/athena-cloudera-hive.yaml +++ b/athena-cloudera-hive/athena-cloudera-hive.yaml @@ -20,8 +20,7 @@ Parameters: DefaultConnectionString: Description: 'The default connection string is used when catalog is "lambda:${LambdaFunctionName}". Catalog specific Connection Strings can be added later. Format: ${DatabaseType}://${NativeJdbcConnectionString}.' 
Type: String - Default: "" - SecretName: + SecretNamePrefix: Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' Type: String SpillBucket: @@ -31,18 +30,6 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill - GlueConnection: - Description: "(Optional) Name of glue connection storing connection details for Federated Data source." - Type: String - Default: "" - KmsKeyId: - Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." - Type: String - Default: "" - LambdaRoleArn: - Description: "(Optional) A custom role to be used by the Connector lambda" - Type: String - Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -61,139 +48,62 @@ Parameters: SubnetIds: Description: 'One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. 
subnet1,subnet2)' Type: 'List' - PermissionsBoundaryArn: + PermissionsBoundaryARN: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String Conditions: - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] - NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] - HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] - CreateKmsPolicy: !And [!Condition NotHasLambdaRole, !Condition HasKmsKeyId] - HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] - NotHasGlueConnectionAndHasKmsKeyId: !And - - !Not [ !Condition HasGlueConnection ] - - !Condition HasKmsKeyId + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] Resources: JdbcConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - default: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultConnectionString ] - disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] - spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] - spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] - kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] - glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] + disable_spill_encryption: !Ref DisableSpillEncryption + spill_bucket: !Ref SpillBucket + spill_prefix: !Ref SpillPrefix + default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.cloudera.HiveMuxCompositeHandler" CodeUri: "./target/athena-cloudera-hive-2022.47.1.jar" - Description: "Enables Amazon Athena to communicate with Cloudera Hive using JDBC" + Description: "Enables Amazon Athena to communicate with Cloudera Hive using JDBC" Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - Role: !If 
[NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] - VpcConfig: - SecurityGroupIds: !Ref SecurityGroupIds - SubnetIds: !Ref SubnetIds - - FunctionRole: - Condition: NotHasLambdaRole - Type: AWS::IAM::Role - Properties: - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] - ManagedPolicyArns: - - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" - AssumeRolePolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Principal: - Service: - - lambda.amazonaws.com - Action: - - "sts:AssumeRole" - - FunctionExecutionPolicy: - Condition: NotHasLambdaRole - Type: "AWS::IAM::Policy" - Properties: - Roles: - - !Ref FunctionRole - PolicyName: FunctionExecutionPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Action: - - secretsmanager:GetSecretValue - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' - - Action: - - logs:CreateLogGroup - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' - - Action: - - logs:CreateLogStream - - logs:PutLogEvents - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' - - Action: - - athena:GetQueryExecution - Effect: Allow - Resource: '*' - - Action: - - ec2:CreateNetworkInterface - - ec2:DeleteNetworkInterface - - ec2:DescribeNetworkInterfaces - - ec2:DetachNetworkInterface - Effect: Allow - Resource: '*' - - Action: - - s3:GetObject - - s3:ListBucket - - s3:GetBucketLocation - - s3:GetObjectVersion - - s3:PutObject - - s3:PutObjectAcl - - s3:GetLifecycleConfiguration - - s3:PutLifecycleConfiguration - - s3:DeleteObject - Effect: Allow - Resource: - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName} - - bucketName: - Ref: SpillBucket - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName}/* - - bucketName: - Ref: 
SpillBucket - - !If - - HasGlueConnection + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] + Policies: + - Statement: - Action: - - glue:GetConnection + - secretsmanager:GetSecretValue Effect: Allow - Resource: - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' - - !Ref "AWS::NoValue" - - FunctionKmsPolicy: - Condition: CreateKmsPolicy - Type: "AWS::IAM::Policy" - Properties: - PolicyName: FunctionKmsPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Action: - - kms:GenerateRandom - Resource: '*' - - Effect: Allow - Action: - - kms:GenerateDataKey - Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" - Roles: - - !Ref FunctionRole \ No newline at end of file + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' + Version: '2012-10-17' + - Statement: + - Action: + - logs:CreateLogGroup + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' + Version: '2012-10-17' + - Statement: + - Action: + - logs:CreateLogStream + - logs:PutLogEvents + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' + Version: '2012-10-17' + - Statement: + - Action: + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + Version: '2012-10-17' + #S3CrudPolicy allows our connector to spill large responses to S3. You can optionally replace this pre-made policy + #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. + - S3CrudPolicy: + BucketName: !Ref SpillBucket + #VPCAccessPolicy allows our connector to run in a VPC so that it can access your data source. 
+ - VPCAccessPolicy: {} + VpcConfig: + SecurityGroupIds: !Ref SecurityGroupIds + SubnetIds: !Ref SubnetIds \ No newline at end of file diff --git a/athena-cloudera-impala/athena-cloudera-impala-connection.yaml b/athena-cloudera-impala/athena-cloudera-impala-connection.yaml new file mode 100644 index 0000000000..ea829130f1 --- /dev/null +++ b/athena-cloudera-impala/athena-cloudera-impala-connection.yaml @@ -0,0 +1,160 @@ +Transform: 'AWS::Serverless-2016-10-31' +Metadata: + 'AWS::ServerlessRepo::Application': + Name: AthenaClouderaImpalaConnectorWithGlueConnection + Description: 'This connector enables Amazon Athena to communicate with your Cloudera Impala instance(s) using JDBC driver.' + Author: 'default author' + SpdxLicenseId: Apache-2.0 + LicenseUrl: LICENSE.txt + ReadmeUrl: README.md + Labels: + - athena-federation + HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' + SemanticVersion: 2022.47.1 + SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' +Parameters: + LambdaFunctionName: + Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' + Type: String + AllowedPattern: ^[a-z0-9-_]{1,64}$ + SecretName: + Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' + Type: String + SpillBucket: + Description: 'The name of the bucket where this function can spill data.' + Type: String + GlueConnection: + Description: "Name of glue connection storing connection details for Federated Data source." 
+ Type: String + SecurityGroupIds: + Description: 'One or more SecurityGroup IDs corresponding to the SecurityGroup that should be applied to the Lambda function. (e.g. sg1,sg2,sg3)' + Type: 'List' + SubnetIds: + Description: 'One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' + Type: 'List' + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." + Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" +Conditions: + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + CreateKmsPolicy: !And [!Condition NotHasLambdaRole, !Condition HasKmsKeyId] +Resources: + JdbcConnectorConfig: + Type: 'AWS::Serverless::Function' + Properties: + Environment: + Variables: + glue_connection: !Ref GlueConnection + FunctionName: !Ref LambdaFunctionName + Handler: "com.amazonaws.athena.connectors.cloudera.ImpalaMuxCompositeHandler" + CodeUri: "./target/athena-cloudera-impala-2022.47.1.jar" + Description: "Enables Amazon Athena to communicate with Cloudera Impala using JDBC" + Runtime: java11 + Timeout: 900 + MemorySize: 3008 + Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] + VpcConfig: + SecurityGroupIds: !Ref SecurityGroupIds + SubnetIds: !Ref SubnetIds + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + 
Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' + - Action: + - logs:CreateLogGroup + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' + - Action: + - logs:CreateLogStream + - logs:PutLogEvents + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' + - Action: + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + 
Roles: + - !Ref FunctionRole diff --git a/athena-cloudera-impala/athena-cloudera-impala.yaml b/athena-cloudera-impala/athena-cloudera-impala.yaml index c7649ccd23..b26d1c11f1 100644 --- a/athena-cloudera-impala/athena-cloudera-impala.yaml +++ b/athena-cloudera-impala/athena-cloudera-impala.yaml @@ -20,8 +20,7 @@ Parameters: DefaultConnectionString: Description: 'The default connection string is used when catalog is "lambda:${LambdaFunctionName}". Catalog specific Connection Strings can be added later. Format: ${DatabaseType}://${NativeJdbcConnectionString}.' Type: String - Default: "" - SecretName: + SecretNamePrefix: Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' Type: String SpillBucket: @@ -31,10 +30,6 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill - GlueConnection: - Description: "(Optional) Name of glue connection storing connection details for Federated Data source." - Type: String - Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -53,38 +48,27 @@ Parameters: SubnetIds: Description: 'One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. 
subnet1,subnet2)' Type: 'List' - PermissionsBoundaryArn: + PermissionsBoundaryARN: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String - KmsKeyId: - Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." - Type: String - LambdaRoleArn: - Description: "(Optional) A custom role to be used by the Connector lambda" - Type: String + LambdaEncryptionKmsKeyARN: + Description: "(Optional) The KMS Key ARN used for encrypting your Lambda environment variables." Default: "" + Type: String Conditions: - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] - HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] - NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] - CreateKmsPolicy: !And [!Condition NotHasLambdaRole, !Condition HasKmsKeyId] - HasGlueConnection: !Not [!Equals [!Ref GlueConnection, ""]] - NotHasGlueConnectionAndHasKmsKeyId: !And - - !Not [ !Condition HasGlueConnection ] - - !Condition HasKmsKeyId + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] + HasLambdaEncryptionKmsKeyARN: !Not [ !Equals [ !Ref LambdaEncryptionKmsKeyARN, "" ] ] Resources: JdbcConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - default: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultConnectionString ] - disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] - spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] - spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] - kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] - glue_connection: !If [ HasGlueConnection, !Ref 
GlueConnection, !Ref "AWS::NoValue" ] + disable_spill_encryption: !Ref DisableSpillEncryption + spill_bucket: !Ref SpillBucket + spill_prefix: !Ref SpillPrefix + default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.cloudera.ImpalaMuxCompositeHandler" CodeUri: "./target/athena-cloudera-impala-2022.47.1.jar" @@ -92,107 +76,40 @@ Resources: Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] + Policies: + - Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' + Version: '2012-10-17' + - Statement: + - Action: + - logs:CreateLogGroup + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' + Version: '2012-10-17' + - Statement: + - Action: + - logs:CreateLogStream + - logs:PutLogEvents + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' + Version: '2012-10-17' + - Statement: + - Action: + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + Version: '2012-10-17' + #S3CrudPolicy allows our connector to spill large responses to S3. You can optionally replace this pre-made policy + #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. + - S3CrudPolicy: + BucketName: !Ref SpillBucket + #VPCAccessPolicy allows our connector to run in a VPC so that it can access your data source. 
+ - VPCAccessPolicy: {} VpcConfig: SecurityGroupIds: !Ref SecurityGroupIds SubnetIds: !Ref SubnetIds - - FunctionRole: - Condition: NotHasLambdaRole - Type: AWS::IAM::Role - Properties: - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] - ManagedPolicyArns: - - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" - AssumeRolePolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Principal: - Service: - - lambda.amazonaws.com - Action: - - "sts:AssumeRole" - - FunctionExecutionPolicy: - Condition: NotHasLambdaRole - Type: "AWS::IAM::Policy" - Properties: - Roles: - - !Ref FunctionRole - PolicyName: FunctionExecutionPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Action: - - secretsmanager:GetSecretValue - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' - - Action: - - logs:CreateLogGroup - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' - - Action: - - logs:CreateLogStream - - logs:PutLogEvents - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' - - Action: - - athena:GetQueryExecution - Effect: Allow - Resource: '*' - - Action: - - ec2:CreateNetworkInterface - - ec2:DeleteNetworkInterface - - ec2:DescribeNetworkInterfaces - - ec2:DetachNetworkInterface - Effect: Allow - Resource: '*' - - Action: - - s3:GetObject - - s3:ListBucket - - s3:GetBucketLocation - - s3:GetObjectVersion - - s3:PutObject - - s3:PutObjectAcl - - s3:GetLifecycleConfiguration - - s3:PutLifecycleConfiguration - - s3:DeleteObject - Effect: Allow - Resource: - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName} - - bucketName: - Ref: SpillBucket - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName}/* - - bucketName: - Ref: SpillBucket - - !If - - HasGlueConnection - - Action: - - 
glue:GetConnection - Effect: Allow - Resource: - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' - - !Ref "AWS::NoValue" - - FunctionKmsPolicy: - Condition: CreateKmsPolicy - Type: "AWS::IAM::Policy" - Properties: - PolicyName: FunctionKmsPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Action: - - kms:GenerateRandom - Resource: '*' - - Effect: Allow - Action: - - kms:GenerateDataKey - Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" - Roles: - - !Ref FunctionRole + KmsKeyArn: !If [ HasLambdaEncryptionKmsKeyARN, !Ref LambdaEncryptionKmsKeyARN, !Ref "AWS::NoValue" ] \ No newline at end of file diff --git a/athena-cloudwatch-metrics/athena-cloudwatch-metrics-connection.yaml b/athena-cloudwatch-metrics/athena-cloudwatch-metrics-connection.yaml new file mode 100644 index 0000000000..b967005c40 --- /dev/null +++ b/athena-cloudwatch-metrics/athena-cloudwatch-metrics-connection.yaml @@ -0,0 +1,133 @@ +Transform: 'AWS::Serverless-2016-10-31' +Metadata: + 'AWS::ServerlessRepo::Application': + Name: AthenaCloudwatchMetricsConnectorWithGlueConnection + Description: 'This connector enables Amazon Athena to communicate with Cloudwatch Metrics, making your metrics data accessible via SQL.' + Author: 'default author' + SpdxLicenseId: Apache-2.0 + LicenseUrl: LICENSE.txt + ReadmeUrl: README.md + Labels: + - athena-federation + HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' + SemanticVersion: 2022.47.1 + SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' +Parameters: + LambdaFunctionName: + Description: 'This is the name of the lambda function that will be created. 
This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' + Type: String + AllowedPattern: ^[a-z0-9-_]{1,64}$ + SpillBucket: + Description: 'The name of the bucket where this function can spill data.' + Type: String + GlueConnection: + Description: "Name of glue connection storing connection details for Federated Data source." + Type: String + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." + Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" + +Conditions: + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + +Resources: + ConnectorConfig: + Type: 'AWS::Serverless::Function' + Properties: + Environment: + Variables: + glue_connection: !Ref GlueConnection + FunctionName: !Ref LambdaFunctionName + Handler: "com.amazonaws.athena.connectors.cloudwatch.metrics.MetricsCompositeHandler" + CodeUri: "./target/athena-cloudwatch-metrics-2022.47.1.jar" + Description: "Enables Amazon Athena to communicate with Cloudwatch Metrics, making your metrics data accessible via SQL" + Runtime: java11 + Timeout: 900 + MemorySize: 3008 + Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - 
!Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - cloudwatch:Describe* + - cloudwatch:Get* + - cloudwatch:List* + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-cloudwatch-metrics/athena-cloudwatch-metrics.yaml b/athena-cloudwatch-metrics/athena-cloudwatch-metrics.yaml index 1a64f9e4a2..d1d815063c 100644 --- a/athena-cloudwatch-metrics/athena-cloudwatch-metrics.yaml +++ b/athena-cloudwatch-metrics/athena-cloudwatch-metrics.yaml @@ -13,7 +13,7 @@ Metadata: SemanticVersion: 2022.47.1 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: - LambdaFunctionName: + AthenaCatalogName: Description: 'This is the name of the lambda function that will be created. 
This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' Type: String AllowedPattern: ^[a-z0-9-_]{1,64}$ @@ -24,10 +24,6 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill - GlueConnection: - Description: "(Optional) Name of glue connection storing connection details for Federated Data source." - Type: String - Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -40,129 +36,40 @@ Parameters: Description: "WARNING: If set to 'true' encryption for spilled data is disabled." Default: 'false' Type: String - PermissionsBoundaryArn: + PermissionsBoundaryARN: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String - KmsKeyId: - Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
- Type: String - Default: "" - LambdaRoleArn: - Description: "(Optional) A custom role to be used by the Connector lambda" - Type: String - Default: "" - Conditions: - HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] - NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] - CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] - HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] - NotHasGlueConnectionAndHasKmsKeyId: !And - - !Not [ !Condition HasGlueConnection ] - - !Condition HasKmsKeyId - + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] Resources: ConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption] - spill_bucket: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket] - spill_prefix: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix] - kms_key_id: !If [NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue"] - glue_connection: !If [HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue"] - FunctionName: !Ref LambdaFunctionName + disable_spill_encryption: !Ref DisableSpillEncryption + spill_bucket: !Ref SpillBucket + spill_prefix: !Ref SpillPrefix + FunctionName: !Ref AthenaCatalogName Handler: "com.amazonaws.athena.connectors.cloudwatch.metrics.MetricsCompositeHandler" CodeUri: "./target/athena-cloudwatch-metrics-2022.47.1.jar" Description: "Enables Amazon Athena to communicate with Cloudwatch Metrics, making your metrics data accessible via SQL" Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] - - FunctionRole: - Condition: NotHasLambdaRole - Type: AWS::IAM::Role - Properties: - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref 
PermissionsBoundaryArn, !Ref "AWS::NoValue" ] - ManagedPolicyArns: - - "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" - AssumeRolePolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Principal: - Service: - - lambda.amazonaws.com - Action: - - "sts:AssumeRole" - - FunctionExecutionPolicy: - Condition: NotHasLambdaRole - Type: "AWS::IAM::Policy" - Properties: - Roles: - - !Ref FunctionRole - PolicyName: FunctionExecutionPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Action: - - cloudwatch:Describe* - - cloudwatch:Get* - - cloudwatch:List* - - athena:GetQueryExecution - - Effect: Allow - Resource: '*' - - Action: - - s3:GetObject - - s3:ListBucket - - s3:GetBucketLocation - - s3:GetObjectVersion - - s3:PutObject - - s3:PutObjectAcl - - s3:GetLifecycleConfiguration - - s3:PutLifecycleConfiguration - - s3:DeleteObject - Effect: Allow - Resource: - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName} - - bucketName: - Ref: SpillBucket - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName}/* - - bucketName: - Ref: SpillBucket - - !If - - HasGlueConnection + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] + Policies: + - Statement: - Action: - - glue:GetConnection + - cloudwatch:Describe* + - cloudwatch:Get* + - cloudwatch:List* + - athena:GetQueryExecution Effect: Allow - Resource: - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' - - !Ref "AWS::NoValue" - - FunctionKmsPolicy: - Condition: CreateKmsPolicy - Type: "AWS::IAM::Policy" - Properties: - PolicyName: FunctionKmsPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Action: - - kms:GenerateRandom - Resource: '*' - - Effect: Allow - Action: - - kms:GenerateDataKey - Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" - 
Roles: - - !Ref FunctionRole \ No newline at end of file + Resource: '*' + Version: '2012-10-17' + #S3CrudPolicy allows our connector to spill large responses to S3. You can optionally replace this pre-made policy + #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. + - S3CrudPolicy: + BucketName: !Ref SpillBucket \ No newline at end of file diff --git a/athena-cloudwatch/athena-cloudwatch-connection.yaml b/athena-cloudwatch/athena-cloudwatch-connection.yaml new file mode 100644 index 0000000000..7363f5dc62 --- /dev/null +++ b/athena-cloudwatch/athena-cloudwatch-connection.yaml @@ -0,0 +1,137 @@ +Transform: 'AWS::Serverless-2016-10-31' +Metadata: + 'AWS::ServerlessRepo::Application': + Name: AthenaCloudwatchConnectorWithGlueConnection + Description: 'This connector enables Amazon Athena to communicate with Cloudwatch, making your logs accessible via SQL.' + Author: 'default author' + SpdxLicenseId: Apache-2.0 + LicenseUrl: LICENSE.txt + ReadmeUrl: README.md + Labels: + - athena-federation + HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' + SemanticVersion: 2022.47.1 + SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' +Parameters: + LambdaFunctionName: + Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' + Type: String + AllowedPattern: ^[a-z0-9-_]{1,64}$ + SpillBucket: + Description: 'The name of the bucket where this function can spill data.' + Type: String + GlueConnection: + Description: "Name of glue connection storing connection details for Federated Data source." + Type: String + LambdaRole: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. 
Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." + Type: String + Default: "" + +Conditions: + HasKmsKeyId: !Not [!Equals [!Ref KmsKeyId, ""]] + NotHasLambdaRole: !Equals [!Ref LambdaRole, ""] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + +Resources: + ConnectorConfig: + Type: 'AWS::Serverless::Function' + Properties: + Environment: + Variables: + glue_connection: !Ref GlueConnection + FunctionName: !Ref LambdaFunctionName + Handler: "com.amazonaws.athena.connectors.cloudwatch.CloudwatchCompositeHandler" + CodeUri: "./target/athena-cloudwatch-2022.47.1.jar" + Description: "Enables Amazon Athena to communicate with Cloudwatch, making your log accessible via SQL" + Runtime: java11 + Timeout: 900 + MemorySize: 3008 + Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRole] + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - logs:Describe* + - logs:Get* + - logs:List* + - logs:StartQuery + - logs:StopQuery + - logs:TestMetricFilter + - logs:FilterLogEvents + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + 
- arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + Roles: + - !Ref FunctionRole + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - kms:GenerateRandom + Effect: Allow + Resource: '*' + - Action: + - kms:GenerateDataKey + Effect: Allow + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole diff --git a/athena-cloudwatch/athena-cloudwatch.yaml b/athena-cloudwatch/athena-cloudwatch.yaml index 968cadc2a0..89d10f6d48 100644 --- a/athena-cloudwatch/athena-cloudwatch.yaml +++ b/athena-cloudwatch/athena-cloudwatch.yaml @@ -13,7 +13,7 @@ Metadata: SemanticVersion: 2022.47.1 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: - LambdaFunctionName: + AthenaCatalogName: Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' Type: String AllowedPattern: ^[a-z0-9-_]{1,64}$ @@ -24,10 +24,6 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill - GlueConnection: - Description: "(Optional) Name of glue connection storing connection details for Federated Data source." - Type: String - Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -44,24 +40,20 @@ Parameters: Description: "WARNING: If set to 'true' encryption for spilled data is disabled." 
Default: 'false' Type: String - KmsKeyId: + KMSKeyId: Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." Type: String Default: "" - PermissionsBoundaryArn: + PermissionsBoundaryARN: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String Conditions: - HasKmsKeyId: !Not [!Equals [!Ref KmsKeyId, ""]] + HasKMSKeyId: !Not [!Equals [!Ref KMSKeyId, ""]] NotHasLambdaRole: !Equals [!Ref LambdaRole, ""] - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] - CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] - HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] - NotHasGlueConnectionAndHasKmsKeyId: !And - - !Not [ !Condition HasGlueConnection ] - - !Condition HasKmsKeyId + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] + CreateKMSPolicy: !And [ !Condition HasKMSKeyId, !Condition NotHasLambdaRole ] Resources: ConnectorConfig: @@ -69,12 +61,11 @@ Resources: Properties: Environment: Variables: - disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] - spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] - spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] - kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] - glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] - FunctionName: !Ref LambdaFunctionName + disable_spill_encryption: !Ref DisableSpillEncryption + spill_bucket: !Ref SpillBucket + spill_prefix: !Ref SpillPrefix + kms_key_id: !If [HasKMSKeyId, !Ref KMSKeyId, !Ref "AWS::NoValue"] + FunctionName: !Ref AthenaCatalogName 
Handler: "com.amazonaws.athena.connectors.cloudwatch.CloudwatchCompositeHandler" CodeUri: "./target/athena-cloudwatch-2022.47.1.jar" Description: "Enables Amazon Athena to communicate with Cloudwatch, making your log accessible via SQL" @@ -87,7 +78,7 @@ Resources: Condition: NotHasLambdaRole Type: AWS::IAM::Role Properties: - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] ManagedPolicyArns: - "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" AssumeRolePolicyDocument: @@ -139,23 +130,14 @@ Resources: - arn:${AWS::Partition}:s3:::${bucketName}/* - bucketName: Ref: SpillBucket - - !If - - HasGlueConnection - - Action: - - glue:GetConnection - Effect: Allow - Resource: - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' - - !Ref "AWS::NoValue" Roles: - !Ref FunctionRole - FunctionKmsPolicy: - Condition: CreateKmsPolicy + FunctionKMSPolicy: + Condition: CreateKMSPolicy Type: "AWS::IAM::Policy" Properties: - PolicyName: FunctionKmsPolicy + PolicyName: FunctionKMSPolicy PolicyDocument: Version: 2012-10-17 Statement: @@ -166,6 +148,6 @@ Resources: - Action: - kms:GenerateDataKey Effect: Allow - Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KMSKeyId}" Roles: - - !Ref FunctionRole + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-datalakegen2/athena-datalakegen2-connection.yaml b/athena-datalakegen2/athena-datalakegen2-connection.yaml new file mode 100644 index 0000000000..b16dcd38da --- /dev/null +++ b/athena-datalakegen2/athena-datalakegen2-connection.yaml @@ -0,0 +1,168 @@ +Transform: 'AWS::Serverless-2016-10-31' +Metadata: + 
'AWS::ServerlessRepo::Application': + Name: AthenaDataLakeGen2ConnectorWithGlueConnection + Description: 'This connector enables Amazon Athena to communicate with DataLake Gen2 using JDBC driver.' + Author: 'default author' + SpdxLicenseId: Apache-2.0 + LicenseUrl: LICENSE.txt + ReadmeUrl: README.md + Labels: + - datalake-gen2 + - athena-federation + - jdbc + HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' + SemanticVersion: 2022.47.1 + SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' +Parameters: + LambdaFunctionName: + Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' + Type: String + AllowedPattern: ^[a-z0-9-_]{1,64}$ + SecretName: + Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' + Type: String + SpillBucket: + Description: 'The name of the bucket where this function can spill data.' + Type: String + GlueConnection: + Description: "Name of glue connection storing connection details for Federated Data source." + Type: String + SecurityGroupIds: + Description: '(Optional) One or more SecurityGroup IDs corresponding to the SecurityGroup that should be applied to the Lambda function. (e.g. sg1,sg2,sg3)' + Type: CommaDelimitedList + Default: "" + SubnetIds: + Description: '(Optional) One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. 
subnet1,subnet2)' + Type: CommaDelimitedList + Default: "" + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." + Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" + +Conditions: + HasSecurityGroups: !Not [ !Equals [ !Join ["", !Ref SecurityGroupIds], "" ] ] + HasSubnets: !Not [ !Equals [ !Join ["", !Ref SubnetIds], "" ] ] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + +Resources: + JdbcConnectorConfig: + Type: 'AWS::Serverless::Function' + Properties: + Environment: + Variables: + glue_connection: !Ref GlueConnection + FunctionName: !Ref LambdaFunctionName + Handler: "com.amazonaws.athena.connectors.datalakegen2.DataLakeGen2MuxCompositeHandler" + CodeUri: "./target/athena-datalakegen2-2022.47.1.jar" + Description: "Enables Amazon Athena to communicate with DataLake Gen2 using JDBC" + Runtime: java11 + Timeout: !Ref 900 + MemorySize: !Ref 3008 + Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] + VpcConfig: + SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] + SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: 
+ Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' + - Action: + - logs:CreateLogGroup + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' + - Action: + - logs:CreateLogStream + - logs:PutLogEvents + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' + - Action: + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git 
a/athena-datalakegen2/athena-datalakegen2.yaml b/athena-datalakegen2/athena-datalakegen2.yaml index fd904408fd..32da145587 100644 --- a/athena-datalakegen2/athena-datalakegen2.yaml +++ b/athena-datalakegen2/athena-datalakegen2.yaml @@ -22,8 +22,7 @@ Parameters: DefaultConnectionString: Description: 'The default connection string is used when catalog is "lambda:${LambdaFunctionName}". Catalog specific Connection Strings can be added later. Format: ${DatabaseType}://${NativeJdbcConnectionString}.' Type: String - Default: "" - SecretName: + SecretNamePrefix: Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' Type: String SpillBucket: @@ -33,10 +32,6 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill - GlueConnection: - Description: "(Optional) Name of glue connection storing connection details for Federated Data source." - Type: String - Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -57,43 +52,24 @@ Parameters: Description: '(Optional) One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. 
subnet1,subnet2)' Type: CommaDelimitedList Default: "" - PermissionsBoundaryArn: + PermissionsBoundaryARN: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String - KmsKeyId: - Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." - Type: String - Default: "" - LambdaRoleArn: - Description: "(Optional) A custom role to be used by the Connector lambda" - Type: String - Default: "" - Conditions: - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] HasSecurityGroups: !Not [ !Equals [ !Join ["", !Ref SecurityGroupIds], "" ] ] HasSubnets: !Not [ !Equals [ !Join ["", !Ref SubnetIds], "" ] ] - HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] - NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] - CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] - HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] - NotHasGlueConnectionAndHasKmsKeyId: !And - - !Not [ !Condition HasGlueConnection ] - - !Condition HasKmsKeyId - Resources: JdbcConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - default: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultConnectionString ] - disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] - spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] - spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] - kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] - glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] + 
disable_spill_encryption: !Ref DisableSpillEncryption + spill_bucket: !Ref SpillBucket + spill_prefix: !Ref SpillPrefix + default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.datalakegen2.DataLakeGen2MuxCompositeHandler" CodeUri: "./target/athena-datalakegen2-2022.47.1.jar" @@ -101,108 +77,39 @@ Resources: Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] - VpcConfig: - SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] - SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] - - FunctionRole: - Condition: NotHasLambdaRole - Type: AWS::IAM::Role - Properties: - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] - ManagedPolicyArns: - - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" - AssumeRolePolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Principal: - Service: - - lambda.amazonaws.com - Action: - - "sts:AssumeRole" - - FunctionExecutionPolicy: - Condition: NotHasLambdaRole - Type: "AWS::IAM::Policy" - Properties: - Roles: - - !Ref FunctionRole - PolicyName: FunctionExecutionPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Action: - - secretsmanager:GetSecretValue - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' - - Action: - - logs:CreateLogGroup - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' - - Action: - - logs:CreateLogStream - - logs:PutLogEvents - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' - - Action: - - athena:GetQueryExecution - - Effect: Allow - Resource: '*' - - Action: - - ec2:CreateNetworkInterface - - 
ec2:DeleteNetworkInterface - - ec2:DescribeNetworkInterfaces - - ec2:DetachNetworkInterface - Effect: Allow - Resource: '*' - - Action: - - s3:GetObject - - s3:ListBucket - - s3:GetBucketLocation - - s3:GetObjectVersion - - s3:PutObject - - s3:PutObjectAcl - - s3:GetLifecycleConfiguration - - s3:PutLifecycleConfiguration - - s3:DeleteObject - Effect: Allow - Resource: - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName} - - bucketName: - Ref: SpillBucket - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName}/* - - bucketName: - Ref: SpillBucket - - !If - - HasGlueConnection + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] + Policies: + - Statement: - Action: - - glue:GetConnection + - secretsmanager:GetSecretValue Effect: Allow - Resource: - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' - - !Ref "AWS::NoValue" - - FunctionKmsPolicy: - Condition: CreateKmsPolicy - Type: "AWS::IAM::Policy" - Properties: - PolicyName: FunctionKmsPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Action: - - kms:GenerateRandom - Resource: '*' - - Effect: Allow - Action: - - kms:GenerateDataKey - Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" - Roles: - - !Ref FunctionRole \ No newline at end of file + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' + Version: '2012-10-17' + - Statement: + - Action: + - logs:CreateLogGroup + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' + Version: '2012-10-17' + - Statement: + - Action: + - logs:CreateLogStream + - logs:PutLogEvents + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' + Version: 
'2012-10-17' + - Statement: + - Action: + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + Version: '2012-10-17' + #S3CrudPolicy allows our connector to spill large responses to S3. You can optionally replace this pre-made policy + #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. + - S3CrudPolicy: + BucketName: !Ref SpillBucket + #VPCAccessPolicy allows our connector to run in a VPC so that it can access your data source. + - VPCAccessPolicy: {} + VpcConfig: + SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] + SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] \ No newline at end of file diff --git a/athena-db2-as400/athena-db2-as400-connection.yaml b/athena-db2-as400/athena-db2-as400-connection.yaml new file mode 100644 index 0000000000..89ba799ef6 --- /dev/null +++ b/athena-db2-as400/athena-db2-as400-connection.yaml @@ -0,0 +1,169 @@ +Transform: 'AWS::Serverless-2016-10-31' +Metadata: + 'AWS::ServerlessRepo::Application': + Name: AthenaDb2AS400ConnectorWithGlueConnection + Description: 'This connector enables Amazon Athena to communicate with DB2 on iSeries (AS400) using JDBC driver.' + Author: 'default author' + SpdxLicenseId: Apache-2.0 + LicenseUrl: LICENSE.txt + ReadmeUrl: README.md + Labels: + - ibm + - db2as400 + - athena-federation + - jdbc + HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' + SemanticVersion: 2022.47.1 + SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' +Parameters: + LambdaFunctionName: + Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' + Type: String + AllowedPattern: ^[a-z0-9-_]{1,64}$ + SecretName: + Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. 
All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' + Type: String + SpillBucket: + Description: 'The name of the bucket where this function can spill data.' + Type: String + GlueConnection: + Description: "Name of glue connection storing connection details for Federated Data source." + Type: String + SecurityGroupIds: + Description: '(Optional) One or more SecurityGroup IDs corresponding to the SecurityGroup that should be applied to the Lambda function. (e.g. sg1,sg2,sg3)' + Type: CommaDelimitedList + Default: "" + SubnetIds: + Description: '(Optional) One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' + Type: CommaDelimitedList + Default: "" + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
+ Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" + +Conditions: + HasSecurityGroups: !Not [ !Equals [ !Join ["", !Ref SecurityGroupIds], "" ] ] + HasSubnets: !Not [ !Equals [ !Join ["", !Ref SubnetIds], "" ] ] + HasKmsKeyId: !Not [!Equals [!Ref KmsKeyId, ""]] + NotHasLambdaRole: !Equals [!Ref LambdaRoleArn, ""] + CreateKmsPolicy: !And [!Condition HasKmsKeyId, !Condition NotHasLambdaRole] + +Resources: + JdbcConnectorConfig: + Type: 'AWS::Serverless::Function' + Properties: + Environment: + Variables: + glue_connection: !Ref GlueConnection + FunctionName: !Ref LambdaFunctionName + Handler: "com.amazonaws.athena.connectors.db2as400.Db2As400MuxCompositeHandler" + CodeUri: "./target/athena-db2-as400-2022.47.1.jar" + Description: "Enables Amazon Athena to communicate with DB2 on iSeries (AS400) using JDBC" + Runtime: java11 + Timeout: 900 + MemorySize: 3008 + Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] + VpcConfig: + SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] + SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' + - Action: + - logs:CreateLogGroup + 
Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' + - Action: + - logs:CreateLogStream + - logs:PutLogEvents + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' + - Action: + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-db2-as400/athena-db2-as400.yaml b/athena-db2-as400/athena-db2-as400.yaml index 1c2a55890c..c84dac623e 100644 --- a/athena-db2-as400/athena-db2-as400.yaml +++ b/athena-db2-as400/athena-db2-as400.yaml @@ -23,8 +23,7 @@ Parameters: DefaultConnectionString: Description: 'The default connection string is used when catalog is 
"lambda:${LambdaFunctionName}". Catalog specific Connection Strings can be added later. Format: ${DatabaseType}://${NativeJdbcConnectionString}.' Type: String - Default: "" - SecretName: + SecretNamePrefix: Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' Type: String SpillBucket: @@ -34,10 +33,6 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill - GlueConnection: - Description: "(Optional) Name of glue connection storing connection details for Federated Data source." - Type: String - Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -58,43 +53,24 @@ Parameters: Description: '(Optional) One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' Type: CommaDelimitedList Default: "" - PermissionsBoundaryArn: + PermissionsBoundaryARN: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String - KmsKeyId: - Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
- Type: String - Default: "" - LambdaRoleArn: - Description: "(Optional) A custom role to be used by the Connector lambda" - Type: String - Default: "" - Conditions: + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] HasSecurityGroups: !Not [ !Equals [ !Join ["", !Ref SecurityGroupIds], "" ] ] HasSubnets: !Not [ !Equals [ !Join ["", !Ref SubnetIds], "" ] ] - HasKmsKeyId: !Not [!Equals [!Ref KmsKeyId, ""]] - NotHasLambdaRole: !Equals [!Ref LambdaRoleArn, ""] - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] - CreateKmsPolicy: !And [!Condition HasKmsKeyId, !Condition NotHasLambdaRole] - HasGlueConnection: !Not [!Equals [!Ref GlueConnection, ""]] - NotHasGlueConnectionAndHasKmsKeyId: !And - - !Not [!Condition HasGlueConnection] - - !Condition HasKmsKeyId - Resources: JdbcConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption] - spill_bucket: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket] - spill_prefix: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix] - kms_key_id: !If [NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue"] - glue_connection: !If [HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue"] - default: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultConnectionString] + disable_spill_encryption: !Ref DisableSpillEncryption + spill_bucket: !Ref SpillBucket + spill_prefix: !Ref SpillPrefix + default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.db2as400.Db2As400MuxCompositeHandler" CodeUri: "./target/athena-db2-as400-2022.47.1.jar" @@ -102,107 +78,39 @@ Resources: Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] - VpcConfig: - SecurityGroupIds: !Ref 
SecurityGroupIds - SubnetIds: !Ref SubnetIds - - FunctionRole: - Condition: NotHasLambdaRole - Type: AWS::IAM::Role - Properties: - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] - ManagedPolicyArns: - - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" - AssumeRolePolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Principal: - Service: - - lambda.amazonaws.com - Action: - - "sts:AssumeRole" - - FunctionExecutionPolicy: - Condition: NotHasLambdaRole - Type: "AWS::IAM::Policy" - Properties: - Roles: - - !Ref FunctionRole - PolicyName: FunctionExecutionPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Action: - - secretsmanager:GetSecretValue - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' - - Action: - - logs:CreateLogGroup - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' - - Action: - - logs:CreateLogStream - - logs:PutLogEvents - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' - - Action: - - athena:GetQueryExecution - Effect: Allow - Resource: '*' - - Action: - - ec2:CreateNetworkInterface - - ec2:DeleteNetworkInterface - - ec2:DescribeNetworkInterfaces - - ec2:DetachNetworkInterface - Effect: Allow - Resource: '*' - - Action: - - s3:GetObject - - s3:ListBucket - - s3:GetBucketLocation - - s3:GetObjectVersion - - s3:PutObject - - s3:PutObjectAcl - - s3:GetLifecycleConfiguration - - s3:PutLifecycleConfiguration - - s3:DeleteObject - Effect: Allow - Resource: - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName} - - bucketName: - Ref: SpillBucket - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName}/* - - bucketName: - Ref: SpillBucket - - !If - - HasGlueConnection + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref 
PermissionsBoundaryARN, !Ref "AWS::NoValue" ] + Policies: + - Statement: - Action: - - glue:GetConnection + - secretsmanager:GetSecretValue Effect: Allow - Resource: - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' - - !Ref "AWS::NoValue" - - FunctionKmsPolicy: - Condition: CreateKmsPolicy - Type: "AWS::IAM::Policy" - Properties: - PolicyName: FunctionKmsPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Action: - - kms:GenerateRandom - Resource: '*' - - Effect: Allow - Action: - - kms:GenerateDataKey - Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" - Roles: - - !Ref FunctionRole \ No newline at end of file + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' + Version: '2012-10-17' + - Statement: + - Action: + - logs:CreateLogGroup + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' + Version: '2012-10-17' + - Statement: + - Action: + - logs:CreateLogStream + - logs:PutLogEvents + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' + Version: '2012-10-17' + - Statement: + - Action: + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + Version: '2012-10-17' + #S3CrudPolicy allows our connector to spill large responses to S3. You can optionally replace this pre-made policy + #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. + - S3CrudPolicy: + BucketName: !Ref SpillBucket + #VPCAccessPolicy allows our connector to run in a VPC so that it can access your data source. 
+ - VPCAccessPolicy: {} + VpcConfig: + SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] + SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] \ No newline at end of file diff --git a/athena-db2/athena-db2-connection.yaml b/athena-db2/athena-db2-connection.yaml new file mode 100644 index 0000000000..2d6a5d9b99 --- /dev/null +++ b/athena-db2/athena-db2-connection.yaml @@ -0,0 +1,169 @@ +Transform: 'AWS::Serverless-2016-10-31' +Metadata: + 'AWS::ServerlessRepo::Application': + Name: AthenaDb2ConnectorWithGlueConnection + Description: 'This connector enables Amazon Athena to communicate with DB2 using JDBC driver.' + Author: 'default author' + SpdxLicenseId: Apache-2.0 + LicenseUrl: LICENSE.txt + ReadmeUrl: README.md + Labels: + - ibm + - db2 + - athena-federation + - jdbc + HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' + SemanticVersion: 2022.47.1 + SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' +Parameters: + LambdaFunctionName: + Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' + Type: String + AllowedPattern: ^[a-z0-9-_]{1,64}$ + SecretName: + Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' + Type: String + SpillBucket: + Description: 'The name of the bucket where this function can spill data.' + Type: String + GlueConnection: + Description: "Name of glue connection storing connection details for Federated Data source." 
+ Type: String + SecurityGroupIds: + Description: '(Optional) One or more SecurityGroup IDs corresponding to the SecurityGroup that should be applied to the Lambda function. (e.g. sg1,sg2,sg3)' + Type: CommaDelimitedList + Default: "" + SubnetIds: + Description: '(Optional) One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' + Type: CommaDelimitedList + Default: "" + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." + Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" + +Conditions: + HasSecurityGroups: !Not [ !Equals [ !Join ["", !Ref SecurityGroupIds], "" ] ] + HasSubnets: !Not [ !Equals [ !Join ["", !Ref SubnetIds], "" ] ] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + +Resources: + JdbcConnectorConfig: + Type: 'AWS::Serverless::Function' + Properties: + Environment: + Variables: + glue_connection: !Ref GlueConnection + FunctionName: !Ref LambdaFunctionName + Handler: "com.amazonaws.athena.connectors.db2.Db2MuxCompositeHandler" + CodeUri: "./target/athena-db2-2022.47.1.jar" + Description: "Enables Amazon Athena to communicate with DB2 using JDBC" + Runtime: java11 + Timeout: 900 + MemorySize: 3008 + Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] + VpcConfig: + SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] + SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + 
ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' + - Action: + - logs:CreateLogGroup + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' + - Action: + - logs:CreateLogStream + - logs:PutLogEvents + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' + - Action: + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - "arn:${AWS::Partition}:s3:::${bucketName}" + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - "arn:${AWS::Partition}:s3:::${bucketName}/*" + - bucketName: + Ref: SpillBucket + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + 
Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-db2/athena-db2.yaml b/athena-db2/athena-db2.yaml index d7d4df6616..cbaaa93af9 100644 --- a/athena-db2/athena-db2.yaml +++ b/athena-db2/athena-db2.yaml @@ -23,8 +23,7 @@ Parameters: DefaultConnectionString: Description: 'The default connection string is used when catalog is "lambda:${LambdaFunctionName}". Catalog specific Connection Strings can be added later. Format: ${DatabaseType}://${NativeJdbcConnectionString}.' Type: String - Default: "" - SecretName: + SecretNamePrefix: Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' Type: String SpillBucket: @@ -34,10 +33,6 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill - GlueConnection: - Description: "(Optional) Name of glue connection storing connection details for Federated Data source." - Type: String - Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -58,43 +53,24 @@ Parameters: Description: '(Optional) One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. 
subnet1,subnet2)' Type: CommaDelimitedList Default: "" - PermissionsBoundaryArn: + PermissionsBoundaryARN: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String - KmsKeyId: - Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." - Type: String - Default: "" - LambdaRoleArn: - Description: "(Optional) A custom role to be used by the Connector lambda" - Type: String - Default: "" - Conditions: + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] HasSecurityGroups: !Not [ !Equals [ !Join ["", !Ref SecurityGroupIds], "" ] ] HasSubnets: !Not [ !Equals [ !Join ["", !Ref SubnetIds], "" ] ] - HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] - NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] - CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] - HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] - NotHasGlueConnectionAndHasKmsKeyId: !And - - !Not [ !Condition HasGlueConnection ] - - !Condition HasKmsKeyId - Resources: JdbcConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - default: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultConnectionString ] - disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] - spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] - spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] - kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] - glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] + 
disable_spill_encryption: !Ref DisableSpillEncryption + spill_bucket: !Ref SpillBucket + spill_prefix: !Ref SpillPrefix + default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.db2.Db2MuxCompositeHandler" CodeUri: "./target/athena-db2-2022.47.1.jar" @@ -102,107 +78,39 @@ Resources: Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] - VpcConfig: - SecurityGroupIds: !Ref SecurityGroupIds - SubnetIds: !Ref SubnetIds - - FunctionRole: - Condition: NotHasLambdaRole - Type: AWS::IAM::Role - Properties: - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] - ManagedPolicyArns: - - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" - AssumeRolePolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Principal: - Service: - - lambda.amazonaws.com - Action: - - "sts:AssumeRole" - - FunctionExecutionPolicy: - Condition: NotHasLambdaRole - Type: "AWS::IAM::Policy" - Properties: - Roles: - - !Ref FunctionRole - PolicyName: FunctionExecutionPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Action: - - secretsmanager:GetSecretValue - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' - - Action: - - logs:CreateLogGroup - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' - - Action: - - logs:CreateLogStream - - logs:PutLogEvents - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' - - Action: - - athena:GetQueryExecution - Effect: Allow - Resource: '*' - - Action: - - ec2:CreateNetworkInterface - - ec2:DeleteNetworkInterface - - ec2:DescribeNetworkInterfaces - - ec2:DetachNetworkInterface - Effect: Allow - Resource: '*' 
- - Action: - - s3:GetObject - - s3:ListBucket - - s3:GetBucketLocation - - s3:GetObjectVersion - - s3:PutObject - - s3:PutObjectAcl - - s3:GetLifecycleConfiguration - - s3:PutLifecycleConfiguration - - s3:DeleteObject - Effect: Allow - Resource: - - Fn::Sub: - - "arn:${AWS::Partition}:s3:::${bucketName}" - - bucketName: - Ref: SpillBucket - - Fn::Sub: - - "arn:${AWS::Partition}:s3:::${bucketName}/*" - - bucketName: - Ref: SpillBucket - - !If - - HasGlueConnection + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] + Policies: + - Statement: - Action: - - glue:GetConnection + - secretsmanager:GetSecretValue Effect: Allow - Resource: - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' - - !Ref "AWS::NoValue" - - FunctionKmsPolicy: - Condition: CreateKmsPolicy - Type: "AWS::IAM::Policy" - Properties: - PolicyName: FunctionKmsPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Action: - - kms:GenerateRandom - Resource: '*' - - Effect: Allow - Action: - - kms:GenerateDataKey - Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" - Roles: - - !Ref FunctionRole \ No newline at end of file + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' + Version: '2012-10-17' + - Statement: + - Action: + - logs:CreateLogGroup + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' + Version: '2012-10-17' + - Statement: + - Action: + - logs:CreateLogStream + - logs:PutLogEvents + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' + Version: '2012-10-17' + - Statement: + - Action: + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + Version: '2012-10-17' + 
#S3CrudPolicy allows our connector to spill large responses to S3. You can optionally replace this pre-made policy + #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. + - S3CrudPolicy: + BucketName: !Ref SpillBucket + #VPCAccessPolicy allows our connector to run in a VPC so that it can access your data source. + - VPCAccessPolicy: {} + VpcConfig: + SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] + SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] \ No newline at end of file diff --git a/athena-docdb/athena-docdb-connection.yaml b/athena-docdb/athena-docdb-connection.yaml new file mode 100644 index 0000000000..f4a2a435eb --- /dev/null +++ b/athena-docdb/athena-docdb-connection.yaml @@ -0,0 +1,161 @@ +Transform: 'AWS::Serverless-2016-10-31' +Metadata: + 'AWS::ServerlessRepo::Application': + Name: AthenaDocumentDBConnectorWithGlueConnection + Description: This connector enables Amazon Athena to communicate with your DocumentDB instance(s), making your DocumentDB data accessible via SQL. + Author: 'default author' + SpdxLicenseId: Apache-2.0 + LicenseUrl: LICENSE.txt + ReadmeUrl: README.md + Labels: + - athena-federation + HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' + SemanticVersion: 2022.47.1 + SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' +Parameters: + LambdaFunctionName: + Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' + Type: String + AllowedPattern: ^[a-z0-9-_]{1,64}$ + SpillBucket: + Description: 'The name of the bucket where this function can spill data.' + Type: String + GlueConnection: + Description: "Name of glue connection storing connection details for Federated Data source." 
+ Type: String + SecurityGroupIds: + Description: 'One or more SecurityGroup IDs corresponding to the SecurityGroup that should be applied to the Lambda function. (e.g. sg1,sg2,sg3)' + Type: 'List' + SubnetIds: + Description: 'One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' + Type: 'List' + SecretName: + Description: 'The name or prefix of a set of names within Secrets Manager that this function should have access to. (e.g. hbase-*).' + Type: String + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." + Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" + +Conditions: + HasKmsKeyId: !Not [!Equals [!Ref KmsKeyId, ""]] + NotHasLambdaRole: !Equals [!Ref LambdaRoleArn, ""] + CreateKmsPolicy: !And [!Condition HasKmsKeyId, !Condition NotHasLambdaRole] + +Resources: + ConnectorConfig: + Type: 'AWS::Serverless::Function' + Properties: + Environment: + Variables: + glue_connection: !Ref GlueConnection + FunctionName: !Ref LambdaFunctionName + Handler: "com.amazonaws.athena.connectors.docdb.DocDBCompositeHandler" + CodeUri: "./target/athena-docdb-2022.47.1.jar" + Description: "Enables Amazon Athena to communicate with DocumentDB, making your DocumentDB data accessible via SQL." 
+ Runtime: java11 + Timeout: 900 + MemorySize: 3008 + Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] + VpcConfig: + SecurityGroupIds: !Ref SecurityGroupIds + SubnetIds: !Ref SubnetIds + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:*:*:secret:${SecretName}*' + - Action: + - glue:GetTableVersions + - glue:GetPartitions + - glue:GetTables + - glue:GetTableVersion + - glue:GetDatabases + - glue:GetTable + - glue:GetPartition + - glue:GetDatabase + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 
'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-docdb/athena-docdb.yaml b/athena-docdb/athena-docdb.yaml index 4e14513238..efb7fc0b2e 100644 --- a/athena-docdb/athena-docdb.yaml +++ b/athena-docdb/athena-docdb.yaml @@ -13,7 +13,7 @@ Metadata: SemanticVersion: 2022.47.1 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: - LambdaFunctionName: + AthenaCatalogName: Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' Type: String AllowedPattern: ^[a-z0-9-_]{1,64}$ @@ -24,10 +24,6 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill - GlueConnection: - Description: "(Optional) Name of glue connection storing connection details for Federated Data source." - Type: String - Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -46,156 +42,64 @@ Parameters: SubnetIds: Description: 'One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' Type: 'List' - SecretName: + SecretNameOrPrefix: Description: 'The name or prefix of a set of names within Secrets Manager that this function should have access to. (e.g. hbase-*).' 
Type: String - DefaultDocdb: + DocDBConnectionString: Description: 'The DocDB connection details to use by default if not catalog specific connection is defined and optionally using SecretsManager (e.g. ${secret_name}).' Type: String Default: "e.g. mongodb://:@:/?ssl=true&ssl_ca_certs=rds-combined-ca-bundle.pem&replicaSet=rs0" - PermissionsBoundaryArn: + PermissionsBoundaryARN: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String - KmsKeyId: - Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." - Type: String - Default: "" - LambdaRoleArn: - Description: "(Optional) A custom role to be used by the Connector lambda" - Type: String - Default: "" - Conditions: - HasKmsKeyId: !Not [!Equals [!Ref KmsKeyId, ""]] - NotHasLambdaRole: !Equals [!Ref LambdaRoleArn, ""] - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] - CreateKmsPolicy: !And [!Condition HasKmsKeyId, !Condition NotHasLambdaRole] - HasGlueConnection: !Not [!Equals [!Ref GlueConnection, ""]] - NotHasGlueConnectionAndHasKmsKeyId: !And - - !Not [!Condition HasGlueConnection] - - !Condition HasKmsKeyId - + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] Resources: ConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption] - spill_bucket: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket] - spill_prefix: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix] - kms_key_id: !If [NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue"] - glue_connection: !If [HasGlueConnection, !Ref GlueConnection, !Ref 
"AWS::NoValue"] - default_docdb: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultDocdb] - FunctionName: !Ref LambdaFunctionName + disable_spill_encryption: !Ref DisableSpillEncryption + spill_bucket: !Ref SpillBucket + spill_prefix: !Ref SpillPrefix + default_docdb: !Ref DocDBConnectionString + FunctionName: !Ref AthenaCatalogName Handler: "com.amazonaws.athena.connectors.docdb.DocDBCompositeHandler" CodeUri: "./target/athena-docdb-2022.47.1.jar" Description: "Enables Amazon Athena to communicate with DocumentDB, making your DocumentDB data accessible via SQL." Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] - VpcConfig: - SecurityGroupIds: !Ref SecurityGroupIds - SubnetIds: !Ref SubnetIds - - FunctionRole: - Condition: NotHasLambdaRole - Type: AWS::IAM::Role - Properties: - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] - ManagedPolicyArns: - - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" - AssumeRolePolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Principal: - Service: - - lambda.amazonaws.com - Action: - - "sts:AssumeRole" - - FunctionExecutionPolicy: - Condition: NotHasLambdaRole - Type: "AWS::IAM::Policy" - Properties: - Roles: - - !Ref FunctionRole - PolicyName: FunctionExecutionPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Action: - - secretsmanager:GetSecretValue - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:*:*:secret:${SecretName}*' - - Action: - - glue:GetTableVersions - - glue:GetPartitions - - glue:GetTables - - glue:GetTableVersion - - glue:GetDatabases - - glue:GetTable - - glue:GetPartition - - glue:GetDatabase - - athena:GetQueryExecution - - Effect: Allow - Resource: '*' - - Action: - - ec2:CreateNetworkInterface - - ec2:DeleteNetworkInterface - - ec2:DescribeNetworkInterfaces - - 
ec2:DetachNetworkInterface - Effect: Allow - Resource: '*' - - Action: - - s3:GetObject - - s3:ListBucket - - s3:GetBucketLocation - - s3:GetObjectVersion - - s3:PutObject - - s3:PutObjectAcl - - s3:GetLifecycleConfiguration - - s3:PutLifecycleConfiguration - - s3:DeleteObject - Effect: Allow - Resource: - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName} - - bucketName: - Ref: SpillBucket - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName}/* - - bucketName: - Ref: SpillBucket - - !If - - HasGlueConnection + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] + Policies: + - Statement: - Action: - - glue:GetConnection + - secretsmanager:GetSecretValue Effect: Allow - Resource: - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' - - !Ref "AWS::NoValue" - - FunctionKmsPolicy: - Condition: CreateKmsPolicy - Type: "AWS::IAM::Policy" - Properties: - PolicyName: FunctionKmsPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Action: - - kms:GenerateRandom - Resource: '*' - - Effect: Allow - Action: - - kms:GenerateDataKey - Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" - Roles: - - !Ref FunctionRole \ No newline at end of file + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:*:*:secret:${SecretNameOrPrefix}' + Version: '2012-10-17' + - Statement: + - Action: + - glue:GetTableVersions + - glue:GetPartitions + - glue:GetTables + - glue:GetTableVersion + - glue:GetDatabases + - glue:GetTable + - glue:GetPartition + - glue:GetDatabase + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + Version: '2012-10-17' + #S3CrudPolicy allows our connector to spill large responses to S3. 
You can optionally replace this pre-made policy + #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. + - S3CrudPolicy: + BucketName: !Ref SpillBucket + #VPCAccessPolicy allows our connector to run in a VPC so that it can access your data source. + - VPCAccessPolicy: {} + VpcConfig: + SecurityGroupIds: !Ref SecurityGroupIds + SubnetIds: !Ref SubnetIds \ No newline at end of file diff --git a/athena-dynamodb/athena-dynamodb-connection.yaml b/athena-dynamodb/athena-dynamodb-connection.yaml new file mode 100644 index 0000000000..aef157678f --- /dev/null +++ b/athena-dynamodb/athena-dynamodb-connection.yaml @@ -0,0 +1,144 @@ +Transform: 'AWS::Serverless-2016-10-31' +Metadata: + 'AWS::ServerlessRepo::Application': + Name: AthenaDynamoDBConnectorWithGlueConnection + Description: 'This connector enables Amazon Athena to communicate with DynamoDB, making your tables accessible via SQL.' + Author: 'default author' + SpdxLicenseId: Apache-2.0 + LicenseUrl: LICENSE.txt + ReadmeUrl: README.md + Labels: + - athena-federation + HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' + SemanticVersion: 2022.47.1 + SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' +Parameters: + LambdaFunctionName: + Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' + Type: String + AllowedPattern: ^[a-z0-9-_]{1,64}$ + SpillBucket: + Description: 'The name of the bucket where this function can spill data.' + Type: String + GlueConnection: + Description: "Name of glue connection storing connection details for Federated Data source." + Type: String + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. 
Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." + Type: String + Default: "" + +Conditions: + HasKmsKeyId: !Not [!Equals [!Ref KmsKeyId, ""]] + NotHasLambdaRole: !Equals [!Ref LambdaRoleArn, ""] + CreateKmsPolicy: !And [!Condition HasKmsKeyId, !Condition NotHasLambdaRole] + +Resources: + ConnectorConfig: + Type: 'AWS::Serverless::Function' + Properties: + Environment: + Variables: + glue_connection: !Ref GlueConnection + FunctionName: !Ref LambdaFunctionName + Handler: "com.amazonaws.athena.connectors.dynamodb.DynamoDBCompositeHandler" + CodeUri: "./target/athena-dynamodb-2022.47.1.jar" + Description: "Enables Amazon Athena to communicate with DynamoDB, making your tables accessible via SQL" + Runtime: java11 + Timeout: 900 + MemorySize: 3008 + Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - dynamodb:DescribeTable + - dynamodb:ListSchemas + - dynamodb:ListTables + - dynamodb:Query + - dynamodb:Scan + - dynamodb:PartiQLSelect + - glue:GetTableVersions + - glue:GetPartitions + - glue:GetTables + - glue:GetTableVersion + - glue:GetDatabases + - glue:GetTable + - glue:GetPartition + - glue:GetDatabase + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - 
s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + Roles: + - !Ref FunctionRole + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - kms:GenerateRandom + Effect: Allow + Resource: '*' + - Action: + - kms:GenerateDataKey + Effect: Allow + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole diff --git a/athena-dynamodb/athena-dynamodb.yaml b/athena-dynamodb/athena-dynamodb.yaml index a988b444d3..fbdd05c865 100644 --- a/athena-dynamodb/athena-dynamodb.yaml +++ b/athena-dynamodb/athena-dynamodb.yaml @@ -13,7 +13,7 @@ Metadata: SemanticVersion: 2022.47.1 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: - LambdaFunctionName: + AthenaCatalogName: Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' Type: String AllowedPattern: ^[a-z0-9-_]{1,64}$ @@ -24,10 +24,6 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill - GlueConnection: - Description: "(Optional) Name of glue connection storing connection details for Federated Data source." - Type: String - Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. 
(min 1 - 900 max)' Default: 900 @@ -36,7 +32,7 @@ Parameters: Description: 'Lambda memory in MB (min 128 - 3008 max).' Default: 3008 Type: Number - LambdaRoleArn: + LambdaRole: Description: "(Optional) A custom role to be used by the Connector lambda" Type: String Default: "" @@ -44,24 +40,20 @@ Parameters: Description: "WARNING: If set to 'true' encryption for spilled data is disabled." Default: 'false' Type: String - KmsKeyId: + KMSKeyId: Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." Type: String Default: "" - PermissionsBoundaryArn: + PermissionsBoundaryARN: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String Conditions: - HasKmsKeyId: !Not [!Equals [!Ref KmsKeyId, ""]] - NotHasLambdaRole: !Equals [!Ref LambdaRoleArn, ""] - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] - CreateKmsPolicy: !And [!Condition HasKmsKeyId, !Condition NotHasLambdaRole] - HasGlueConnection: !Not [!Equals [!Ref GlueConnection, ""]] - NotHasGlueConnectionAndHasKmsKeyId: !And - - !Not [!Condition HasGlueConnection] - - !Condition HasKmsKeyId + HasKMSKeyId: !Not [!Equals [!Ref KMSKeyId, ""]] + NotHasLambdaRole: !Equals [!Ref LambdaRole, ""] + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] + CreateKMSPolicy: !And [!Condition HasKMSKeyId, !Condition NotHasLambdaRole] Resources: ConnectorConfig: @@ -69,25 +61,24 @@ Resources: Properties: Environment: Variables: - disable_spill_encryption: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption] - spill_bucket: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket] - spill_prefix: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix] - kms_key_id: !If 
[NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue"] - glue_connection: !If [HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue"] - FunctionName: !Ref LambdaFunctionName + disable_spill_encryption: !Ref DisableSpillEncryption + spill_bucket: !Ref SpillBucket + spill_prefix: !Ref SpillPrefix + kms_key_id: !If [HasKMSKeyId, !Ref KMSKeyId, !Ref "AWS::NoValue"] + FunctionName: !Ref AthenaCatalogName Handler: "com.amazonaws.athena.connectors.dynamodb.DynamoDBCompositeHandler" CodeUri: "./target/athena-dynamodb-2022.47.1.jar" Description: "Enables Amazon Athena to communicate with DynamoDB, making your tables accessible via SQL" Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] + Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRole] FunctionRole: Condition: NotHasLambdaRole Type: AWS::IAM::Role Properties: - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] ManagedPolicyArns: - "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" AssumeRolePolicyDocument: @@ -124,7 +115,6 @@ Resources: - glue:GetPartition - glue:GetDatabase - athena:GetQueryExecution - Effect: Allow Resource: '*' - Action: @@ -147,23 +137,14 @@ Resources: - arn:${AWS::Partition}:s3:::${bucketName}/* - bucketName: Ref: SpillBucket - - !If - - HasGlueConnection - - Action: - - glue:GetConnection - Effect: Allow - Resource: - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' - - !Ref "AWS::NoValue" Roles: - !Ref FunctionRole - FunctionKmsPolicy: - Condition: CreateKmsPolicy + FunctionKMSPolicy: + Condition: CreateKMSPolicy Type: "AWS::IAM::Policy" Properties: - 
PolicyName: FunctionKmsPolicy + PolicyName: FunctionKMSPolicy PolicyDocument: Version: 2012-10-17 Statement: @@ -174,6 +155,6 @@ Resources: - Action: - kms:GenerateDataKey Effect: Allow - Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KMSKeyId}" Roles: - - !Ref FunctionRole + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-elasticsearch/athena-elasticsearch-connection.yaml b/athena-elasticsearch/athena-elasticsearch-connection.yaml new file mode 100644 index 0000000000..a2b06ada0c --- /dev/null +++ b/athena-elasticsearch/athena-elasticsearch-connection.yaml @@ -0,0 +1,172 @@ +Transform: 'AWS::Serverless-2016-10-31' + +Metadata: + AWS::ServerlessRepo::Application: + Name: AthenaElasticsearchConnectorWithGlueConnection + Description: 'This connector enables Amazon Athena to communicate with your Elasticsearch instance(s).' + Author: 'default author' + SpdxLicenseId: Apache-2.0 + LicenseUrl: LICENSE.txt + ReadmeUrl: README.md + Labels: ['athena-federation'] + HomePageUrl: https://github.com/awslabs/aws-athena-query-federation + SemanticVersion: 2022.47.1 + SourceCodeUrl: https://github.com/awslabs/aws-athena-query-federation + +# Parameters are CloudFormation features to pass input +# to your template when you create a stack +Parameters: + LambdaFunctionName: + Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' + Type: String + AllowedPattern: ^[a-z0-9-_]{1,64}$ + SecretName: + Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena Elasticsearch Federation secret names can be prefixed with "AthenaESFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaESFederation*". Parameter value in this case should be "AthenaESFederation". 
If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' + Type: String + Default: "" + SpillBucket: + Description: 'The name of the bucket where this function can spill data.' + Type: String + GlueConnection: + Description: "Name of glue connection storing connection details for Federated Data source." + Type: String + SecurityGroupIds: + Description: '(Optional) Provide one or more SecurityGroup IDs corresponding to the SecurityGroup that should be applied to the Lambda function. (e.g. sg1,sg2,sg3)' + Type: CommaDelimitedList + Default: "" + SubnetIds: + Description: '(Optional) Provide one or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' + Type: CommaDelimitedList + Default: "" + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
+ Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" + +Conditions: + HasSecurityGroups: !Not [ !Equals [ !Join [ "", !Ref SecurityGroupIds ], "" ] ] + HasSubnets: !Not [ !Equals [ !Join [ "", !Ref SubnetIds ], "" ] ] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + +Resources: + ConnectorConfig: + Type: 'AWS::Serverless::Function' + Properties: + Environment: + Variables: + glue_connection: !Ref GlueConnection + FunctionName: !Ref LambdaFunctionName + Handler: "com.amazonaws.athena.connectors.elasticsearch.ElasticsearchCompositeHandler" + CodeUri: "./target/athena-elasticsearch-2024.18.2.jar" + Description: "The Elasticsearch Lambda Connector provides Athena users the ability to query data stored on Elasticsearch clusters." + Runtime: java11 + Timeout: 900 + MemorySize: 3008 + Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] + VpcConfig: + SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] + SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 
'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' + - Action: + - es:List* + - es:Describe* + - es:ESHttp* + - glue:GetTableVersions + - glue:GetPartitions + - glue:GetTables + - glue:GetTableVersion + - glue:GetDatabases + - glue:GetTable + - glue:GetPartition + - glue:GetDatabase + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-elasticsearch/athena-elasticsearch.yaml b/athena-elasticsearch/athena-elasticsearch.yaml index a50e198af2..9c932f7a85 100644 --- a/athena-elasticsearch/athena-elasticsearch.yaml +++ b/athena-elasticsearch/athena-elasticsearch.yaml @@ -16,11 +16,11 @@ Metadata: # Parameters are CloudFormation features to 
pass input # to your template when you create a stack Parameters: - LambdaFunctionName: + AthenaCatalogName: Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' Type: String AllowedPattern: ^[a-z0-9-_]{1,64}$ - SecretName: + SecretNamePrefix: Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena Elasticsearch Federation secret names can be prefixed with "AthenaESFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaESFederation*". Parameter value in this case should be "AthenaESFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' Type: String Default: "" @@ -31,10 +31,6 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill - GlueConnection: - Description: "(Optional) Name of glue connection storing connection details for Federated Data source." - Type: String - Default: "" LambdaTimeout: Description: "Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)" Default: 900 @@ -67,38 +63,29 @@ Parameters: Description: "timeout period (in seconds) for scroll timeout used in the retrieval of documents (default is 60 seconds)." Default: 60 Type: Number + IsVPCAccess: + AllowedValues: + - true + - false + Default: false + Description: "If ElasticSearch cluster is in VPC select true, [true, false] (default is false)" + Type: String SecurityGroupIds: - Description: '(Optional) Provide one or more SecurityGroup IDs corresponding to the SecurityGroup that should be applied to the Lambda function. (e.g. sg1,sg2,sg3)' + Description: '**If IsVPCAccess is True**. Provide one or more SecurityGroup IDs corresponding to the SecurityGroup that should be applied to the Lambda function. (e.g. 
sg1,sg2,sg3)' Type: CommaDelimitedList Default: "" SubnetIds: - Description: '(Optional) Provide one or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' + Description: '**If IsVPCAccess is True**. Provide one or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' Type: CommaDelimitedList Default: "" - PermissionsBoundaryArn: + PermissionsBoundaryARN: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String - KmsKeyId: - Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." - Type: String - Default: "" - LambdaRoleArn: - Description: "(Optional) A custom role to be used by the Connector lambda" - Type: String - Default: "" Conditions: - HasSecurityGroups: !Not [ !Equals [ !Join [ "", !Ref SecurityGroupIds ], "" ] ] - HasSubnets: !Not [ !Equals [ !Join [ "", !Ref SubnetIds ], "" ] ] - HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] - NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] - CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] - HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] - NotHasGlueConnectionAndHasKmsKeyId: !And - - !Not [ !Condition HasGlueConnection ] - - !Condition HasKmsKeyId + IsVPCAccessSelected: !Equals [!Ref IsVPCAccess, true] + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] Resources: ConnectorConfig: @@ -106,127 +93,60 @@ Resources: Properties: Environment: Variables: - disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref 
DisableSpillEncryption ] - spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] - spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] - kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] - glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] - auto_discover_endpoint: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref AutoDiscoverEndpoint ] - domain_mapping: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DomainMapping ] - query_timeout_cluster: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref QueryTimeoutCluster ] - query_timeout_search: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref QueryTimeoutSearch ] - query_scroll_timeout: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref QueryScrollTimeout ] - FunctionName: !Ref LambdaFunctionName + disable_spill_encryption: !Ref DisableSpillEncryption + spill_bucket: !Ref SpillBucket + spill_prefix: !Ref SpillPrefix + auto_discover_endpoint: !Ref AutoDiscoverEndpoint + domain_mapping: !Ref DomainMapping + query_timeout_cluster: !Ref QueryTimeoutCluster + query_timeout_search: !Ref QueryTimeoutSearch + query_scroll_timeout: !Ref QueryScrollTimeout + FunctionName: !Sub "${AthenaCatalogName}" Handler: "com.amazonaws.athena.connectors.elasticsearch.ElasticsearchCompositeHandler" - CodeUri: "./target/athena-elasticsearch-2024.18.2.jar" + CodeUri: "./target/athena-elasticsearch-2022.47.1.jar" Description: "The Elasticsearch Lambda Connector provides Athena users the ability to query data stored on Elasticsearch clusters." 
Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] - VpcConfig: - SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] - SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] - - FunctionRole: - Condition: NotHasLambdaRole - Type: AWS::IAM::Role - Properties: - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] - ManagedPolicyArns: - - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" - AssumeRolePolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Principal: - Service: - - lambda.amazonaws.com - Action: - - "sts:AssumeRole" - - FunctionExecutionPolicy: - Condition: NotHasLambdaRole - Type: "AWS::IAM::Policy" - Properties: - Roles: - - !Ref FunctionRole - PolicyName: FunctionExecutionPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Action: - - secretsmanager:GetSecretValue - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' - - Action: - - es:List* - - es:Describe* - - es:ESHttp* - - glue:GetTableVersions - - glue:GetPartitions - - glue:GetTables - - glue:GetTableVersion - - glue:GetDatabases - - glue:GetTable - - glue:GetPartition - - glue:GetDatabase - - athena:GetQueryExecution - - Effect: Allow - Resource: '*' - - Action: - - ec2:CreateNetworkInterface - - ec2:DeleteNetworkInterface - - ec2:DescribeNetworkInterfaces - - ec2:DetachNetworkInterface - Effect: Allow - Resource: '*' - - Action: - - s3:GetObject - - s3:ListBucket - - s3:GetBucketLocation - - s3:GetObjectVersion - - s3:PutObject - - s3:PutObjectAcl - - s3:GetLifecycleConfiguration - - s3:PutLifecycleConfiguration - - s3:DeleteObject - Effect: Allow - Resource: - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName} - - bucketName: - Ref: SpillBucket - - Fn::Sub: - - 
arn:${AWS::Partition}:s3:::${bucketName}/* - - bucketName: - Ref: SpillBucket - - !If - - HasGlueConnection + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] + Policies: + - Statement: - Action: - - glue:GetConnection + - secretsmanager:GetSecretValue Effect: Allow - Resource: - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' - - !Ref "AWS::NoValue" - - FunctionKmsPolicy: - Condition: CreateKmsPolicy - Type: "AWS::IAM::Policy" - Properties: - PolicyName: FunctionKmsPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Action: - - kms:GenerateRandom - Resource: '*' - - Effect: Allow - Action: - - kms:GenerateDataKey - Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" - Roles: - - !Ref FunctionRole \ No newline at end of file + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' + Version: '2012-10-17' + - Statement: + - Action: + - es:List* + - es:Describe* + - es:ESHttp* + - glue:GetTableVersions + - glue:GetPartitions + - glue:GetTables + - glue:GetTableVersion + - glue:GetDatabases + - glue:GetTable + - glue:GetPartition + - glue:GetDatabase + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + Version: '2012-10-17' + #S3CrudPolicy allows our connector to spill large responses to S3. You can optionally replace this pre-made policy + #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. 
+ - S3CrudPolicy: + BucketName: !Ref SpillBucket + VpcConfig: + SecurityGroupIds: + !If + - IsVPCAccessSelected + - + !Ref SecurityGroupIds + - !Ref "AWS::NoValue" + SubnetIds: + !If + - IsVPCAccessSelected + - + !Ref SubnetIds + - !Ref "AWS::NoValue" \ No newline at end of file diff --git a/athena-gcs/athena-gcs-connection.yaml b/athena-gcs/athena-gcs-connection.yaml new file mode 100644 index 0000000000..55da3c8f9b --- /dev/null +++ b/athena-gcs/athena-gcs-connection.yaml @@ -0,0 +1,151 @@ +Transform: 'AWS::Serverless-2016-10-31' + +Metadata: + AWS::ServerlessRepo::Application: + Name: AthenaGCSConnectorWithGlueConnection + Description: 'This connector enables Amazon Athena to communicate with Google Cloud Storage (GCS) and fetch data from Parquet file format' + Author: 'default author' + SpdxLicenseId: Apache-2.0 + LicenseUrl: LICENSE.txt + ReadmeUrl: README.md + Labels: ['athena-federation', 'GCS', 'Google-Cloud-Storage', 'parquet', 'csv'] + HomePageUrl: https://github.com/awslabs/aws-athena-query-federation + SemanticVersion: 2022.47.1 + SourceCodeUrl: https://github.com/awslabs/aws-athena-query-federation + +Parameters: + LambdaFunctionName: + Description: 'The name you will give to this catalog is a Lambda name for Athena. Athena will use this name as the function name. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' + Type: String + AllowedPattern: ^[a-z0-9-_]{1,64}$ + SpillBucket: + Description: 'The name of the bucket where this function can spill data.' + Type: String + GlueConnection: + Description: "Name of glue connection storing connection details for Federated Data source." + Type: String + SecretManagerGcpCredsName: + Description: 'Secret key name in the AWS Secrets Manager.' + Type: String + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. 
Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." + Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" + +Conditions: + HasKmsKeyId: !Not [!Equals [!Ref KmsKeyId, ""]] + NotHasLambdaRole: !Equals [!Ref LambdaRoleArn, ""] + CreateKmsPolicy: !And [!Condition HasKmsKeyId, !Condition NotHasLambdaRole] + +Resources: + AthenaGCSConnector: + Type: 'AWS::Serverless::Function' + Properties: + Environment: + Variables: + glue_connection: !Ref GlueConnection + FunctionName: !Ref LambdaFunctionName + Handler: "com.amazonaws.athena.connectors.gcs.GcsCompositeHandler" + CodeUri: "./target/athena-gcs.zip" + Description: "Amazon Athena GCS Connector" + Runtime: java11 + Timeout: 900 + MemorySize: 3008 + Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - athena:GetQueryExecution + - glue:GetTableVersions + - glue:GetPartitions + - glue:GetTables + - glue:GetTableVersion + - glue:GetDatabases + - glue:GetTable + - glue:GetPartition + - glue:GetDatabase + Effect: Allow + Resource: '*' + - Action: + - s3:ListBucket + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:s3:::${SpillBucket}' + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 
'arn:${AWS::Partition}:secretsmanager:*:*:secret:${SecretManagerGcpCredsName}*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-gcs/athena-gcs.yaml b/athena-gcs/athena-gcs.yaml index 1078dbb313..621998ba81 100644 --- a/athena-gcs/athena-gcs.yaml +++ b/athena-gcs/athena-gcs.yaml @@ -25,9 +25,6 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill - GlueConnection: - Description: "Name of glue connection storing connection details for Federated Data source." - Type: String LambdaTimeout: Description: "Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)" Default: 900 @@ -40,31 +37,16 @@ Parameters: Description: "WARNING: If set to 'true' encryption for spilled data is disabled." Default: "false" Type: String - SecretManagerGcpCredsName: + GCSSecretName: Description: 'Secret key name in the AWS Secrets Manager.' 
Type: String - PermissionsBoundaryArn: + PermissionsBoundaryARN: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String - KmsKeyId: - Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." - Type: String - Default: "" - LambdaRoleArn: - Description: "(Optional) A custom role to be used by the Connector lambda" - Type: String - Default: "" Conditions: - HasKmsKeyId: !Not [!Equals [!Ref KmsKeyId, ""]] - NotHasLambdaRole: !Equals [!Ref LambdaRoleArn, ""] - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] - CreateKmsPolicy: !And [!Condition HasKmsKeyId, !Condition NotHasLambdaRole] - HasGlueConnection: !Not [!Equals [!Ref GlueConnection, ""]] - NotHasGlueConnectionAndHasKmsKeyId: !And - - !Not [!Condition HasGlueConnection] - - !Condition HasKmsKeyId + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] Resources: AthenaGCSConnector: @@ -72,12 +54,10 @@ Resources: Properties: Environment: Variables: - disable_spill_encryption: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption] - spill_bucket: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket] - spill_prefix: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix] - kms_key_id: !If [NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue"] - glue_connection: !If [HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue"] - secret_manager_gcp_creds_name: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref SecretManagerGcpCredsName] + disable_spill_encryption: !Ref DisableSpillEncryption + spill_bucket: !Ref SpillBucket + spill_prefix: !Ref SpillPrefix + secret_manager_gcp_creds_name: !Ref GCSSecretName FunctionName: !Ref 
LambdaFunctionName Handler: "com.amazonaws.athena.connectors.gcs.GcsCompositeHandler" CodeUri: "./target/athena-gcs.zip" @@ -85,101 +65,37 @@ Resources: Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] - - FunctionRole: - Condition: NotHasLambdaRole - Type: AWS::IAM::Role - Properties: - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] - ManagedPolicyArns: - - "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" - AssumeRolePolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Principal: - Service: - - lambda.amazonaws.com - Action: - - "sts:AssumeRole" - - FunctionExecutionPolicy: - Condition: NotHasLambdaRole - Type: "AWS::IAM::Policy" - Properties: - Roles: - - !Ref FunctionRole - PolicyName: FunctionExecutionPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Action: - - athena:GetQueryExecution - - glue:GetTableVersions - - glue:GetPartitions - - glue:GetTables - - glue:GetTableVersion - - glue:GetDatabases - - glue:GetTable - - glue:GetPartition - - glue:GetDatabase - Effect: Allow - Resource: '*' - - Action: - - s3:ListBucket - Effect: Allow - Resource: - - !Sub 'arn:${AWS::Partition}:s3:::${SpillBucket}' - - Action: - - secretsmanager:GetSecretValue - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:*:*:secret:${SecretManagerGcpCredsName}*' - - Action: - - s3:GetObject - - s3:ListBucket - - s3:GetBucketLocation - - s3:GetObjectVersion - - s3:PutObject - - s3:PutObjectAcl - - s3:GetLifecycleConfiguration - - s3:PutLifecycleConfiguration - - s3:DeleteObject - Effect: Allow - Resource: - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName} - - bucketName: - Ref: SpillBucket - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName}/* - - bucketName: - Ref: SpillBucket - - !If - - HasGlueConnection + PermissionsBoundary: !If [ 
HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] + Policies: + - Statement: - Action: - - glue:GetConnection + - athena:GetQueryExecution + - glue:GetTableVersions + - glue:GetPartitions + - glue:GetTables + - glue:GetTableVersion + - glue:GetDatabases + - glue:GetTable + - glue:GetPartition + - glue:GetDatabase + Effect: Allow + Resource: '*' + Version: '2012-10-17' + - Statement: + - Action: + - s3:ListBucket Effect: Allow Resource: - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' - - !Ref "AWS::NoValue" - - FunctionKmsPolicy: - Condition: CreateKmsPolicy - Type: "AWS::IAM::Policy" - Properties: - PolicyName: FunctionKmsPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Action: - - kms:GenerateRandom - Resource: '*' - - Effect: Allow - Action: - - kms:GenerateDataKey - Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" - Roles: - - !Ref FunctionRole \ No newline at end of file + - !Sub 'arn:${AWS::Partition}:s3:::${SpillBucket}' + Version: '2012-10-17' + - Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:*:*:secret:${GCSSecretName}*' + - S3ReadPolicy: + BucketName: + Ref: SpillBucket + - S3WritePolicy: + BucketName: + Ref: SpillBucket \ No newline at end of file diff --git a/athena-google-bigquery/athena-google-bigquery-connection.yaml b/athena-google-bigquery/athena-google-bigquery-connection.yaml new file mode 100644 index 0000000000..d5aa61df95 --- /dev/null +++ b/athena-google-bigquery/athena-google-bigquery-connection.yaml @@ -0,0 +1,170 @@ +Transform: 'AWS::Serverless-2016-10-31' +Metadata: + 'AWS::ServerlessRepo::Application': + Name: AthenaGoogleBigQueryConnectorWithGlueConnection + Description: 'This connector enables Amazon Athena to communicate with Big Query using 
Google SDK' + Author: 'default author' + SpdxLicenseId: Apache-2.0 + LicenseUrl: LICENSE.txt + ReadmeUrl: README.md + Labels: + - Trianz + - Big-Query + - Athena-Federation + - Google-SDK + HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' + SemanticVersion: 2022.47.1 + SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' +Parameters: + LambdaFunctionName: + Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' + Type: String + AllowedPattern: ^[a-z0-9-_]{1,64}$ + SecretManagerGcpCredsName: + Description: "The secret name within AWS Secrets Manager that contains your Google Cloud Platform Credentials." + Type: String + SpillBucket: + Description: 'The name of the bucket where this function can spill data.' + Type: String + GlueConnection: + Description: "Name of glue connection storing connection details for Federated Data source." + Type: String + SecurityGroupIds: + Description: '(Optional) One or more SecurityGroup IDs corresponding to the SecurityGroup that should be applied to the Lambda function. (e.g. sg1,sg2,sg3)' + Type: CommaDelimitedList + Default: "" + SubnetIds: + Description: '(Optional) One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' + Type: CommaDelimitedList + Default: "" + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
+ Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" + +Conditions: + HasSecurityGroups: !Not [ !Equals [ !Join ["", !Ref SecurityGroupIds], "" ] ] + HasSubnets: !Not [ !Equals [ !Join ["", !Ref SubnetIds], "" ] ] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + +Resources: + AthenaBigQueryConnector: + Type: 'AWS::Serverless::Function' + Properties: + Environment: + Variables: + glue_connection: !Ref GlueConnection + GOOGLE_APPLICATION_CREDENTIALS: '/tmp/service-account.json' + FunctionName: !Ref LambdaFunctionName + Handler: "com.amazonaws.athena.connectors.google.bigquery.BigQueryCompositeHandler" + CodeUri: "./target/athena-google-bigquery-2022.47.1.jar" + Description: "Enables Amazon Athena to communicate with BigQuery using Google SDK" + Runtime: java11 + Timeout: 900 + MemorySize: 3008 + Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] + VpcConfig: + SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] + SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 
'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretManagerGcpCredsName}*' + - Action: + - logs:CreateLogGroup + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' + - Action: + - logs:CreateLogStream + - logs:PutLogEvents + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' + - Action: + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-google-bigquery/athena-google-bigquery.yaml b/athena-google-bigquery/athena-google-bigquery.yaml index b2d1652070..00465fc040 100644 --- 
a/athena-google-bigquery/athena-google-bigquery.yaml +++ b/athena-google-bigquery/athena-google-bigquery.yaml @@ -20,15 +20,14 @@ Parameters: Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' Type: String AllowedPattern: ^[a-z0-9-_]{1,64}$ - GcpProjectId: - Description: "(Optional if Glue Connection is provided) The project ID within Google Cloud Platform ." - Default: '' + GCPProjectID: + Description: "The project ID within Google Cloud Platform ." Type: String BigQueryEndpoint: Description: "(Optional) BigQuery Private Endpoint" Default: '' Type: String - SecretManagerGcpCredsName: + SecretNamePrefix: Description: "The secret name within AWS Secrets Manager that contains your Google Cloud Platform Credentials." Type: String SpillBucket: @@ -38,10 +37,6 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill - GlueConnection: - Description: "(Optional) Name of glue connection storing connection details for Federated Data source." - Type: String - Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -62,45 +57,26 @@ Parameters: Description: '(Optional) One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' Type: CommaDelimitedList Default: "" - PermissionsBoundaryArn: + PermissionsBoundaryARN: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String - KmsKeyId: - Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
- Type: String - Default: "" - LambdaRoleArn: - Description: "(Optional) A custom role to be used by the Connector lambda" - Type: String - Default: "" - Conditions: + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] HasSecurityGroups: !Not [ !Equals [ !Join ["", !Ref SecurityGroupIds], "" ] ] HasSubnets: !Not [ !Equals [ !Join ["", !Ref SubnetIds], "" ] ] - HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] - NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] - CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] - HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] - NotHasGlueConnectionAndHasKmsKeyId: !And - - !Not [ !Condition HasGlueConnection ] - - !Condition HasKmsKeyId - Resources: AthenaBigQueryConnector: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] - spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] - spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] - kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] - glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] - secret_manager_gcp_creds_name: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SecretManagerGcpCredsName ] - gcp_project_id: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref GcpProjectId ] - big_query_endpoint: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref BigQueryEndpoint ] + disable_spill_encryption: !Ref DisableSpillEncryption + spill_bucket: !Ref SpillBucket + spill_prefix: !Ref SpillPrefix + secret_manager_gcp_creds_name: !Ref SecretNamePrefix + gcp_project_id: !Ref GCPProjectID + big_query_endpoint: !Ref BigQueryEndpoint GOOGLE_APPLICATION_CREDENTIALS: '/tmp/service-account.json' 
FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.google.bigquery.BigQueryCompositeHandler" @@ -109,108 +85,39 @@ Resources: Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] - VpcConfig: - SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] - SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] - - FunctionRole: - Condition: NotHasLambdaRole - Type: AWS::IAM::Role - Properties: - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] - ManagedPolicyArns: - - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" - AssumeRolePolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Principal: - Service: - - lambda.amazonaws.com - Action: - - "sts:AssumeRole" - - FunctionExecutionPolicy: - Condition: NotHasLambdaRole - Type: "AWS::IAM::Policy" - Properties: - Roles: - - !Ref FunctionRole - PolicyName: FunctionExecutionPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Action: - - secretsmanager:GetSecretValue - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretManagerGcpCredsName}*' - - Action: - - logs:CreateLogGroup - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' - - Action: - - logs:CreateLogStream - - logs:PutLogEvents - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' - - Action: - - athena:GetQueryExecution - - Effect: Allow - Resource: '*' - - Action: - - ec2:CreateNetworkInterface - - ec2:DeleteNetworkInterface - - ec2:DescribeNetworkInterfaces - - ec2:DetachNetworkInterface - Effect: Allow - Resource: '*' - - Action: - - s3:GetObject - - s3:ListBucket - - s3:GetBucketLocation - - 
s3:GetObjectVersion - - s3:PutObject - - s3:PutObjectAcl - - s3:GetLifecycleConfiguration - - s3:PutLifecycleConfiguration - - s3:DeleteObject - Effect: Allow - Resource: - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName} - - bucketName: - Ref: SpillBucket - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName}/* - - bucketName: - Ref: SpillBucket - - !If - - HasGlueConnection + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] + Policies: + - Statement: - Action: - - glue:GetConnection + - secretsmanager:GetSecretValue Effect: Allow - Resource: - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' - - !Ref "AWS::NoValue" - - FunctionKmsPolicy: - Condition: CreateKmsPolicy - Type: "AWS::IAM::Policy" - Properties: - PolicyName: FunctionKmsPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Action: - - kms:GenerateRandom - Resource: '*' - - Effect: Allow - Action: - - kms:GenerateDataKey - Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" - Roles: - - !Ref FunctionRole \ No newline at end of file + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' + Version: '2012-10-17' + - Statement: + - Action: + - logs:CreateLogGroup + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' + Version: '2012-10-17' + - Statement: + - Action: + - logs:CreateLogStream + - logs:PutLogEvents + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' + Version: '2012-10-17' + - Statement: + - Action: + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + Version: '2012-10-17' + #S3CrudPolicy allows our connector to spill large responses to S3. 
You can optionally replace this pre-made policy + #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. + - S3CrudPolicy: + BucketName: !Ref SpillBucket + #VPCAccessPolicy allows our connector to run in a VPC so that it can access your data source. + - VPCAccessPolicy: { } + VpcConfig: + SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] + SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] \ No newline at end of file diff --git a/athena-hbase/athena-hbase-connection.yaml b/athena-hbase/athena-hbase-connection.yaml new file mode 100644 index 0000000000..9ba9662917 --- /dev/null +++ b/athena-hbase/athena-hbase-connection.yaml @@ -0,0 +1,161 @@ +Transform: 'AWS::Serverless-2016-10-31' +Metadata: + 'AWS::ServerlessRepo::Application': + Name: AthenaHBaseConnectorWithGlueConnection + Description: 'This connector enables Amazon Athena to communicate with your HBase instance(s), making your HBase data accessible via SQL.' + Author: 'default author' + SpdxLicenseId: Apache-2.0 + LicenseUrl: LICENSE.txt + ReadmeUrl: README.md + Labels: + - athena-federation + HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' + SemanticVersion: 2022.47.1 + SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' +Parameters: + LambdaFunctionName: + Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' + Type: String + AllowedPattern: ^[a-z0-9-_]{1,64}$ + SpillBucket: + Description: 'The name of the bucket where this function can spill data.' + Type: String + GlueConnection: + Description: "Name of glue connection storing connection details for Federated Data source." + Type: String + SecurityGroupIds: + Description: 'One or more SecurityGroup IDs corresponding to the SecurityGroup that should be applied to the Lambda function. (e.g. 
sg1,sg2,sg3)' + Type: 'List' + SubnetIds: + Description: 'One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' + Type: 'List' + SecretName: + Description: 'The name or prefix of a set of names within Secrets Manager that this function should have access to. (e.g. hbase-*).' + Type: String + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." + Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" + +Conditions: + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + +Resources: + ConnectorConfig: + Type: 'AWS::Serverless::Function' + Properties: + Environment: + Variables: + glue_connection: !Ref GlueConnection + FunctionName: !Ref LambdaFunctionName + Handler: "com.amazonaws.athena.connectors.hbase.HbaseCompositeHandler" + CodeUri: "./target/athena-hbase-2022.47.1.jar" + Description: "Enables Amazon Athena to communicate with HBase, making your HBase data accessible via SQL" + Runtime: java11 + Timeout: 900 + MemorySize: 3008 + Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] + VpcConfig: + SecurityGroupIds: !Ref SecurityGroupIds + SubnetIds: !Ref SubnetIds + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + 
Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:*:*:secret:${SecretName}*' + - Action: + - glue:GetTableVersions + - glue:GetPartitions + - glue:GetTables + - glue:GetTableVersion + - glue:GetDatabases + - glue:GetTable + - glue:GetPartition + - glue:GetDatabase + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-hbase/athena-hbase.yaml b/athena-hbase/athena-hbase.yaml index 5ef5b5ea7d..447f96048f 100644 --- 
a/athena-hbase/athena-hbase.yaml +++ b/athena-hbase/athena-hbase.yaml @@ -13,7 +13,7 @@ Metadata: SemanticVersion: 2022.47.1 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: - LambdaFunctionName: + AthenaCatalogName: Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' Type: String AllowedPattern: ^[a-z0-9-_]{1,64}$ @@ -24,10 +24,6 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill - GlueConnection: - Description: "(Optional) Name of glue connection storing connection details for Federated Data source." - Type: String - Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -46,20 +42,19 @@ Parameters: SubnetIds: Description: 'One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' Type: 'List' - SecretName: + SecretNameOrPrefix: Description: 'The name or prefix of a set of names within Secrets Manager that this function should have access to. (e.g. hbase-*).' Type: String - DefaultHbase: + HBaseConnectionString: Description: 'The HBase connection details to use by default in the format: master_hostname:hbase_port:zookeeper_port and optionally using SecretsManager (e.g. ${secret_name}).' Type: String - Default: "" - PermissionsBoundaryArn: + PermissionsBoundaryARN: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String KerberosAuthEnabled: Description: 'Kerberos authentication enabled or not' - Default: "" + Default: "false" Type: String KerberosConfigFilesS3Reference: Description: 'The S3 bucket reference where kerberos auth config files are uploaded. 
Applicable for Kerberos auth' @@ -73,148 +68,62 @@ Parameters: Description: 'Hbase Rpc Protection value for Kerberos authentication' Default: "" Type: String - KmsKeyId: - Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." - Type: String - Default: "" - LambdaRoleArn: - Description: "(Optional) A custom role to be used by the Connector lambda" - Type: String - Default: "" - Conditions: - HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] - NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] - CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] - HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] - NotHasGlueConnectionAndHasKmsKeyId: !And - - !Not [ !Condition HasGlueConnection ] - - !Condition HasKmsKeyId - + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] Resources: ConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] - spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] - spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] - kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] - glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] - default_hbase: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultHbase ] - kerberos_auth_enabled: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref KerberosAuthEnabled ] - kerberos_config_files_s3_reference: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref KerberosConfigFilesS3Reference ] - principal_name: !If [ 
HasGlueConnection, !Ref "AWS::NoValue", !Ref PrincipalName ] - hbase_rpc_protection: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref HbaseRpcProtection ] - FunctionName: !Ref LambdaFunctionName + disable_spill_encryption: !Ref DisableSpillEncryption + spill_bucket: !Ref SpillBucket + spill_prefix: !Ref SpillPrefix + default_hbase: !Ref HBaseConnectionString + kerberos_auth_enabled: !Ref KerberosAuthEnabled + kerberos_config_files_s3_reference: !Ref KerberosConfigFilesS3Reference + principal_name: !Ref PrincipalName + hbase_rpc_protection: !Ref HbaseRpcProtection + FunctionName: !Ref AthenaCatalogName Handler: "com.amazonaws.athena.connectors.hbase.HbaseCompositeHandler" CodeUri: "./target/athena-hbase-2022.47.1.jar" Description: "Enables Amazon Athena to communicate with HBase, making your HBase data accessible via SQL" Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] - VpcConfig: - SecurityGroupIds: !Ref SecurityGroupIds - SubnetIds: !Ref SubnetIds - - FunctionRole: - Condition: NotHasLambdaRole - Type: AWS::IAM::Role - Properties: - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] - ManagedPolicyArns: - - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" - AssumeRolePolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Principal: - Service: - - lambda.amazonaws.com - Action: - - "sts:AssumeRole" - - FunctionExecutionPolicy: - Condition: NotHasLambdaRole - Type: "AWS::IAM::Policy" - Properties: - Roles: - - !Ref FunctionRole - PolicyName: FunctionExecutionPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Action: - - secretsmanager:GetSecretValue - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:*:*:secret:${SecretName}*' - - Action: - - glue:GetTableVersions - - glue:GetPartitions - - glue:GetTables - - glue:GetTableVersion - - 
glue:GetDatabases - - glue:GetTable - - glue:GetPartition - - glue:GetDatabase - - athena:GetQueryExecution - Effect: Allow - Resource: '*' - - Action: - - ec2:CreateNetworkInterface - - ec2:DeleteNetworkInterface - - ec2:DescribeNetworkInterfaces - - ec2:DetachNetworkInterface - Effect: Allow - Resource: '*' - - Action: - - s3:GetObject - - s3:ListBucket - - s3:GetBucketLocation - - s3:GetObjectVersion - - s3:PutObject - - s3:PutObjectAcl - - s3:GetLifecycleConfiguration - - s3:PutLifecycleConfiguration - - s3:DeleteObject - Effect: Allow - Resource: - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName} - - bucketName: - Ref: SpillBucket - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName}/* - - bucketName: - Ref: SpillBucket - - !If - - HasGlueConnection + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] + Policies: + - Statement: - Action: - - glue:GetConnection + - secretsmanager:GetSecretValue Effect: Allow - Resource: - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' - - !Ref "AWS::NoValue" - - FunctionKmsPolicy: - Condition: CreateKmsPolicy - Type: "AWS::IAM::Policy" - Properties: - PolicyName: FunctionKmsPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Action: - - kms:GenerateRandom - Resource: '*' - - Effect: Allow - Action: - - kms:GenerateDataKey - Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" - Roles: - - !Ref FunctionRole \ No newline at end of file + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:*:*:secret:${SecretNameOrPrefix}' + Version: '2012-10-17' + - Statement: + - Action: + - glue:GetTableVersions + - glue:GetPartitions + - glue:GetTables + - glue:GetTableVersion + - glue:GetDatabases + - glue:GetTable + - glue:GetPartition + - glue:GetDatabase + - athena:GetQueryExecution + - 
s3:ListBucket + - s3:GetObject + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:GetLifecycleConfiguration + Effect: Allow + Resource: '*' + Version: '2012-10-17' + #S3CrudPolicy allows our connector to spill large responses to S3. You can optionally replace this pre-made policy + #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. + - S3CrudPolicy: + BucketName: !Ref SpillBucket + #VPCAccessPolicy allows our connector to run in a VPC so that it can access your data source. + - VPCAccessPolicy: {} + VpcConfig: + SecurityGroupIds: !Ref SecurityGroupIds + SubnetIds: !Ref SubnetIds \ No newline at end of file diff --git a/athena-hortonworks-hive/athena-hortonworks-hive-connection.yaml b/athena-hortonworks-hive/athena-hortonworks-hive-connection.yaml new file mode 100644 index 0000000000..4671c4b710 --- /dev/null +++ b/athena-hortonworks-hive/athena-hortonworks-hive-connection.yaml @@ -0,0 +1,166 @@ +Transform: 'AWS::Serverless-2016-10-31' +Metadata: + 'AWS::ServerlessRepo::Application': + Name: AthenaHortonworksHiveConnectorWithGlueConnection + Description: 'This connector enables Amazon Athena to communicate with your Hortonworks Hive instance(s) using JDBC driver.' + Author: 'default author' + SpdxLicenseId: Apache-2.0 + LicenseUrl: LICENSE.txt + ReadmeUrl: README.md + Labels: + - athena-federation + HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' + SemanticVersion: 2022.47.1 + SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' +Parameters: + LambdaFunctionName: + Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' + Type: String + AllowedPattern: ^[a-z0-9-_]{1,64}$ + SecretName: + Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. 
All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to allow any secret names.' + Type: String + SpillBucket: + Description: 'The name of the bucket where this function can spill data.' + Type: String + GlueConnection: + Description: "Name of glue connection storing connection details for Federated Data source." + Type: String + SecurityGroupIds: + Description: '(Optional) One or more SecurityGroup IDs corresponding to the SecurityGroup that should be applied to the Lambda function. (e.g. sg1,sg2,sg3)' + Type: CommaDelimitedList + Default: "" + SubnetIds: + Description: '(Optional) One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access your data source. (e.g. subnet1,subnet2)' + Type: CommaDelimitedList + Default: "" + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
+ Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" + +Conditions: + HasSecurityGroups: !Not [ !Equals [ !Join ["", !Ref SecurityGroupIds], "" ] ] + HasSubnets: !Not [ !Equals [ !Join ["", !Ref SubnetIds], "" ] ] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + +Resources: + JdbcConnectorConfig: + Type: 'AWS::Serverless::Function' + Properties: + Environment: + Variables: + glue_connection: !Ref GlueConnection + FunctionName: !Ref LambdaFunctionName + Handler: "com.amazonaws.athena.connectors.hortonworks.HiveMuxCompositeHandler" + CodeUri: "./target/athena-hortonworks-hive-2022.47.1.jar" + Description: "Enables Amazon Athena to communicate with Hortonworks Hive using JDBC" + Runtime: java11 + Timeout: 900 + MemorySize: 3008 + Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] + VpcConfig: + SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] + SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' + - Action: + - logs:CreateLogGroup 
+ Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' + - Action: + - logs:CreateLogStream + - logs:PutLogEvents + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' + - Action: + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-hortonworks-hive/athena-hortonworks-hive.yaml b/athena-hortonworks-hive/athena-hortonworks-hive.yaml index a506e540ce..5de7161e5b 100644 --- a/athena-hortonworks-hive/athena-hortonworks-hive.yaml +++ b/athena-hortonworks-hive/athena-hortonworks-hive.yaml @@ -20,8 +20,7 @@ Parameters: DefaultConnectionString: Description: 
'The default connection string is used when catalog is "lambda:${LambdaFunctionName}". Catalog specific Connection Strings can be added later. Format: ${DatabaseType}://${NativeJdbcConnectionString}.' Type: String - Default: "" - SecretName: + SecretNamePrefix: Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' Type: String SpillBucket: @@ -31,10 +30,6 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill - GlueConnection: - Description: "(Optional) Name of glue connection storing connection details for Federated Data source." - Type: String - Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -55,43 +50,24 @@ Parameters: Description: '(Optional) One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' Type: CommaDelimitedList Default: "" - PermissionsBoundaryArn: + PermissionsBoundaryARN: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String - KmsKeyId: - Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
- Type: String - Default: "" - LambdaRoleArn: - Description: "(Optional) A custom role to be used by the Connector lambda" - Type: String - Default: "" - Conditions: + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] HasSecurityGroups: !Not [ !Equals [ !Join ["", !Ref SecurityGroupIds], "" ] ] HasSubnets: !Not [ !Equals [ !Join ["", !Ref SubnetIds], "" ] ] - HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] - NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] - CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] - HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] - NotHasGlueConnectionAndHasKmsKeyId: !And - - !Not [ !Condition HasGlueConnection ] - - !Condition HasKmsKeyId - Resources: JdbcConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] - spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] - spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] - kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] - glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] - default: [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultConnectionString ] + disable_spill_encryption: !Ref DisableSpillEncryption + spill_bucket: !Ref SpillBucket + spill_prefix: !Ref SpillPrefix + default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.hortonworks.HiveMuxCompositeHandler" CodeUri: "./target/athena-hortonworks-hive-2022.47.1.jar" @@ -99,107 +75,39 @@ Resources: Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] - 
VpcConfig: - SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] - SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] - - FunctionRole: - Condition: NotHasLambdaRole - Type: AWS::IAM::Role - Properties: - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] - ManagedPolicyArns: - - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" - AssumeRolePolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Principal: - Service: - - lambda.amazonaws.com - Action: - - "sts:AssumeRole" - - FunctionExecutionPolicy: - Condition: NotHasLambdaRole - Type: "AWS::IAM::Policy" - Properties: - Roles: - - !Ref FunctionRole - PolicyName: FunctionExecutionPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Action: - - secretsmanager:GetSecretValue - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' - - Action: - - logs:CreateLogGroup - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' - - Action: - - logs:CreateLogStream - - logs:PutLogEvents - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' - - Action: - - athena:GetQueryExecution - Effect: Allow - Resource: '*' - - Action: - - ec2:CreateNetworkInterface - - ec2:DeleteNetworkInterface - - ec2:DescribeNetworkInterfaces - - ec2:DetachNetworkInterface - Effect: Allow - Resource: '*' - - Action: - - s3:GetObject - - s3:ListBucket - - s3:GetBucketLocation - - s3:GetObjectVersion - - s3:PutObject - - s3:PutObjectAcl - - s3:GetLifecycleConfiguration - - s3:PutLifecycleConfiguration - - s3:DeleteObject - Effect: Allow - Resource: - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName} - - bucketName: - Ref: SpillBucket - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName}/* - - 
bucketName: - Ref: SpillBucket - - !If - - HasGlueConnection + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] + Policies: + - Statement: - Action: - - glue:GetConnection + - secretsmanager:GetSecretValue Effect: Allow - Resource: - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' - - !Ref "AWS::NoValue" - - FunctionKmsPolicy: - Condition: CreateKmsPolicy - Type: "AWS::IAM::Policy" - Properties: - PolicyName: FunctionKmsPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Action: - - kms:GenerateRandom - Resource: '*' - - Effect: Allow - Action: - - kms:GenerateDataKey - Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" - Roles: - - !Ref FunctionRole \ No newline at end of file + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' + Version: '2012-10-17' + - Statement: + - Action: + - logs:CreateLogGroup + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' + Version: '2012-10-17' + - Statement: + - Action: + - logs:CreateLogStream + - logs:PutLogEvents + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' + Version: '2012-10-17' + - Statement: + - Action: + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + Version: '2012-10-17' + #S3CrudPolicy allows our connector to spill large responses to S3. You can optionally replace this pre-made policy + #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. + - S3CrudPolicy: + BucketName: !Ref SpillBucket + #VPCAccessPolicy allows our connector to run in a VPC so that it can access your data source. 
+ - VPCAccessPolicy: {} + VpcConfig: + SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] + SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] \ No newline at end of file diff --git a/athena-kafka/athena-kafka.yaml b/athena-kafka/athena-kafka.yaml index 446f819273..c80c4ecaa1 100644 --- a/athena-kafka/athena-kafka.yaml +++ b/athena-kafka/athena-kafka.yaml @@ -36,7 +36,7 @@ Parameters: Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' Type: String AllowedPattern: ^[a-z0-9-_]{1,64}$ - SecretsManagerSecret: + SecretNamePrefix: Description: "The secret name within AWS Secrets Manager that contains your aws key and secret Credentials" Default: "" Type: String @@ -47,10 +47,6 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill - GlueConnection: - Description: "(Optional) Name of glue connection storing connection details for Federated Data source." - Type: String - Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -75,30 +71,20 @@ Parameters: Description: 'The S3 bucket reference where keystore and truststore certificates are uploaded. Applicable for SSL auth' Default: "" Type: String - LambdaRoleArn: + LambdaRoleARN: Description: "(Optional) A custom role to be used by the Connector lambda" Default: "" Type: String - PermissionsBoundaryArn: + PermissionsBoundaryARN: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String - KmsKeyId: - Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
- Type: String - Default: "" Conditions: - HasSecurityGroups: !Not [ !Equals [ !Join [ "", !Ref SecurityGroupIds ], "" ] ] - HasSubnets: !Not [ !Equals [ !Join [ "", !Ref SubnetIds ], "" ] ] - HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] - NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] - CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] - HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] - NotHasGlueConnectionAndHasKmsKeyId: !And - - !Not [ !Condition HasGlueConnection ] - - !Condition HasKmsKeyId + NotHasLambdaRole: !Equals [!Ref LambdaRoleARN, ""] + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] + HasSecurityGroups: !Not [ !Equals [ !Join ["", !Ref SecurityGroupIds], "" ] ] + HasSubnets: !Not [ !Equals [ !Join ["", !Ref SubnetIds], "" ] ] Resources: AthenaKafkaConnector: @@ -106,16 +92,14 @@ Resources: Properties: Environment: Variables: - disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] - spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] - spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] - kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] - glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] - secrets_manager_secret: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SecretsManagerSecret ] - certificates_s3_reference: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref CertificatesS3Reference ] - kafka_endpoint: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref KafkaEndpoint ] + disable_spill_encryption: !Ref DisableSpillEncryption + spill_bucket: !Ref SpillBucket + spill_prefix: !Ref SpillPrefix + secrets_manager_secret: !Ref SecretNamePrefix + certificates_s3_reference: !Ref CertificatesS3Reference + kafka_endpoint: 
!Ref KafkaEndpoint schema_registry_url: !Ref SchemaRegistryUrl - auth_type: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref AuthType ] + auth_type: !Ref AuthType FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.kafka.KafkaCompositeHandler" CodeUri: "./target/athena-kafka-2022.47.1.jar" @@ -123,7 +107,7 @@ Resources: Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] + Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleARN] VpcConfig: SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue"] @@ -132,7 +116,7 @@ Resources: Condition: NotHasLambdaRole Type: AWS::IAM::Role Properties: - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] ManagedPolicyArns: - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" AssumeRolePolicyDocument: @@ -156,7 +140,7 @@ Resources: - Action: - secretsmanager:GetSecretValue Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretsManagerSecret}*' + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' - Action: - logs:CreateLogGroup Effect: Allow @@ -189,53 +173,5 @@ Resources: - autoscaling:CompleteLifecycleAction Effect: Allow Resource: '*' - - Action: - - s3:GetObject - - s3:ListBucket - - s3:GetBucketLocation - - s3:GetObjectVersion - - s3:PutObject - - s3:PutObjectAcl - - s3:GetLifecycleConfiguration - - s3:PutLifecycleConfiguration - - s3:DeleteObject - Effect: Allow - Resource: - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName} - - bucketName: - Ref: SpillBucket - - Fn::Sub: - - 
arn:${AWS::Partition}:s3:::${bucketName}/* - - bucketName: - Ref: SpillBucket - - !If - - HasGlueConnection - - Action: - - glue:GetConnection - Effect: Allow - Resource: - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' - - !Ref "AWS::NoValue" - Roles: - - !Ref FunctionRole - - FunctionKmsPolicy: - Condition: CreateKmsPolicy - Type: "AWS::IAM::Policy" - Properties: - PolicyName: FunctionKmsPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Action: - - kms:GenerateRandom - Resource: '*' - - Effect: Allow - Action: - - kms:GenerateDataKey - Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" Roles: - !Ref FunctionRole \ No newline at end of file diff --git a/athena-msk/athena-msk-connection.yaml b/athena-msk/athena-msk-connection.yaml new file mode 100644 index 0000000000..c268d25fb7 --- /dev/null +++ b/athena-msk/athena-msk-connection.yaml @@ -0,0 +1,160 @@ +Transform: 'AWS::Serverless-2016-10-31' +Metadata: + 'AWS::ServerlessRepo::Application': + Name: AthenaMSKConnectorWithGlueConnection + Description: 'This connector enables Amazon Athena to communicate with MSK clusters and fetch data.' + Author: 'default author' + SpdxLicenseId: Apache-2.0 + LicenseUrl: LICENSE.txt + ReadmeUrl: README.md + Labels: + - msk + - athena-federation + HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' + SemanticVersion: 2022.47.1 + SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' +Parameters: + LambdaFunctionName: + Description: 'This is the name of the lambda function that will be created. 
This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' + Type: String + AllowedPattern: ^[a-z0-9-_]{1,64}$ + SecretsManagerSecret: + Description: "The secret name within AWS Secrets Manager that contains your aws key and secret Credentials (Not Required for IAM AUTH)" + Default: "" + Type: String + SpillBucket: + Description: 'The name of the bucket where this function can spill data.' + Type: String + GlueConnection: + Description: "Name of glue connection storing connection details for Federated Data source." + Type: String + SecurityGroupIds: + Description: 'One or more SecurityGroup IDs corresponding to the SecurityGroup that should be applied to the Lambda function. (e.g. sg1,sg2,sg3)' + Type: CommaDelimitedList + Default: "" + SubnetIds: + Description: 'One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access your data source. (e.g. subnet1,subnet2)' + Type: CommaDelimitedList + Default: "" + LambdaRoleArn: + Description: "(Required for auth type IAM) A custom role to be used by the Connector lambda" + Default: "" + Type: String + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
+ Type: String + Default: "" + +Conditions: + HasSecurityGroups: !Not [ !Equals [ !Join ["", !Ref SecurityGroupIds], "" ] ] + HasSubnets: !Not [ !Equals [ !Join ["", !Ref SubnetIds], "" ] ] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + +Resources: + AthenaMSKConnector: + Type: 'AWS::Serverless::Function' + Properties: + Environment: + Variables: + glue_connection: !Ref GlueConnection + FunctionName: !Ref LambdaFunctionName + Handler: "com.amazonaws.athena.connectors.msk.AmazonMskCompositeHandler" + CodeUri: "./target/athena-msk-2022.47.1.jar" + Description: "Enables Amazon Athena to communicate with MSK clusters" + Runtime: java11 + Timeout: 900 + MemorySize: 3008 + Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] + VpcConfig: + SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] + SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue"] + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretsManagerSecret}*' + - Action: + - logs:CreateLogGroup + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' + - Action: + - logs:CreateLogStream + - logs:PutLogEvents + Effect: Allow + 
Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' + - Action: + - athena:GetQueryExecution + - s3:ListBucket + - s3:GetObject + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + - kms:Decrypt + - glue:* + Effect: Allow + Resource: '*' + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + - autoscaling:CompleteLifecycleAction + Effect: Allow + Resource: '*' + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + Roles: + - !Ref FunctionRole + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-msk/athena-msk.yaml b/athena-msk/athena-msk.yaml index e821044898..f83c4f185e 100644 --- a/athena-msk/athena-msk.yaml +++ b/athena-msk/athena-msk.yaml @@ -26,14 +26,13 @@ Parameters: - SSL - NO_AUTH KafkaEndpoint: - Description: '(Optional if Glue Connection is provided) MSK cluster endpoint' + Description: 'MSK cluster endpoint' Type: String - Default: "" LambdaFunctionName: Description: 'This is the name of the lambda function that will be created. 
This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' Type: String AllowedPattern: ^[a-z0-9-_]{1,64}$ - SecretsManagerSecret: + SecretNamePrefix: Description: "The secret name within AWS Secrets Manager that contains your aws key and secret Credentials(Not Required for IAM AUTH)" Default: "" Type: String @@ -44,10 +43,6 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill - GlueConnection: - Description: "(Optional) Name of glue connection storing connection details for Federated Data source." - Type: String - Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -72,30 +67,20 @@ Parameters: Description: 'The S3 bucket reference where keystore and truststore certificates are uploaded. Applicable for SSL auth' Default: "" Type: String - LambdaRoleArn: + LambdaRoleARN: Description: "(Must for auth type IAM) A custom role to be used by the Connector lambda" Default: "" Type: String - PermissionsBoundaryArn: + PermissionsBoundaryARN: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String - KmsKeyId: - Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
- Type: String - Default: "" Conditions: + NotHasLambdaRole: !Equals [!Ref LambdaRoleARN, ""] + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] HasSecurityGroups: !Not [ !Equals [ !Join ["", !Ref SecurityGroupIds], "" ] ] HasSubnets: !Not [ !Equals [ !Join ["", !Ref SubnetIds], "" ] ] - HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] - NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] - CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] - HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] - NotHasGlueConnectionAndHasKmsKeyId: !And - - !Not [ !Condition HasGlueConnection ] - - !Condition HasKmsKeyId Resources: AthenaMSKConnector: @@ -103,15 +88,13 @@ Resources: Properties: Environment: Variables: - disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] - spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] - spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] - kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] - glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] - secrets_manager_secret: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SecretsManagerSecret ] - certificates_s3_reference: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref CertificatesS3Reference ] - kafka_endpoint: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref KafkaEndpoint ] - auth_type: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref AuthType ] + disable_spill_encryption: !Ref DisableSpillEncryption + spill_bucket: !Ref SpillBucket + spill_prefix: !Ref SpillPrefix + secrets_manager_secret: !Ref SecretNamePrefix + certificates_s3_reference: !Ref CertificatesS3Reference + kafka_endpoint: !Ref KafkaEndpoint + auth_type: !Ref AuthType FunctionName: !Ref 
LambdaFunctionName Handler: "com.amazonaws.athena.connectors.msk.AmazonMskCompositeHandler" CodeUri: "./target/athena-msk-2022.47.1.jar" @@ -119,7 +102,7 @@ Resources: Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] + Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleARN] VpcConfig: SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue"] @@ -128,7 +111,7 @@ Resources: Condition: NotHasLambdaRole Type: AWS::IAM::Role Properties: - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] ManagedPolicyArns: - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" AssumeRolePolicyDocument: @@ -152,7 +135,7 @@ Resources: - Action: - secretsmanager:GetSecretValue Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretsManagerSecret}*' + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' - Action: - logs:CreateLogGroup Effect: Allow @@ -185,33 +168,5 @@ Resources: - autoscaling:CompleteLifecycleAction Effect: Allow Resource: '*' - - !If - - HasGlueConnection - - Action: - - glue:GetConnection - Effect: Allow - Resource: - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' - - !Ref "AWS::NoValue" - Roles: - - !Ref FunctionRole - - FunctionKmsPolicy: - Condition: CreateKmsPolicy - Type: "AWS::IAM::Policy" - Properties: - PolicyName: FunctionKmsPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Action: - - 
kms:GenerateRandom - Resource: '*' - - Effect: Allow - Action: - - kms:GenerateDataKey - Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" Roles: - !Ref FunctionRole \ No newline at end of file diff --git a/athena-mysql/athena-mysql-connection.yaml b/athena-mysql/athena-mysql-connection.yaml new file mode 100644 index 0000000000..ded479cd93 --- /dev/null +++ b/athena-mysql/athena-mysql-connection.yaml @@ -0,0 +1,162 @@ +Transform: 'AWS::Serverless-2016-10-31' +Metadata: + 'AWS::ServerlessRepo::Application': + Name: AthenaMySQLConnectorWithGlueConnection + Description: 'This connector enables Amazon Athena to communicate with your MySQL instance(s) using JDBC driver.' + Author: 'default author' + SpdxLicenseId: Apache-2.0 + LicenseUrl: LICENSE.txt + ReadmeUrl: README.md + Labels: + - athena-federation + HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' + SemanticVersion: 2022.47.1 + SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' +Parameters: + LambdaFunctionName: + Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' + Type: String + AllowedPattern: ^[a-z0-9-_]{1,64}$ + SecretName: + Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena MySQL Federation secret names can be prefixed with "AthenaMySQLFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaMySQLFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' + Type: String + SpillBucket: + Description: 'The name of the bucket where this function can spill data.' + Type: String + GlueConnection: + Description: "Name of glue connection storing connection details for Federated Data source." 
+ Type: String + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" + SecurityGroupIds: + Description: 'One or more SecurityGroup IDs corresponding to the SecurityGroup that should be applied to the Lambda function. (e.g. sg1,sg2,sg3)' + Type: 'List' + SubnetIds: + Description: 'One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' + Type: 'List' + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." + Type: String + Default: "" + +Conditions: + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + +Resources: + JdbcConnectorConfig: + Type: 'AWS::Serverless::Function' + Properties: + Environment: + Variables: + glue_connection: !Ref GlueConnection + FunctionName: !Ref LambdaFunctionName + Handler: "com.amazonaws.athena.connectors.mysql.MySqlMuxCompositeHandler" + CodeUri: "./target/athena-mysql-2022.47.1.jar" + Description: "Enables Amazon Athena to communicate with MySQL using JDBC" + Runtime: java11 + Timeout: 900 + MemorySize: 3008 + Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] + VpcConfig: + SecurityGroupIds: !Ref SecurityGroupIds + SubnetIds: !Ref SubnetIds + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: 
NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' + - Action: + - logs:CreateLogGroup + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' + - Action: + - logs:CreateLogStream + - logs:PutLogEvents + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' + - Action: + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - 
!Ref FunctionRole diff --git a/athena-mysql/athena-mysql.yaml b/athena-mysql/athena-mysql.yaml index 318343736f..e8e63c8995 100644 --- a/athena-mysql/athena-mysql.yaml +++ b/athena-mysql/athena-mysql.yaml @@ -20,8 +20,7 @@ Parameters: DefaultConnectionString: Description: 'The default connection string is used when catalog is "lambda:${LambdaFunctionName}". Catalog specific Connection Strings can be added later. Format: ${DatabaseType}://${NativeJdbcConnectionString}.' Type: String - Default: "" - SecretName: + SecretNamePrefix: Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena MySQL Federation secret names can be prefixed with "AthenaMySQLFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaMySQLFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' Type: String SpillBucket: @@ -31,10 +30,6 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill - GlueConnection: - Description: "(Optional) Name of glue connection storing connection details for Federated Data source." - Type: String - Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -43,7 +38,7 @@ Parameters: Description: 'Lambda memory in MB (min 128 - 3008 max).' Default: 3008 Type: Number - LambdaRoleArn: + LambdaRoleARN: Description: "(Optional) A custom role to be used by the Connector lambda" Type: String Default: "" @@ -57,37 +52,23 @@ Parameters: SubnetIds: Description: 'One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. 
subnet1,subnet2)' Type: 'List' - PermissionsBoundaryArn: + PermissionsBoundaryARN: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String - KmsKeyId: - Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." - Type: String - Default: "" - Conditions: - HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] - NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] - CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] - HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] - NotHasGlueConnectionAndHasKmsKeyId: !And - - !Not [ !Condition HasGlueConnection ] - - !Condition HasKmsKeyId - + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] + NotHasLambdaRole: !Equals [!Ref LambdaRoleARN, ""] Resources: JdbcConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - default: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultConnectionString ] - disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] - spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] - spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] - kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] - glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] + disable_spill_encryption: !Ref DisableSpillEncryption + spill_bucket: !Ref SpillBucket + spill_prefix: !Ref SpillPrefix + default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName Handler: 
"com.amazonaws.athena.connectors.mysql.MySqlMuxCompositeHandler" CodeUri: "./target/athena-mysql-2022.47.1.jar" @@ -95,16 +76,15 @@ Resources: Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] + Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleARN] VpcConfig: SecurityGroupIds: !Ref SecurityGroupIds SubnetIds: !Ref SubnetIds - FunctionRole: Condition: NotHasLambdaRole Type: AWS::IAM::Role Properties: - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] ManagedPolicyArns: - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" AssumeRolePolicyDocument: @@ -116,7 +96,6 @@ Resources: - lambda.amazonaws.com Action: - "sts:AssumeRole" - FunctionExecutionPolicy: Condition: NotHasLambdaRole Type: "AWS::IAM::Policy" @@ -130,7 +109,7 @@ Resources: - Action: - secretsmanager:GetSecretValue Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' - Action: - logs:CreateLogGroup Effect: Allow @@ -170,32 +149,4 @@ Resources: - Fn::Sub: - arn:${AWS::Partition}:s3:::${bucketName}/* - bucketName: - Ref: SpillBucket - - !If - - HasGlueConnection - - Action: - - glue:GetConnection - Effect: Allow - Resource: - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' - - !Ref "AWS::NoValue" - - FunctionKmsPolicy: - Condition: CreateKmsPolicy - Type: "AWS::IAM::Policy" - Properties: - PolicyName: FunctionKmsPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Action: - - 
kms:GenerateRandom - Resource: '*' - - Effect: Allow - Action: - - kms:GenerateDataKey - Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" - Roles: - - !Ref FunctionRole + Ref: SpillBucket \ No newline at end of file diff --git a/athena-neptune/athena-neptune-connection.yaml b/athena-neptune/athena-neptune-connection.yaml new file mode 100644 index 0000000000..a0a2839cc5 --- /dev/null +++ b/athena-neptune/athena-neptune-connection.yaml @@ -0,0 +1,165 @@ +Transform: 'AWS::Serverless-2016-10-31' + +Metadata: + 'AWS::ServerlessRepo::Application': + Name: AthenaNeptuneConnectorWithGlueConnection + Description: This connector enables Amazon Athena to communicate with your Neptune instance, making your Neptune graph data accessible via SQL. + Author: 'default author' + SpdxLicenseId: Apache-2.0 + LicenseUrl: LICENSE.txt + ReadmeUrl: README.md + Labels: ['athena-federation','athena-neptune','neptune'] + HomePageUrl: https://github.com/awslabs/aws-athena-query-federation + SemanticVersion: 2022.47.1 + SourceCodeUrl: https://github.com/awslabs/aws-athena-query-federation + +Parameters: + NeptuneClusterResId: + Description: 'To find the Neptune cluster resource ID in the Amazon Neptune AWS Management Console, choose the DB cluster that you want. The Resource ID is shown in the Configuration section.' + Type: String + LambdaFunctionName: + Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' + Type: String + Default: 'athena-catalog' + AllowedPattern: ^[a-z0-9-_]{1,64}$ + SpillBucket: + Description: 'The name of the bucket where this function can spill data.' + Type: String + GlueConnection: + Description: "Name of glue connection storing connection details for Federated Data source." 
+ Type: String + SecurityGroupIds: + Description: 'One or more SecurityGroup IDs corresponding to the SecurityGroup(s) that should be applied to the Lambda function to allow connectivity to Neptune cluster. (e.g. sg1,sg2,sg3)' + Type: 'List' + SubnetIds: + Description: 'One or more Subnet IDs corresponding to the Subnet(s) that the Lambda function can use to access the Neptune cluster. (e.g. subnet1,subnet2)' + Type: 'List' + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." + Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" + +Conditions: + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + +Resources: + ConnectorConfig: + Type: 'AWS::Serverless::Function' + Properties: + Environment: + Variables: + glue_connection: !Ref GlueConnection + SERVICE_REGION: !Ref AWS::Region + FunctionName: !Ref LambdaFunctionName + Handler: "com.amazonaws.athena.connectors.neptune.NeptuneCompositeHandler" + CodeUri: "./target/athena-neptune-2022.47.1.jar" + Description: "Enables Amazon Athena to communicate with Neptune, making your Neptune graph data accessible via SQL." 
+ Runtime: java11 + Timeout: 900 + MemorySize: 3008 + Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] + VpcConfig: + SecurityGroupIds: !Ref SecurityGroupIds + SubnetIds: !Ref SubnetIds + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - glue:GetTableVersions + - glue:GetPartitions + - glue:GetTables + - glue:GetTableVersion + - glue:GetDatabases + - glue:GetTable + - glue:GetPartition + - glue:GetDatabase + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + - Action: + - neptune-db:* + Effect: Allow + #Dynamically construct Neptune Cluster Resource ARN to limit permissions to the specific cluster provided + Resource: !Sub 'arn:${AWS::Partition}:neptune-db:${AWS::Region}:${AWS::AccountId}:${NeptuneClusterResId}/*' + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 
'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-neptune/athena-neptune.yaml b/athena-neptune/athena-neptune.yaml index 847e85bdf9..c13ed20f89 100644 --- a/athena-neptune/athena-neptune.yaml +++ b/athena-neptune/athena-neptune.yaml @@ -14,27 +14,25 @@ Metadata: SourceCodeUrl: https://github.com/awslabs/aws-athena-query-federation Parameters: - NeptuneEndpoint: - Description: '(Optional if Glue Connection is provided) The Neptune cluster endpoint' + NeptuneClusterEndpoint: + Description: 'The Neptune cluster endpoint' Type: String - Default: "" NeptunePort: Description: 'The Neptune port' Type: String Default: '8182' - NeptuneClusterResId: + NeptuneClusterResourceID: Description: 'To find the Neptune cluster resource ID in the Amazon Neptune AWS Management Console, choose the DB cluster that you want. The Resource ID is shown in the Configuration section.' Type: String - NeptuneGraphtype: + NeptuneGraphType: Description: 'Type of graph created in Neptune, defaults to PROPERTYGRAPH. 
Allowed values: PROPERTYGRAPH, RDF' Type: String Default: 'PROPERTYGRAPH' AllowedValues: ["PROPERTYGRAPH", "RDF"] GlueDatabaseName: - Description: '(Optional if Glue Connection is provided) Name of the Neptune cluster specific Glue Database that contains schemas of graph vertices' + Description: 'Name of the Neptune cluster specific Glue Database that contains schemas of graph vertices' Type: String - Default: "" - LambdaFunctionName: + AthenaCatalogName: Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' Type: String Default: 'athena-catalog' @@ -46,10 +44,6 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-neptune-spill - GlueConnection: - Description: "(Optional) Name of glue connection storing connection details for Federated Data source." - Type: String - Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -68,36 +62,21 @@ Parameters: SubnetIds: Description: 'One or more Subnet IDs corresponding to the Subnet(s) that the Lambda function can use to access the Neptune cluster. (e.g. subnet1,subnet2)' Type: 'List' - IamEnabled: + IAMEnabled: Description: 'If set to ''true'' the connector uses Signature Version 4 Signing' Default: false Type: String - EnableCaseinsensitivematch: + EnableCaseInsensitiveMatch: Description: 'If set to ''false'' the connector does a case sensitive match for keys' Default: true Type: String - PermissionsBoundaryArn: + PermissionsBoundaryARN: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String - KmsKeyId: - Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. 
Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." - Type: String - Default: "" - LambdaRoleArn: - Description: "(Optional) A custom role to be used by the Connector lambda" - Type: String - Default: "" Conditions: - HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] - NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] - CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] - HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] - NotHasGlueConnectionAndHasKmsKeyId: !And - - !Not [ !Condition HasGlueConnection ] - - !Condition HasKmsKeyId + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] Resources: ConnectorConfig: @@ -105,128 +84,51 @@ Resources: Properties: Environment: Variables: - disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] - spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] - spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] - kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] - glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] - glue_database_name: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref GlueDatabaseName ] - neptune_endpoint: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref NeptuneEndpoint ] - neptune_port: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref NeptunePort ] - neptune_cluster_res_id: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref NeptuneClusterResId ] - iam_enabled: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref IamEnabled ] - neptune_graphtype: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref NeptuneGraphtype ] + disable_spill_encryption: !Ref DisableSpillEncryption + spill_bucket: !Ref SpillBucket + spill_prefix: !Ref 
SpillPrefix + glue_database_name: !Ref GlueDatabaseName + neptune_endpoint: !Ref NeptuneClusterEndpoint + neptune_port: !Ref NeptunePort + neptune_cluster_res_id: !Ref NeptuneClusterResourceID + iam_enabled: !Ref IAMEnabled + neptune_graphtype: !Ref NeptuneGraphType SERVICE_REGION: !Ref AWS::Region - enable_caseinsensitivematch: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref EnableCaseinsensitivematch ] - FunctionName: !Ref LambdaFunctionName + enable_caseinsensitivematch: !Ref EnableCaseInsensitiveMatch + FunctionName: !Ref AthenaCatalogName Handler: "com.amazonaws.athena.connectors.neptune.NeptuneCompositeHandler" CodeUri: "./target/athena-neptune-2022.47.1.jar" Description: "Enables Amazon Athena to communicate with Neptune, making your Neptune graph data accessible via SQL." Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] - VpcConfig: - SecurityGroupIds: !Ref SecurityGroupIds - SubnetIds: !Ref SubnetIds - - FunctionRole: - Condition: NotHasLambdaRole - Type: AWS::IAM::Role - Properties: - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] - ManagedPolicyArns: - - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" - AssumeRolePolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Principal: - Service: - - lambda.amazonaws.com - Action: - - "sts:AssumeRole" - - FunctionExecutionPolicy: - Condition: NotHasLambdaRole - Type: "AWS::IAM::Policy" - Properties: - Roles: - - !Ref FunctionRole - PolicyName: FunctionExecutionPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Action: - - glue:GetTableVersions - - glue:GetPartitions - - glue:GetTables - - glue:GetTableVersion - - glue:GetDatabases - - glue:GetTable - - glue:GetPartition - - glue:GetDatabase - - athena:GetQueryExecution - - Effect: Allow - Resource: '*' - - Action: - - neptune-db:* - Effect: Allow - 
#Dynamically construct Neptune Cluster Resource ARN to limit permissions to the specific cluster provided - Resource: !Sub 'arn:${AWS::Partition}:neptune-db:${AWS::Region}:${AWS::AccountId}:${NeptuneClusterResId}/*' - - Action: - - ec2:CreateNetworkInterface - - ec2:DeleteNetworkInterface - - ec2:DescribeNetworkInterfaces - - ec2:DetachNetworkInterface - Effect: Allow - Resource: '*' - - Action: - - s3:GetObject - - s3:ListBucket - - s3:GetBucketLocation - - s3:GetObjectVersion - - s3:PutObject - - s3:PutObjectAcl - - s3:GetLifecycleConfiguration - - s3:PutLifecycleConfiguration - - s3:DeleteObject - Effect: Allow - Resource: - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName} - - bucketName: - Ref: SpillBucket - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName}/* - - bucketName: - Ref: SpillBucket - - !If - - HasGlueConnection + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] + Policies: + - Statement: - Action: - - glue:GetConnection + - glue:GetTableVersions + - glue:GetPartitions + - glue:GetTables + - glue:GetTableVersion + - glue:GetDatabases + - glue:GetTable + - glue:GetPartition + - glue:GetDatabase + - athena:GetQueryExecution Effect: Allow - Resource: - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' - - !Ref "AWS::NoValue" - - FunctionKmsPolicy: - Condition: CreateKmsPolicy - Type: "AWS::IAM::Policy" - Properties: - PolicyName: FunctionKmsPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Action: - - kms:GenerateRandom - Resource: '*' - - Effect: Allow - Action: - - kms:GenerateDataKey - Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" - Roles: - - !Ref FunctionRole \ No newline at end of file + Resource: '*' + - Action: + - neptune-db:* + Effect: Allow + #Dynamically construct Neptune Cluster Resource 
ARN to limit permissions to the specific cluster provided + Resource: !Sub 'arn:${AWS::Partition}:neptune-db:${AWS::Region}:${AWS::AccountId}:${NeptuneClusterResourceID}/*' + Version: '2012-10-17' + #S3CrudPolicy allows our connector to spill large responses to S3. You can optionally replace this pre-made policy + #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. + - S3CrudPolicy: + BucketName: !Ref SpillBucket + #VPCAccessPolicy allows our connector to run in a VPC so that it can access your data source. + - VPCAccessPolicy: {} + VpcConfig: + SecurityGroupIds: !Ref SecurityGroupIds + SubnetIds: !Ref SubnetIds \ No newline at end of file diff --git a/athena-oracle/athena-oracle-connection.yaml b/athena-oracle/athena-oracle-connection.yaml new file mode 100644 index 0000000000..647dc19579 --- /dev/null +++ b/athena-oracle/athena-oracle-connection.yaml @@ -0,0 +1,163 @@ +Transform: 'AWS::Serverless-2016-10-31' +Metadata: + 'AWS::ServerlessRepo::Application': + Name: AthenaOracleConnectorWithGlueConnection + Description: 'This connector enables Amazon Athena to communicate with your ORACLE instance(s) using JDBC driver.' + Author: 'default author' + SpdxLicenseId: Apache-2.0 + LicenseUrl: LICENSE.txt + ReadmeUrl: README.md + Labels: + - athena-federation + HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' + SemanticVersion: 2022.47.1 + SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' +Parameters: + LambdaFunctionName: + Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' + Type: String + AllowedPattern: ^[a-z0-9-_]{1,64}$ + SecretName: + Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. 
All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' + Type: String + SpillBucket: + Description: 'The name of the bucket where this function can spill data.' + Type: String + GlueConnection: + Description: "Name of glue connection storing connection details for Federated Data source." + Type: String + SecurityGroupIds: + Description: '(Optional) One or more SecurityGroup IDs corresponding to the SecurityGroup that should be applied to the Lambda function. (e.g. sg1,sg2,sg3)' + Type: CommaDelimitedList + Default: "" + SubnetIds: + Description: '(Optional) One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' + Type: CommaDelimitedList + Default: "" + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
+ Type: String + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" +Conditions: + HasSecurityGroups: !Not [ !Equals [ !Join [ "", !Ref SecurityGroupIds ], "" ] ] + HasSubnets: !Not [ !Equals [ !Join [ "", !Ref SubnetIds ], "" ] ] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] +Resources: + JdbcConnectorConfig: + Type: 'AWS::Serverless::Function' + Properties: + Environment: + Variables: + glue_connection: !Ref GlueConnection + FunctionName: !Ref LambdaFunctionName + Handler: "com.amazonaws.athena.connectors.oracle.OracleMuxCompositeHandler" + CodeUri: "./target/athena-oracle-2022.47.1.jar" + Description: "Enables Amazon Athena to communicate with ORACLE using JDBC" + Runtime: java11 + Timeout: 900 + MemorySize: 3008 + Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] + VpcConfig: + SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] + SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' + - Action: + - logs:CreateLogGroup + Effect: Allow + Resource: !Sub 
'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' + - Action: + - logs:CreateLogStream + - logs:PutLogEvents + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' + - Action: + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-oracle/athena-oracle.yaml b/athena-oracle/athena-oracle.yaml index f58bece818..dd19543fff 100644 --- a/athena-oracle/athena-oracle.yaml +++ b/athena-oracle/athena-oracle.yaml @@ -20,11 +20,10 @@ Parameters: DefaultConnectionString: Description: 'The default connection string is used when catalog is "lambda:${LambdaFunctionName}". 
Catalog specific Connection Strings can be added later. Format: ${DatabaseType}://${NativeJdbcConnectionString}.' Type: String - Default: "" - SecretName: + SecretNamePrefix: Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' Type: String - IsFipsEnabled: + IsFIPSEnabled: AllowedValues: - true - false @@ -38,10 +37,6 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill - GlueConnection: - Description: "(Optional) Name of glue connection storing connection details for Federated Data source." - Type: String - Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -50,6 +45,10 @@ Parameters: Description: 'Lambda memory in MB (min 128 - 3008 max).' Default: 3008 Type: Number + LambdaRoleARN: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" DisableSpillEncryption: Description: 'If set to ''false'' data spilled to S3 is encrypted with AES GCM' Default: 'false' @@ -62,41 +61,26 @@ Parameters: Description: '(Optional) One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. 
subnet1,subnet2)' Type: CommaDelimitedList Default: "" - PermissionsBoundaryArn: + PermissionsBoundaryARN: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String - KmsKeyId: - Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." - Type: String - LambdaRoleArn: - Description: "(Optional) A custom role to be used by the Connector lambda" - Type: String - Default: "" Conditions: - HasSecurityGroups: !Not [ !Equals [ !Join [ "", !Ref SecurityGroupIds ], "" ] ] - HasSubnets: !Not [ !Equals [ !Join [ "", !Ref SubnetIds ], "" ] ] - HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] - NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] - CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] - HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] - NotHasGlueConnectionAndHasKmsKeyId: !And - - !Not [ !Condition HasGlueConnection ] - - !Condition HasKmsKeyId + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] + NotHasLambdaRole: !Equals [!Ref LambdaRoleARN, ""] + HasSecurityGroups: !Not [ !Equals [ !Join ["", !Ref SecurityGroupIds], "" ] ] + HasSubnets: !Not [ !Equals [ !Join ["", !Ref SubnetIds], "" ] ] Resources: JdbcConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] - spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] - spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] - kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref 
"AWS::NoValue" ] - glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] - default: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultConnectionString ] - is_fips_enabled: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref IsFipsEnabled ] + disable_spill_encryption: !Ref DisableSpillEncryption + spill_bucket: !Ref SpillBucket + spill_prefix: !Ref SpillPrefix + default: !Ref DefaultConnectionString + is_FIPS_Enabled: !Ref IsFIPSEnabled FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.oracle.OracleMuxCompositeHandler" CodeUri: "./target/athena-oracle-2022.47.1.jar" @@ -104,7 +88,8 @@ Resources: Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] + Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleARN] VpcConfig: SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] @@ -113,7 +98,6 @@ Resources: Condition: NotHasLambdaRole Type: AWS::IAM::Role Properties: - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] ManagedPolicyArns: - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" AssumeRolePolicyDocument: @@ -125,7 +109,6 @@ Resources: - lambda.amazonaws.com Action: - "sts:AssumeRole" - FunctionExecutionPolicy: Condition: NotHasLambdaRole Type: "AWS::IAM::Policy" @@ -139,7 +122,7 @@ Resources: - Action: - secretsmanager:GetSecretValue Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' - Action: - logs:CreateLogGroup Effect: 
Allow @@ -153,13 +136,6 @@ Resources: - athena:GetQueryExecution Effect: Allow Resource: '*' - - Action: - - ec2:CreateNetworkInterface - - ec2:DeleteNetworkInterface - - ec2:DescribeNetworkInterfaces - - ec2:DetachNetworkInterface - Effect: Allow - Resource: '*' - Action: - s3:GetObject - s3:ListBucket @@ -179,32 +155,4 @@ Resources: - Fn::Sub: - arn:${AWS::Partition}:s3:::${bucketName}/* - bucketName: - Ref: SpillBucket - - !If - - HasGlueConnection - - Action: - - glue:GetConnection - Effect: Allow - Resource: - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' - - !Ref "AWS::NoValue" - - FunctionKmsPolicy: - Condition: CreateKmsPolicy - Type: "AWS::IAM::Policy" - Properties: - PolicyName: FunctionKmsPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Action: - - kms:GenerateRandom - Resource: '*' - - Effect: Allow - Action: - - kms:GenerateDataKey - Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" - Roles: - - !Ref FunctionRole \ No newline at end of file + Ref: SpillBucket \ No newline at end of file diff --git a/athena-postgresql/athena-postgresql-connection.yaml b/athena-postgresql/athena-postgresql-connection.yaml new file mode 100644 index 0000000000..d61372175a --- /dev/null +++ b/athena-postgresql/athena-postgresql-connection.yaml @@ -0,0 +1,171 @@ +Transform: 'AWS::Serverless-2016-10-31' +Metadata: + 'AWS::ServerlessRepo::Application': + Name: AthenaPostgreSQLConnectorWithGlueConnection + Description: 'This connector enables Amazon Athena to communicate with your PostgreSQL instance(s) using JDBC driver.' 
+ Author: 'default author' + SpdxLicenseId: Apache-2.0 + LicenseUrl: LICENSE.txt + ReadmeUrl: README.md + Labels: + - athena-federation + HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' + SemanticVersion: 2022.47.1 + SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' +Parameters: + LambdaFunctionName: + Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' + Type: String + AllowedPattern: ^[a-z0-9-_]{1,64}$ + SecretName: + Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena PostgreSQL Federation secret names can be prefixed with "AthenaPostgreSQLFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaPostgreSQLFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' + Type: String + SpillBucket: + Description: 'The name of the bucket where this function can spill data.' + Type: String + GlueConnection: + Description: "Name of glue connection storing connection details for Federated Data source." + Type: String + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" + SecurityGroupIds: + Description: 'One or more SecurityGroup IDs corresponding to the SecurityGroup that should be applied to the Lambda function. (e.g. sg1,sg2,sg3)' + Type: 'List' + SubnetIds: + Description: 'One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. 
subnet1,subnet2)' + Type: 'List' + CompositeHandler: + Description: 'Use "PostGreSqlMuxCompositeHandler" to access multiple postgres instances and "PostGreSqlCompositeHandler" to access single instance using DefaultConnectionString' + Type: String + Default: "PostGreSqlMuxCompositeHandler" + AllowedValues : ["PostGreSqlMuxCompositeHandler", "PostGreSqlCompositeHandler"] + DefaultScale: + Description: "(Optional) Default value for scale of type Numeric, representing the decimal digits in the fractional part, to the right of the decimal point." + Default: 0 + Type: Number + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." + Type: String + Default: "" + +Conditions: + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + +Resources: + JdbcConnectorConfig: + Type: 'AWS::Serverless::Function' + Properties: + Environment: + Variables: + glue_connection: !Ref GlueConnection + FunctionName: !Ref LambdaFunctionName + Handler: !Sub "com.amazonaws.athena.connectors.postgresql.${CompositeHandler}" + CodeUri: "./target/athena-postgresql-2022.47.1.jar" + Description: "Enables Amazon Athena to communicate with PostgreSQL using JDBC" + Runtime: java11 + Timeout: 900 + MemorySize: 3008 + Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] + VpcConfig: + SecurityGroupIds: !Ref SecurityGroupIds + SubnetIds: !Ref SubnetIds + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - 
lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' + - Action: + - logs:CreateLogGroup + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' + - Action: + - logs:CreateLogStream + - logs:PutLogEvents + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' + - Action: + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + 
Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-postgresql/athena-postgresql.yaml b/athena-postgresql/athena-postgresql.yaml index 7d5d8a6a86..4975474fb9 100644 --- a/athena-postgresql/athena-postgresql.yaml +++ b/athena-postgresql/athena-postgresql.yaml @@ -20,8 +20,7 @@ Parameters: DefaultConnectionString: Description: 'The default connection string is used when catalog is "lambda:${LambdaFunctionName}". Catalog specific Connection Strings can be added later. Format: ${DatabaseType}://${NativeJdbcConnectionString}.' Type: String - Default: "" - SecretName: + SecretNamePrefix: Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena PostgreSQL Federation secret names can be prefixed with "AthenaPostgreSQLFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaPostgreSQLFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' Type: String SpillBucket: @@ -31,10 +30,6 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill - GlueConnection: - Description: "(Optional) Name of glue connection storing connection details for Federated Data source." - Type: String - Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. 
(min 1 - 900 max)' Default: 900 @@ -47,7 +42,7 @@ Parameters: Description: 'If set to ''false'' data spilled to S3 is encrypted with AES GCM' Default: 'false' Type: String - LambdaRoleArn: + LambdaRoleARN: Description: "(Optional) A custom role to be used by the Connector lambda" Type: String Default: "" @@ -62,7 +57,7 @@ Parameters: Type: String Default: "PostGreSqlMuxCompositeHandler" AllowedValues : ["PostGreSqlMuxCompositeHandler", "PostGreSqlCompositeHandler"] - PermissionsBoundaryArn: + PermissionsBoundaryARN: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String @@ -70,34 +65,21 @@ Parameters: Description: "(Optional) Default value for scale of type Numeric, representing the decimal digits in the fractional part, to the right of the decimal point." Default: 0 Type: Number - KmsKeyId: - Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
- Type: String - Default: "" Conditions: - HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] - NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] - CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] - HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] - NotHasGlueConnectionAndHasKmsKeyId: !And - - !Not [ !Condition HasGlueConnection ] - - !Condition HasKmsKeyId - + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] + NotHasLambdaRole: !Equals [!Ref LambdaRoleARN, ""] Resources: JdbcConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] - spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] - spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] - kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] - glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] - default: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultConnectionString ] - default_scale: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultScale ] + disable_spill_encryption: !Ref DisableSpillEncryption + spill_bucket: !Ref SpillBucket + spill_prefix: !Ref SpillPrefix + default: !Ref DefaultConnectionString + default_scale: !Ref DefaultScale FunctionName: !Ref LambdaFunctionName Handler: !Sub "com.amazonaws.athena.connectors.postgresql.${CompositeHandler}" CodeUri: "./target/athena-postgresql-2022.47.1.jar" @@ -105,7 +87,8 @@ Resources: Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] + Role: 
!If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleARN] VpcConfig: SecurityGroupIds: !Ref SecurityGroupIds SubnetIds: !Ref SubnetIds @@ -114,7 +97,6 @@ Resources: Condition: NotHasLambdaRole Type: AWS::IAM::Role Properties: - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] ManagedPolicyArns: - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" AssumeRolePolicyDocument: @@ -140,7 +122,7 @@ Resources: - Action: - secretsmanager:GetSecretValue Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' - Action: - logs:CreateLogGroup Effect: Allow @@ -180,32 +162,4 @@ Resources: - Fn::Sub: - arn:${AWS::Partition}:s3:::${bucketName}/* - bucketName: - Ref: SpillBucket - - !If - - HasGlueConnection - - Action: - - glue:GetConnection - Effect: Allow - Resource: - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' - - !Ref "AWS::NoValue" - - FunctionKmsPolicy: - Condition: CreateKmsPolicy - Type: "AWS::IAM::Policy" - Properties: - PolicyName: FunctionKmsPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Action: - - kms:GenerateRandom - Resource: '*' - - Effect: Allow - Action: - - kms:GenerateDataKey - Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" - Roles: - - !Ref FunctionRole \ No newline at end of file + Ref: SpillBucket \ No newline at end of file diff --git a/athena-redis/athena-redis-connection.yaml b/athena-redis/athena-redis-connection.yaml new file mode 100644 index 0000000000..7a6ca5d0cf --- /dev/null +++ b/athena-redis/athena-redis-connection.yaml @@ -0,0 +1,159 @@ +Transform: 'AWS::Serverless-2016-10-31' 
+Metadata: + 'AWS::ServerlessRepo::Application': + Name: AthenaRedisConnectorWithGlueConnection + Description: 'This connector enables Amazon Athena to communicate with your Redis instance(s), making your Redis data accessible via SQL.' + Author: 'default author' + SpdxLicenseId: Apache-2.0 + LicenseUrl: LICENSE.txt + ReadmeUrl: README.md + Labels: + - athena-federation + HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' + SemanticVersion: 2022.47.1 + SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' +Parameters: + LambdaFunctionName: + Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' + Type: String + AllowedPattern: ^[a-z0-9-_]{1,64}$ + SpillBucket: + Description: 'The name of the bucket where this function can spill data.' + Type: String + GlueConnection: + Description: "Name of glue connection storing connection details for Federated Data source." + Type: String + SecurityGroupIds: + Description: 'One or more SecurityGroup IDs corresponding to the SecurityGroup that should be applied to the Lambda function. (e.g. sg1,sg2,sg3)' + Type: 'List' + SubnetIds: + Description: 'One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' + Type: 'List' + SecretName: + Description: 'The name or prefix of a set of names within Secrets Manager that this function should have access to. (e.g. redis-*).' + Type: String + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
+ Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" +Conditions: + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] +Resources: + ConnectorConfig: + Type: 'AWS::Serverless::Function' + Properties: + Environment: + Variables: + glue_connection: !Ref GlueConnection + FunctionName: !Ref LambdaFunctionName + Handler: "com.amazonaws.athena.connectors.redis.RedisCompositeHandler" + CodeUri: "./target/athena-redis-2022.47.1.jar" + Description: "Enables Amazon Athena to communicate with Redis, making your Redis data accessible via SQL" + Runtime: java11 + Timeout: 900 + MemorySize: 3008 + Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] + VpcConfig: + SecurityGroupIds: !Ref SecurityGroupIds + SubnetIds: !Ref SubnetIds + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:*:*:secret:${SecretName}*' + - Action: + - glue:GetTableVersions + - glue:GetPartitions + - glue:GetTables + - glue:GetTableVersion + - glue:GetDatabases + - glue:GetTable + - glue:GetPartition + - glue:GetDatabase + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + - Action: + - ec2:CreateNetworkInterface + - 
ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-redis/athena-redis.yaml b/athena-redis/athena-redis.yaml index f54fa6c0f3..85ffd47a79 100644 --- a/athena-redis/athena-redis.yaml +++ b/athena-redis/athena-redis.yaml @@ -13,7 +13,7 @@ Metadata: SemanticVersion: 2022.47.1 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: - LambdaFunctionName: + AthenaCatalogName: Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' Type: String AllowedPattern: ^[a-z0-9-_]{1,64}$ @@ -24,10 +24,6 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' 
Type: String Default: athena-spill - GlueConnection: - Description: "(Optional) Name of glue connection storing connection details for Federated Data source." - Type: String - Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -46,10 +42,10 @@ Parameters: SubnetIds: Description: 'One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' Type: 'List' - SecretName: + SecretNameOrPrefix: Description: 'The name or prefix of a set of names within Secrets Manager that this function should have access to. (e.g. redis-*).' Type: String - PermissionsBoundaryArn: + PermissionsBoundaryARN: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String @@ -69,146 +65,56 @@ Parameters: Description: "(Optional) Set this number (for example 1, 2, or 3) to read from a non-default Redis database. Used for Query Pass Through queries only." Default: 0 Type: Number - KmsKeyId: - Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
- Type: String - Default: "" - LambdaRoleArn: - Description: "(Optional) A custom role to be used by the Connector lambda" - Type: String - Default: "" Conditions: - HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] - NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] - CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] - HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] - NotHasGlueConnectionAndHasKmsKeyId: !And - - !Not [ !Condition HasGlueConnection ] - - !Condition HasKmsKeyId + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] Resources: ConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] - spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] - spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] - kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] - glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] - qpt_endpoint: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref QPTConnectionEndpoint ] - qpt_ssl: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref QPTConnectionSSL ] - qpt_cluster: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref QPTConnectionCluster ] - qpt_db_number: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref QPTConnectionDBNumber ] - FunctionName: !Ref LambdaFunctionName + disable_spill_encryption: !Ref DisableSpillEncryption + spill_bucket: !Ref SpillBucket + spill_prefix: !Ref SpillPrefix + qpt_endpoint: !Ref QPTConnectionEndpoint + qpt_ssl: !Ref QPTConnectionSSL + qpt_cluster: !Ref QPTConnectionCluster + qpt_db_number: !Ref QPTConnectionDBNumber + FunctionName: !Ref AthenaCatalogName Handler: 
"com.amazonaws.athena.connectors.redis.RedisCompositeHandler" CodeUri: "./target/athena-redis-2022.47.1.jar" Description: "Enables Amazon Athena to communicate with Redis, making your Redis data accessible via SQL" Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] - VpcConfig: - SecurityGroupIds: !Ref SecurityGroupIds - SubnetIds: !Ref SubnetIds - - FunctionRole: - Condition: NotHasLambdaRole - Type: AWS::IAM::Role - Properties: - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] - ManagedPolicyArns: - - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" - AssumeRolePolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Principal: - Service: - - lambda.amazonaws.com - Action: - - "sts:AssumeRole" - - FunctionExecutionPolicy: - Condition: NotHasLambdaRole - Type: "AWS::IAM::Policy" - Properties: - Roles: - - !Ref FunctionRole - PolicyName: FunctionExecutionPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Action: - - secretsmanager:GetSecretValue - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:*:*:secret:${SecretName}*' - - Action: - - glue:GetTableVersions - - glue:GetPartitions - - glue:GetTables - - glue:GetTableVersion - - glue:GetDatabases - - glue:GetTable - - glue:GetPartition - - glue:GetDatabase - - athena:GetQueryExecution - - Effect: Allow - Resource: '*' - - Action: - - ec2:CreateNetworkInterface - - ec2:DeleteNetworkInterface - - ec2:DescribeNetworkInterfaces - - ec2:DetachNetworkInterface - Effect: Allow - Resource: '*' - - Action: - - s3:GetObject - - s3:ListBucket - - s3:GetBucketLocation - - s3:GetObjectVersion - - s3:PutObject - - s3:PutObjectAcl - - s3:GetLifecycleConfiguration - - s3:PutLifecycleConfiguration - - s3:DeleteObject - Effect: Allow - Resource: - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName} - - 
bucketName: - Ref: SpillBucket - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName}/* - - bucketName: - Ref: SpillBucket - - !If - - HasGlueConnection + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] + Policies: + - Statement: - Action: - - glue:GetConnection + - secretsmanager:GetSecretValue Effect: Allow - Resource: - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' - - !Ref "AWS::NoValue" - - FunctionKmsPolicy: - Condition: CreateKmsPolicy - Type: "AWS::IAM::Policy" - Properties: - PolicyName: FunctionKmsPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Action: - - kms:GenerateRandom - Resource: '*' - - Effect: Allow - Action: - - kms:GenerateDataKey - Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" - Roles: - - !Ref FunctionRole \ No newline at end of file + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:*:*:secret:${SecretNameOrPrefix}' + Version: '2012-10-17' + - Statement: + - Action: + - glue:GetTableVersions + - glue:GetPartitions + - glue:GetTables + - glue:GetTableVersion + - glue:GetDatabases + - glue:GetTable + - glue:GetPartition + - glue:GetDatabase + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + Version: '2012-10-17' + #S3CrudPolicy allows our connector to spill large responses to S3. You can optionally replace this pre-made policy + #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. + - S3CrudPolicy: + BucketName: !Ref SpillBucket + #VPCAccessPolicy allows our connector to run in a VPC so that it can access your data source. 
+ - VPCAccessPolicy: {} + VpcConfig: + SecurityGroupIds: !Ref SecurityGroupIds + SubnetIds: !Ref SubnetIds \ No newline at end of file diff --git a/athena-redshift/athena-redshift-connection.yaml b/athena-redshift/athena-redshift-connection.yaml new file mode 100644 index 0000000000..fedef92cc0 --- /dev/null +++ b/athena-redshift/athena-redshift-connection.yaml @@ -0,0 +1,151 @@ +Transform: 'AWS::Serverless-2016-10-31' +Metadata: + 'AWS::ServerlessRepo::Application': + Name: AthenaRedshiftConnectorWithGlueConnection + Description: 'This connector enables Amazon Athena to communicate with your Redshift instance(s) using JDBC driver.' + Author: 'default author' + SpdxLicenseId: Apache-2.0 + LicenseUrl: LICENSE.txt + ReadmeUrl: README.md + Labels: + - athena-federation + HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' + SemanticVersion: 2022.47.1 + SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' +Parameters: + LambdaFunctionName: + Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' + Type: String + AllowedPattern: ^[a-z0-9-_]{1,64}$ + SecretName: + Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena Redshift Federation secret names can be prefixed with "AthenaRedshiftFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaRedshiftFederation*". Parameter value in this case should be "AthenaRedshiftFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' + Type: String + SpillBucket: + Description: 'The name of the bucket where this function can spill data.' + Type: String + GlueConnection: + Description: "Name of glue connection storing connection details for Federated Data source." 
+ Type: String + SecurityGroupIds: + Description: 'One or more SecurityGroup IDs corresponding to the SecurityGroup that should be applied to the Lambda function. (e.g. sg1,sg2,sg3)' + Type: 'List' + SubnetIds: + Description: 'One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' + Type: 'List' + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." + Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom IAM role ARN to be used by the Connector lambda" + Type: String + Default: "" +Conditions: + HasKmsKeyId: !Not [!Equals [!Ref KmsKeyId, ""]] + NotHasLambdaRole: !Equals [!Ref LambdaRoleArn, ""] + CreateKmsPolicy: !And [!Condition NotHasLambdaRole, !Condition HasKmsKeyId] +Resources: + JdbcConnectorConfig: + Type: 'AWS::Serverless::Function' + Properties: + Environment: + Variables: + glue_connection: !Ref GlueConnection + FunctionName: !Ref LambdaFunctionName + Handler: "com.amazonaws.athena.connectors.redshift.RedshiftMuxCompositeHandler" + CodeUri: "./target/athena-redshift-2022.47.1.jar" + Description: "Enables Amazon Athena to communicate with Redshift using JDBC" + Runtime: java11 + Timeout: 900 + MemorySize: 3008 + Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] + VpcConfig: + SecurityGroupIds: !Ref SecurityGroupIds + SubnetIds: !Ref SubnetIds + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: 
NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - secretsmanager:GetSecretValue + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' + - Effect: Allow + Action: + - athena:GetQueryExecution + Resource: '*' + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Effect: Allow + Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + Roles: + - !Ref FunctionRole + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole diff --git a/athena-redshift/athena-redshift.yaml b/athena-redshift/athena-redshift.yaml index 8c2b32060c..998751db9c 100644 --- a/athena-redshift/athena-redshift.yaml +++ b/athena-redshift/athena-redshift.yaml @@ -20,8 +20,7 @@ Parameters: DefaultConnectionString: Description: 'The default connection string is used 
when catalog is "lambda:${LambdaFunctionName}". Catalog specific Connection Strings can be added later. Format: ${DatabaseType}://${NativeJdbcConnectionString}.' Type: String - Default: "" - SecretName: + SecretNamePrefix: Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena Redshift Federation secret names can be prefixed with "AthenaRedshiftFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaRedshiftFederation*". Parameter value in this case should be "AthenaRedshiftFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' Type: String SpillBucket: @@ -31,10 +30,6 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill - GlueConnection: - Description: "(Optional) Name of glue connection storing connection details for Federated Data source." - Type: String - Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -53,39 +48,36 @@ Parameters: SubnetIds: Description: 'One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' Type: 'List' - KmsKeyId: + KMSKeyId: Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
Type: String Default: "" - LambdaRoleArn: + LambdaRole: Description: "(Optional) A custom IAM role ARN to be used by the Connector lambda" Type: String Default: "" - PermissionsBoundaryArn: + PermissionsBoundaryARN: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String Conditions: - HasKmsKeyId: !Not [!Equals [!Ref KmsKeyId, ""]] - NotHasLambdaRole: !Equals [!Ref LambdaRoleArn, ""] - CreateKmsPolicy: !And [!Condition NotHasLambdaRole, !Condition HasKmsKeyId] - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] - HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] - NotHasGlueConnectionAndHasKmsKeyId: !And - - !Not [ !Condition HasGlueConnection ] - - !Condition HasKmsKeyId + HasKMSKeyId: !Not [!Equals [!Ref KMSKeyId, ""]] + NotHasLambdaRole: !Equals [!Ref LambdaRole, ""] + NotHasLambdaRoleAndHasKMSKeyId: !And + - !Condition NotHasLambdaRole + - !Condition HasKMSKeyId + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] Resources: JdbcConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] - spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] - spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] - kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] - glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] - default: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultConnectionString ] + disable_spill_encryption: !Ref DisableSpillEncryption + spill_bucket: !Ref SpillBucket + spill_prefix: !Ref SpillPrefix + default: !Ref DefaultConnectionString + kms_key_id: !If [HasKMSKeyId, !Ref KMSKeyId, !Ref "AWS::NoValue"] FunctionName: !Ref LambdaFunctionName 
Handler: "com.amazonaws.athena.connectors.redshift.RedshiftMuxCompositeHandler" CodeUri: "./target/athena-redshift-2022.47.1.jar" @@ -93,7 +85,7 @@ Resources: Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] + Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRole] VpcConfig: SecurityGroupIds: !Ref SecurityGroupIds SubnetIds: !Ref SubnetIds @@ -102,7 +94,7 @@ Resources: Condition: NotHasLambdaRole Type: AWS::IAM::Role Properties: - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] ManagedPolicyArns: - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" AssumeRolePolicyDocument: @@ -115,6 +107,7 @@ Resources: Action: - "sts:AssumeRole" + FunctionExecutionPolicy: Condition: NotHasLambdaRole Type: "AWS::IAM::Policy" @@ -126,18 +119,11 @@ Resources: - Effect: Allow Action: - secretsmanager:GetSecretValue - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' - Effect: Allow Action: - athena:GetQueryExecution Resource: '*' - - Action: - - ec2:CreateNetworkInterface - - ec2:DeleteNetworkInterface - - ec2:DescribeNetworkInterfaces - - ec2:DetachNetworkInterface - Effect: Allow - Resource: '*' - Effect: Allow Action: - s3:GetObject @@ -158,23 +144,14 @@ Resources: - arn:${AWS::Partition}:s3:::${bucketName}/* - bucketName: Ref: SpillBucket - - !If - - HasGlueConnection - - Action: - - glue:GetConnection - Effect: Allow - Resource: - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' - - !Ref 
"AWS::NoValue" Roles: - !Ref FunctionRole - FunctionKmsPolicy: - Condition: CreateKmsPolicy + FunctionKMSPolicy: + Condition: NotHasLambdaRoleAndHasKMSKeyId Type: "AWS::IAM::Policy" Properties: - PolicyName: FunctionKmsPolicy + PolicyName: FunctionKMSPolicy PolicyDocument: Version: 2012-10-17 Statement: @@ -185,6 +162,6 @@ Resources: - Effect: Allow Action: - kms:GenerateDataKey - Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KMSKeyId}" Roles: - - !Ref FunctionRole + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-saphana/athena-saphana-connection.yaml b/athena-saphana/athena-saphana-connection.yaml new file mode 100644 index 0000000000..afef3098a5 --- /dev/null +++ b/athena-saphana/athena-saphana-connection.yaml @@ -0,0 +1,166 @@ +Transform: 'AWS::Serverless-2016-10-31' +Metadata: + 'AWS::ServerlessRepo::Application': + Name: AthenaSaphanaConnectorWithGlueConnection + Description: 'This connector enables Amazon Athena to communicate with your Teradata instance(s) using JDBC driver.' + Author: 'default author' + SpdxLicenseId: Apache-2.0 + LicenseUrl: LICENSE.txt + ReadmeUrl: README.md + Labels: + - athena-federation + HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' + SemanticVersion: 2022.47.1 + SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' +Parameters: + LambdaFunctionName: + Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' + Type: String + AllowedPattern: ^[a-z0-9-_]{1,64}$ + SecretName: + Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. 
All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' + Type: String + SpillBucket: + Description: 'The name of the bucket where this function can spill data.' + Type: String + GlueConnection: + Description: "Name of glue connection storing connection details for Federated Data source." + Type: String + SecurityGroupIds: + Description: '(Optional) One or more SecurityGroup IDs corresponding to the SecurityGroup that should be applied to the Lambda function. (e.g. sg1,sg2,sg3)' + Type: CommaDelimitedList + Default: "" + SubnetIds: + Description: '(Optional) One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' + Type: CommaDelimitedList + Default: "" + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
+ Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" + +Conditions: + HasSecurityGroups: !Not [ !Equals [ !Join ["", !Ref SecurityGroupIds], "" ] ] + HasSubnets: !Not [ !Equals [ !Join ["", !Ref SubnetIds], "" ] ] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + +Resources: + JdbcConnectorConfig: + Type: 'AWS::Serverless::Function' + Properties: + Environment: + Variables: + glue_connection: !Ref GlueConnection + FunctionName: !Ref LambdaFunctionName + Handler: "com.amazonaws.athena.connectors.saphana.SaphanaMuxCompositeHandler" + CodeUri: "./target/athena-saphana.zip" + Description: "Enables Amazon Athena to communicate with SAP HANA using JDBC" + Runtime: java11 + Timeout: 900 + MemorySize: 3008 + Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] + VpcConfig: + SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] + SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' + - Action: + - logs:CreateLogGroup + Effect: Allow + Resource: 
!Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' + - Action: + - logs:CreateLogStream + - logs:PutLogEvents + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' + - Action: + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole diff --git a/athena-saphana/athena-saphana.yaml b/athena-saphana/athena-saphana.yaml index 0c3d3c633b..d85a95b3eb 100644 --- a/athena-saphana/athena-saphana.yaml +++ b/athena-saphana/athena-saphana.yaml @@ -20,8 +20,7 @@ Parameters: DefaultConnectionString: Description: 'The default connection string is used when catalog is "lambda:${LambdaFunctionName}". 
Catalog specific Connection Strings can be added later. Format: ${DatabaseType}://${NativeJdbcConnectionString}.' Type: String - Default: "" - SecretName: + SecretNamePrefix: Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' Type: String SpillBucket: @@ -31,10 +30,6 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill - GlueConnection: - Description: "(Optional) Name of glue connection storing connection details for Federated Data source." - Type: String - Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -55,43 +50,24 @@ Parameters: Description: '(Optional) One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' Type: CommaDelimitedList Default: "" - PermissionsBoundaryArn: + PermissionsBoundaryARN: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String - KmsKeyId: - Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
- Type: String - Default: "" - LambdaRoleArn: - Description: "(Optional) A custom role to be used by the Connector lambda" - Type: String - Default: "" - Conditions: + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] HasSecurityGroups: !Not [ !Equals [ !Join ["", !Ref SecurityGroupIds], "" ] ] HasSubnets: !Not [ !Equals [ !Join ["", !Ref SubnetIds], "" ] ] - HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] - NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] - CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] - HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] - NotHasGlueConnectionAndHasKmsKeyId: !And - - !Not [ !Condition HasGlueConnection ] - - !Condition HasKmsKeyId - Resources: JdbcConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] - spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] - spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] - kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] - glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] - default: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultConnectionString ] + disable_spill_encryption: !Ref DisableSpillEncryption + spill_bucket: !Ref SpillBucket + spill_prefix: !Ref SpillPrefix + default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.saphana.SaphanaMuxCompositeHandler" CodeUri: "./target/athena-saphana.zip" @@ -99,108 +75,39 @@ Resources: Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] - VpcConfig: - 
SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] - SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] - - FunctionRole: - Condition: NotHasLambdaRole - Type: AWS::IAM::Role - Properties: - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] - ManagedPolicyArns: - - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" - AssumeRolePolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Principal: - Service: - - lambda.amazonaws.com - Action: - - "sts:AssumeRole" - - FunctionExecutionPolicy: - Condition: NotHasLambdaRole - Type: "AWS::IAM::Policy" - Properties: - Roles: - - !Ref FunctionRole - PolicyName: FunctionExecutionPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Action: - - secretsmanager:GetSecretValue - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' - - Action: - - logs:CreateLogGroup - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' - - Action: - - logs:CreateLogStream - - logs:PutLogEvents - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' - - Action: - - athena:GetQueryExecution - - Effect: Allow - Resource: '*' - - Action: - - ec2:CreateNetworkInterface - - ec2:DeleteNetworkInterface - - ec2:DescribeNetworkInterfaces - - ec2:DetachNetworkInterface - Effect: Allow - Resource: '*' - - Action: - - s3:GetObject - - s3:ListBucket - - s3:GetBucketLocation - - s3:GetObjectVersion - - s3:PutObject - - s3:PutObjectAcl - - s3:GetLifecycleConfiguration - - s3:PutLifecycleConfiguration - - s3:DeleteObject - Effect: Allow - Resource: - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName} - - bucketName: - Ref: SpillBucket - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName}/* - - bucketName: - Ref: 
SpillBucket - - !If - - HasGlueConnection + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] + Policies: + - Statement: - Action: - - glue:GetConnection + - secretsmanager:GetSecretValue Effect: Allow - Resource: - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' - - !Ref "AWS::NoValue" - - FunctionKmsPolicy: - Condition: CreateKmsPolicy - Type: "AWS::IAM::Policy" - Properties: - PolicyName: FunctionKmsPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Action: - - kms:GenerateRandom - Resource: '*' - - Effect: Allow - Action: - - kms:GenerateDataKey - Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" - Roles: - - !Ref FunctionRole + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' + Version: '2012-10-17' + - Statement: + - Action: + - logs:CreateLogGroup + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' + Version: '2012-10-17' + - Statement: + - Action: + - logs:CreateLogStream + - logs:PutLogEvents + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' + Version: '2012-10-17' + - Statement: + - Action: + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + Version: '2012-10-17' + #S3CrudPolicy allows our connector to spill large responses to S3. You can optionally replace this pre-made policy + #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. + - S3CrudPolicy: + BucketName: !Ref SpillBucket + #VPCAccessPolicy allows our connector to run in a VPC so that it can access your data source. 
+ - VPCAccessPolicy: {} + VpcConfig: + SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] + SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] \ No newline at end of file diff --git a/athena-snowflake/athena-snowflake-connection.yaml b/athena-snowflake/athena-snowflake-connection.yaml new file mode 100644 index 0000000000..6ddc4ef17d --- /dev/null +++ b/athena-snowflake/athena-snowflake-connection.yaml @@ -0,0 +1,166 @@ +Transform: 'AWS::Serverless-2016-10-31' +Metadata: + 'AWS::ServerlessRepo::Application': + Name: AthenaSnowflakeConnectorWithGlueConnection + Description: 'This Amazon Athena connector for Snowflake enables Amazon Athena to run SQL queries on data stored in Snowflake.' + Author: 'default author' + SpdxLicenseId: Apache-2.0 + LicenseUrl: LICENSE.txt + ReadmeUrl: README.md + Labels: + - athena-federation + HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' + SemanticVersion: 2022.47.1 + SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' +Parameters: + LambdaFunctionName: + Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' + Type: String + AllowedPattern: ^[a-z0-9-_]{1,64}$ + SecretName: + Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' + Type: String + SpillBucket: + Description: 'The name of the bucket where this function can spill data.' 
+ Type: String + GlueConnection: + Description: "Name of glue connection storing connection details for Federated Data source." + Type: String + SecurityGroupIds: + Description: '(Optional) One or more SecurityGroup IDs corresponding to the SecurityGroup that should be applied to the Lambda function. (e.g. sg1,sg2,sg3)' + Type: CommaDelimitedList + Default: "" + SubnetIds: + Description: '(Optional) One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' + Type: CommaDelimitedList + Default: "" + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." + Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" + +Conditions: + HasSecurityGroups: !Not [ !Equals [ !Join ["", !Ref SecurityGroupIds], "" ] ] + HasSubnets: !Not [ !Equals [ !Join ["", !Ref SubnetIds], "" ] ] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + +Resources: + JdbcConnectorConfig: + Type: 'AWS::Serverless::Function' + Properties: + Environment: + Variables: + glue_connection: !Ref GlueConnection + FunctionName: !Ref LambdaFunctionName + Handler: "com.amazonaws.athena.connectors.snowflake.SnowflakeMuxCompositeHandler" + CodeUri: "./target/athena-snowflake.zip" + Description: "Enables Amazon Athena to communicate with Snowflake using JDBC" + Runtime: java11 + Timeout: 900 + MemorySize: 3008 + Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] + VpcConfig: + SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] + SubnetIds: !If [ 
HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' + - Action: + - logs:CreateLogGroup + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' + - Action: + - logs:CreateLogStream + - logs:PutLogEvents + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' + - Action: + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 
'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-snowflake/athena-snowflake.yaml b/athena-snowflake/athena-snowflake.yaml index f5c6b53d0c..24edcaf9f8 100644 --- a/athena-snowflake/athena-snowflake.yaml +++ b/athena-snowflake/athena-snowflake.yaml @@ -20,8 +20,7 @@ Parameters: DefaultConnectionString: Description: 'The default connection string is used when catalog is "lambda:${LambdaFunctionName}". Catalog specific Connection Strings can be added later. Format: ${DatabaseType}://${NativeJdbcConnectionString}.' Type: String - Default: "" - SecretName: + SecretNamePrefix: Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' Type: String SpillBucket: @@ -31,10 +30,6 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill - GlueConnection: - Description: "(Optional) Name of glue connection storing connection details for Federated Data source." - Type: String - Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. 
(min 1 - 900 max)' Default: 900 @@ -55,43 +50,24 @@ Parameters: Description: '(Optional) One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' Type: CommaDelimitedList Default: "" - PermissionsBoundaryArn: + PermissionsBoundaryARN: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String - KmsKeyId: - Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." - Type: String - Default: "" - LambdaRoleArn: - Description: "(Optional) A custom role to be used by the Connector lambda" - Type: String - Default: "" - Conditions: + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] HasSecurityGroups: !Not [ !Equals [ !Join ["", !Ref SecurityGroupIds], "" ] ] HasSubnets: !Not [ !Equals [ !Join ["", !Ref SubnetIds], "" ] ] - HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] - NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] - CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] - HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] - NotHasGlueConnectionAndHasKmsKeyId: !And - - !Not [ !Condition HasGlueConnection ] - - !Condition HasKmsKeyId - Resources: JdbcConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption] - spill_bucket: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket] - spill_prefix: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix] - kms_key_id: !If [NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref 
"AWS::NoValue"] - glue_connection: !If [HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue"] - default: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultConnectionString] + disable_spill_encryption: !Ref DisableSpillEncryption + spill_bucket: !Ref SpillBucket + spill_prefix: !Ref SpillPrefix + default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.snowflake.SnowflakeMuxCompositeHandler" CodeUri: "./target/athena-snowflake.zip" @@ -99,108 +75,39 @@ Resources: Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] - VpcConfig: - SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] - SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] - - FunctionRole: - Condition: NotHasLambdaRole - Type: AWS::IAM::Role - Properties: - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] - ManagedPolicyArns: - - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" - AssumeRolePolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Principal: - Service: - - lambda.amazonaws.com - Action: - - "sts:AssumeRole" - - FunctionExecutionPolicy: - Condition: NotHasLambdaRole - Type: "AWS::IAM::Policy" - Properties: - Roles: - - !Ref FunctionRole - PolicyName: FunctionExecutionPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Action: - - secretsmanager:GetSecretValue - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' - - Action: - - logs:CreateLogGroup - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' - - Action: - - logs:CreateLogStream - - logs:PutLogEvents - Effect: Allow - Resource: !Sub 
'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' - - Action: - - athena:GetQueryExecution - - Effect: Allow - Resource: '*' - - Action: - - ec2:CreateNetworkInterface - - ec2:DeleteNetworkInterface - - ec2:DescribeNetworkInterfaces - - ec2:DetachNetworkInterface - Effect: Allow - Resource: '*' - - Action: - - s3:GetObject - - s3:ListBucket - - s3:GetBucketLocation - - s3:GetObjectVersion - - s3:PutObject - - s3:PutObjectAcl - - s3:GetLifecycleConfiguration - - s3:PutLifecycleConfiguration - - s3:DeleteObject - Effect: Allow - Resource: - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName} - - bucketName: - Ref: SpillBucket - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName}/* - - bucketName: - Ref: SpillBucket - - !If - - HasGlueConnection + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] + Policies: + - Statement: - Action: - - glue:GetConnection + - secretsmanager:GetSecretValue Effect: Allow - Resource: - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' - - !Ref "AWS::NoValue" - - FunctionKmsPolicy: - Condition: CreateKmsPolicy - Type: "AWS::IAM::Policy" - Properties: - PolicyName: FunctionKmsPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Action: - - kms:GenerateRandom - Resource: '*' - - Effect: Allow - Action: - - kms:GenerateDataKey - Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" - Roles: - - !Ref FunctionRole \ No newline at end of file + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' + Version: '2012-10-17' + - Statement: + - Action: + - logs:CreateLogGroup + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' + Version: '2012-10-17' + - 
Statement: + - Action: + - logs:CreateLogStream + - logs:PutLogEvents + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' + Version: '2012-10-17' + - Statement: + - Action: + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + Version: '2012-10-17' + #S3CrudPolicy allows our connector to spill large responses to S3. You can optionally replace this pre-made policy + #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. + - S3CrudPolicy: + BucketName: !Ref SpillBucket + #VPCAccessPolicy allows our connector to run in a VPC so that it can access your data source. + - VPCAccessPolicy: {} + VpcConfig: + SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] + SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] \ No newline at end of file diff --git a/athena-sqlserver/athena-sqlserver-connection.yaml b/athena-sqlserver/athena-sqlserver-connection.yaml new file mode 100644 index 0000000000..241a35ff98 --- /dev/null +++ b/athena-sqlserver/athena-sqlserver-connection.yaml @@ -0,0 +1,164 @@ +Transform: 'AWS::Serverless-2016-10-31' +Metadata: + 'AWS::ServerlessRepo::Application': + Name: AthenaSqlServerConnectorWithGlueConnection + Description: 'This connector enables Amazon Athena to communicate with SQL Server using JDBC driver.' + Author: 'default author' + SpdxLicenseId: Apache-2.0 + LicenseUrl: LICENSE.txt + ReadmeUrl: README.md + Labels: + - sqlserver + - athena-federation + - jdbc + HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' + SemanticVersion: 2022.47.1 + SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' +Parameters: + LambdaFunctionName: + Description: 'This is the name of the lambda function that will be created. 
This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' + Type: String + AllowedPattern: ^[a-z0-9-_]{1,64}$ + SecretName: + Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' + Type: String + SpillBucket: + Description: 'The name of the bucket where this function can spill data.' + Type: String + GlueConnection: + Description: "Name of glue connection storing connection details for Federated Data source." + Type: String + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" + SecurityGroupIds: + Description: '(Optional) One or more SecurityGroup IDs corresponding to the SecurityGroup that should be applied to the Lambda function. (e.g. sg1,sg2,sg3)' + Type: CommaDelimitedList + Default: "" + SubnetIds: + Description: '(Optional) One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' + Type: CommaDelimitedList + Default: "" + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
+ Type: String + Default: "" +Conditions: + HasSecurityGroups: !Not [ !Equals [ !Join [ "", !Ref SecurityGroupIds ], "" ] ] + HasSubnets: !Not [ !Equals [ !Join [ "", !Ref SubnetIds ], "" ] ] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] +Resources: + JdbcConnectorConfig: + Type: 'AWS::Serverless::Function' + Properties: + Environment: + Variables: + glue_connection: !Ref GlueConnection + FunctionName: !Ref LambdaFunctionName + Handler: "com.amazonaws.athena.connectors.sqlserver.SqlServerMuxCompositeHandler" + CodeUri: "./target/athena-sqlserver-2022.47.1.jar" + Description: "Enables Amazon Athena to communicate with SQLSERVER using JDBC" + Runtime: java11 + Timeout: 900 + MemorySize: 3008 + Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] + VpcConfig: + SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] + SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' + - Action: + - logs:CreateLogGroup + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' + - Action: + - logs:CreateLogStream + - 
logs:PutLogEvents + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' + - Action: + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole diff --git a/athena-sqlserver/athena-sqlserver.yaml b/athena-sqlserver/athena-sqlserver.yaml index a2abdcaa1b..8ccefb95cb 100644 --- a/athena-sqlserver/athena-sqlserver.yaml +++ b/athena-sqlserver/athena-sqlserver.yaml @@ -22,8 +22,7 @@ Parameters: DefaultConnectionString: Description: 'The default connection string is used when catalog is "lambda:${LambdaFunctionName}". Catalog specific Connection Strings can be added later. Format: ${DatabaseType}://${NativeJdbcConnectionString}.' 
Type: String - Default: "" - SecretName: + SecretNamePrefix: Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' Type: String SpillBucket: @@ -33,10 +32,6 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill - GlueConnection: - Description: "(Optional) Name of glue connection storing connection details for Federated Data source." - Type: String - Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -45,7 +40,7 @@ Parameters: Description: 'Lambda memory in MB (min 128 - 3008 max).' Default: 3008 Type: Number - LambdaRoleArn: + LambdaRoleARN: Description: "(Optional) A custom role to be used by the Connector lambda" Type: String Default: "" @@ -61,37 +56,25 @@ Parameters: Description: '(Optional) One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' Type: CommaDelimitedList Default: "" - PermissionsBoundaryArn: + PermissionsBoundaryARN: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String - KmsKeyId: - Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
- Type: String - Default: "" Conditions: - HasSecurityGroups: !Not [ !Equals [ !Join [ "", !Ref SecurityGroupIds ], "" ] ] - HasSubnets: !Not [ !Equals [ !Join [ "", !Ref SubnetIds ], "" ] ] - HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] - NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] - CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] - HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] - NotHasGlueConnectionAndHasKmsKeyId: !And - - !Not [ !Condition HasGlueConnection ] - - !Condition HasKmsKeyId + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] + NotHasLambdaRole: !Equals [!Ref LambdaRoleARN, ""] + HasSecurityGroups: !Not [ !Equals [ !Join ["", !Ref SecurityGroupIds], "" ] ] + HasSubnets: !Not [ !Equals [ !Join ["", !Ref SubnetIds], "" ] ] Resources: JdbcConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] - spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] - spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] - kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] - glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] - default: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultConnectionString ] + disable_spill_encryption: !Ref DisableSpillEncryption + spill_bucket: !Ref SpillBucket + spill_prefix: !Ref SpillPrefix + default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.sqlserver.SqlServerMuxCompositeHandler" CodeUri: "./target/athena-sqlserver-2022.47.1.jar" @@ -99,7 +82,8 @@ Resources: Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - Role: !If 
[NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] + Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleARN] VpcConfig: SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] @@ -107,7 +91,6 @@ Resources: Condition: NotHasLambdaRole Type: AWS::IAM::Role Properties: - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] ManagedPolicyArns: - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" AssumeRolePolicyDocument: @@ -132,7 +115,7 @@ Resources: - Action: - secretsmanager:GetSecretValue Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' - Action: - logs:CreateLogGroup Effect: Allow @@ -172,32 +155,4 @@ Resources: - Fn::Sub: - arn:${AWS::Partition}:s3:::${bucketName}/* - bucketName: - Ref: SpillBucket - - !If - - HasGlueConnection - - Action: - - glue:GetConnection - Effect: Allow - Resource: - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' - - !Ref "AWS::NoValue" - - FunctionKmsPolicy: - Condition: CreateKmsPolicy - Type: "AWS::IAM::Policy" - Properties: - PolicyName: FunctionKmsPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Action: - - kms:GenerateRandom - Resource: '*' - - Effect: Allow - Action: - - kms:GenerateDataKey - Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" - Roles: - - !Ref FunctionRole + Ref: SpillBucket \ No newline at end of file diff --git 
a/athena-synapse/athena-synapse-connection.yaml b/athena-synapse/athena-synapse-connection.yaml new file mode 100644 index 0000000000..0b3352b66c --- /dev/null +++ b/athena-synapse/athena-synapse-connection.yaml @@ -0,0 +1,169 @@ +Transform: 'AWS::Serverless-2016-10-31' +Metadata: + 'AWS::ServerlessRepo::Application': + Name: AthenaSynapseConnectorWithGlueConnection + Description: 'This connector enables Amazon Athena to communicate with Synapse using JDBC driver.' + Author: 'default author' + SpdxLicenseId: Apache-2.0 + LicenseUrl: LICENSE.txt + ReadmeUrl: README.md + Labels: + - synapse + - athena-federation + - jdbc + HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' + SemanticVersion: 2022.47.1 + SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' +Parameters: + LambdaFunctionName: + Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' + Type: String + AllowedPattern: ^[a-z0-9-_]{1,64}$ + SecretName: + Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' + Type: String + SpillBucket: + Description: 'The name of the bucket where this function can spill data.' + Type: String + GlueConnection: + Description: "Name of glue connection storing connection details for Federated Data source." 
+ Type: String + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Default: "" + Type: String + SecurityGroupIds: + Description: '(Optional) One or more SecurityGroup IDs corresponding to the SecurityGroup that should be applied to the Lambda function. (e.g. sg1,sg2,sg3)' + Type: CommaDelimitedList + Default: "" + SubnetIds: + Description: '(Optional) One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' + Type: CommaDelimitedList + Default: "" + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." + Type: String + Default: "" + +Conditions: + HasSecurityGroups: !Not [ !Equals [ !Join ["", !Ref SecurityGroupIds], "" ] ] + HasSubnets: !Not [ !Equals [ !Join ["", !Ref SubnetIds], "" ] ] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + +Resources: + JdbcConnectorConfig: + Type: 'AWS::Serverless::Function' + Properties: + Environment: + Variables: + glue_connection: !Ref GlueConnection + FunctionName: !Ref LambdaFunctionName + Handler: "com.amazonaws.athena.connectors.synapse.SynapseMuxCompositeHandler" + CodeUri: "./target/athena-synapse-2022.47.1.jar" + Description: "Enables Amazon Athena to communicate with SYNPASE using JDBC" + Runtime: java11 + Timeout: 900 + MemorySize: 3008 + Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] + VpcConfig: + SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] + SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + 
Properties: + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' + - Action: + - logs:CreateLogGroup + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' + - Action: + - logs:CreateLogStream + - logs:PutLogEvents + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' + - Action: + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + - autoscaling:CompleteLifecycleAction + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + Roles: + - !Ref FunctionRole + + FunctionKmsPolicy: + Condition: 
CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole diff --git a/athena-synapse/athena-synapse.yaml b/athena-synapse/athena-synapse.yaml index 60f510cb09..02c8aaf577 100644 --- a/athena-synapse/athena-synapse.yaml +++ b/athena-synapse/athena-synapse.yaml @@ -22,8 +22,7 @@ Parameters: DefaultConnectionString: Description: 'The default connection string is used when catalog is "lambda:${LambdaFunctionName}". Catalog specific Connection Strings can be added later. Format: ${DatabaseType}://${NativeJdbcConnectionString}.' Type: String - Default: "" - SecretName: + SecretNamePrefix: Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' Type: String SpillBucket: @@ -33,10 +32,6 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill - GlueConnection: - Description: "(Optional) Name of glue connection storing connection details for Federated Data source." - Type: String - Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -45,11 +40,11 @@ Parameters: Description: 'Lambda memory in MB (min 128 - 3008 max).' 
Default: 3008 Type: Number - LambdaRoleArn: + LambdaRoleARN: Description: "(Optional) A custom role to be used by the Connector lambda" Default: "" Type: String - PermissionsBoundaryArn: + PermissionsBoundaryARN: Description: "(Optional) A custom Permission Boundary to be used by the Connector lambda" Default: "" Type: String @@ -65,22 +60,12 @@ Parameters: Description: '(Optional) One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' Type: CommaDelimitedList Default: "" - KmsKeyId: - Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." - Type: String - Default: "" Conditions: + NotHasLambdaRole: !Equals [!Ref LambdaRoleARN, ""] + HasPermissionsBoundary: !Not [!Equals [!Ref PermissionsBoundaryARN, ""]] HasSecurityGroups: !Not [ !Equals [ !Join ["", !Ref SecurityGroupIds], "" ] ] HasSubnets: !Not [ !Equals [ !Join ["", !Ref SubnetIds], "" ] ] - HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] - NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] - CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] - HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] - NotHasGlueConnectionAndHasKmsKeyId: !And - - !Not [ !Condition HasGlueConnection ] - - !Condition HasKmsKeyId Resources: JdbcConnectorConfig: @@ -88,12 +73,10 @@ Resources: Properties: Environment: Variables: - disable_spill_encryption: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption] - spill_bucket: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket] - spill_prefix: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix] - kms_key_id: !If [NotHasGlueConnectionAndHasKmsKeyId, !Ref 
KmsKeyId, !Ref "AWS::NoValue"] - glue_connection: !If [HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue"] - default: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultConnectionString] + disable_spill_encryption: !Ref DisableSpillEncryption + spill_bucket: !Ref SpillBucket + spill_prefix: !Ref SpillPrefix + default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.synapse.SynapseMuxCompositeHandler" CodeUri: "./target/athena-synapse-2022.47.1.jar" @@ -101,7 +84,7 @@ Resources: Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] + Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleARN] VpcConfig: SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] @@ -110,7 +93,11 @@ Resources: Condition: NotHasLambdaRole Type: AWS::IAM::Role Properties: - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] + PermissionsBoundary: + Fn::If: + - HasPermissionsBoundary + - Ref: PermissionsBoundaryARN + - Ref: AWS::NoValue ManagedPolicyArns: - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" AssumeRolePolicyDocument: @@ -134,7 +121,7 @@ Resources: - Action: - secretsmanager:GetSecretValue Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' - Action: - logs:CreateLogGroup Effect: Allow @@ -156,53 +143,5 @@ Resources: - autoscaling:CompleteLifecycleAction Effect: Allow Resource: '*' - - Action: - - s3:GetObject - - s3:ListBucket - - s3:GetBucketLocation - - s3:GetObjectVersion - - s3:PutObject - - s3:PutObjectAcl - - 
s3:GetLifecycleConfiguration - - s3:PutLifecycleConfiguration - - s3:DeleteObject - Effect: Allow - Resource: - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName} - - bucketName: - Ref: SpillBucket - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName}/* - - bucketName: - Ref: SpillBucket - - !If - - HasGlueConnection - - Action: - - glue:GetConnection - Effect: Allow - Resource: - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' - - !Ref "AWS::NoValue" - Roles: - - !Ref FunctionRole - - FunctionKmsPolicy: - Condition: CreateKmsPolicy - Type: "AWS::IAM::Policy" - Properties: - PolicyName: FunctionKmsPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Action: - - kms:GenerateRandom - Resource: '*' - - Effect: Allow - Action: - - kms:GenerateDataKey - Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" Roles: - - !Ref FunctionRole + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-teradata/athena-teradata-connection.yaml b/athena-teradata/athena-teradata-connection.yaml new file mode 100644 index 0000000000..b2c8c80f56 --- /dev/null +++ b/athena-teradata/athena-teradata-connection.yaml @@ -0,0 +1,164 @@ +Transform: 'AWS::Serverless-2016-10-31' +Metadata: + 'AWS::ServerlessRepo::Application': + Name: AthenaTeradataConnectorWithGlueConnection + Description: 'This connector enables Amazon Athena to communicate with your Teradata instance(s) using JDBC driver.' 
+ Author: 'default author' + SpdxLicenseId: Apache-2.0 + LicenseUrl: LICENSE.txt + ReadmeUrl: README.md + Labels: + - athena-federation + HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' + SemanticVersion: 2022.47.1 + SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' +Parameters: + LambdaFunctionName: + Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' + Type: String + AllowedPattern: ^[a-z0-9-_]{1,64}$ + SecretName: + Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' + Type: String + SpillBucket: + Description: 'The name of the bucket where this function can spill data.' + Type: String + GlueConnection: + Description: "Name of glue connection storing connection details for Federated Data source." + Type: String + SecurityGroupIds: + Description: '(Optional) One or more SecurityGroup IDs corresponding to the SecurityGroup that should be applied to the Lambda function. (e.g. sg1,sg2,sg3)' + Type: CommaDelimitedList + Default: "" + SubnetIds: + Description: '(Optional) One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' + Type: CommaDelimitedList + Default: "" + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
+ Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" +Conditions: + HasSecurityGroups: !Not [ !Equals [ !Join ["", !Ref SecurityGroupIds], "" ] ] + HasSubnets: !Not [ !Equals [ !Join ["", !Ref SubnetIds], "" ] ] + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] +Resources: + JdbcConnectorConfig: + Type: 'AWS::Serverless::Function' + Properties: + Environment: + Variables: + glue_connection: !Ref GlueConnection + FunctionName: !Ref LambdaFunctionName + Handler: "com.amazonaws.athena.connectors.teradata.TeradataMuxCompositeHandler" + CodeUri: "./target/athena-teradata-2024.18.2.jar" + Description: "Enables Amazon Athena to communicate with Teradata using JDBC" + Runtime: java11 + Timeout: 900 + MemorySize: 3008 + Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] + VpcConfig: + SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] + SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' + - Action: + - logs:CreateLogGroup + Effect: Allow + 
Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' + - Action: + - logs:CreateLogStream + - logs:PutLogEvents + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' + - Action: + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-teradata/athena-teradata.yaml b/athena-teradata/athena-teradata.yaml index b757ad033d..3e1431c6c0 100644 --- a/athena-teradata/athena-teradata.yaml +++ b/athena-teradata/athena-teradata.yaml @@ -20,8 +20,7 @@ Parameters: DefaultConnectionString: Description: 'The default connection string is used when catalog is 
"lambda:${LambdaFunctionName}". Catalog specific Connection Strings can be added later. Format: ${DatabaseType}://${NativeJdbcConnectionString}.' Type: String - Default: "" - SecretName: + SecretNamePrefix: Description: 'Used to create resource-based authorization policy for "secretsmanager:GetSecretValue" action. E.g. All Athena JDBC Federation secret names can be prefixed with "AthenaJdbcFederation" and authorization policy will allow "arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:AthenaJdbcFederation*". Parameter value in this case should be "AthenaJdbcFederation". If you do not have a prefix, you can manually update the IAM policy to add allow any secret names.' Type: String SpillBucket: @@ -31,11 +30,7 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill - GlueConnection: - Description: "(Optional) Name of glue connection storing connection details for Federated Data source." - Type: String - Default: "" - LambdaJdbcLayername: + LambdaJDBCLayername: Description: 'Lambda JDBC layer Name. Must be ARN of layer' Type: String LambdaTimeout: @@ -62,153 +57,67 @@ Parameters: Description: 'Partition Count Limit' Type: Number Default: 500 - PermissionsBoundaryArn: + PermissionsBoundaryARN: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String - KmsKeyId: - Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
- Type: String - Default: "" - LambdaRoleArn: - Description: "(Optional) A custom role to be used by the Connector lambda" - Type: String - Default: "" Conditions: + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] HasSecurityGroups: !Not [ !Equals [ !Join ["", !Ref SecurityGroupIds], "" ] ] HasSubnets: !Not [ !Equals [ !Join ["", !Ref SubnetIds], "" ] ] - HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] - NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] - CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] - HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] - NotHasGlueConnectionAndHasKmsKeyId: !And - - !Not [ !Condition HasGlueConnection ] - - !Condition HasKmsKeyId Resources: JdbcConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] - spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] - spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] - kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] - glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] - default: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultConnectionString ] - partitioncount: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref PartitionCount ] + disable_spill_encryption: !Ref DisableSpillEncryption + spill_bucket: !Ref SpillBucket + spill_prefix: !Ref SpillPrefix + default: !Ref DefaultConnectionString + partitioncount: !Ref PartitionCount FunctionName: !Ref LambdaFunctionName Handler: "com.amazonaws.athena.connectors.teradata.TeradataMuxCompositeHandler" Layers: - - !Ref LambdaJdbcLayername - CodeUri: "./target/athena-teradata-2024.18.2.jar" + - !Ref LambdaJDBCLayername + CodeUri: 
"./target/athena-teradata-2022.47.1.jar" Description: "Enables Amazon Athena to communicate with Teradata using JDBC" Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] - VpcConfig: - SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] - SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] - - FunctionRole: - Condition: NotHasLambdaRole - Type: AWS::IAM::Role - Properties: - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] - ManagedPolicyArns: - - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" - AssumeRolePolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Principal: - Service: - - lambda.amazonaws.com - Action: - - "sts:AssumeRole" - - FunctionExecutionPolicy: - Condition: NotHasLambdaRole - Type: "AWS::IAM::Policy" - Properties: - Roles: - - !Ref FunctionRole - PolicyName: FunctionExecutionPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Action: - - secretsmanager:GetSecretValue - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' - - Action: - - logs:CreateLogGroup - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' - - Action: - - logs:CreateLogStream - - logs:PutLogEvents - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' - - Action: - - athena:GetQueryExecution - - Effect: Allow - Resource: '*' - - Action: - - ec2:CreateNetworkInterface - - ec2:DeleteNetworkInterface - - ec2:DescribeNetworkInterfaces - - ec2:DetachNetworkInterface - Effect: Allow - Resource: '*' - - Action: - - s3:GetObject - - s3:ListBucket - - s3:GetBucketLocation - - s3:GetObjectVersion - - s3:PutObject - - s3:PutObjectAcl - - 
s3:GetLifecycleConfiguration - - s3:PutLifecycleConfiguration - - s3:DeleteObject - Effect: Allow - Resource: - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName} - - bucketName: - Ref: SpillBucket - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName}/* - - bucketName: - Ref: SpillBucket - - !If - - HasGlueConnection + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] + Policies: + - Statement: - Action: - - glue:GetConnection + - secretsmanager:GetSecretValue Effect: Allow - Resource: - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' - - !Ref "AWS::NoValue" - - FunctionKmsPolicy: - Condition: CreateKmsPolicy - Type: "AWS::IAM::Policy" - Properties: - PolicyName: FunctionKmsPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Action: - - kms:GenerateRandom - Resource: '*' - - Effect: Allow - Action: - - kms:GenerateDataKey - Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" - Roles: - - !Ref FunctionRole \ No newline at end of file + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretNamePrefix}*' + Version: '2012-10-17' + - Statement: + - Action: + - logs:CreateLogGroup + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*' + Version: '2012-10-17' + - Statement: + - Action: + - logs:CreateLogStream + - logs:PutLogEvents + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${LambdaFunctionName}:*' + Version: '2012-10-17' + - Statement: + - Action: + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + Version: '2012-10-17' + #S3CrudPolicy allows our connector to spill large responses to S3. 
You can optionally replace this pre-made policy + #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. + - S3CrudPolicy: + BucketName: !Ref SpillBucket + #VPCAccessPolicy allows our connector to run in a VPC so that it can access your data source. + - VPCAccessPolicy: {} + VpcConfig: + SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] + SubnetIds: !If [ HasSubnets, !Ref SubnetIds, !Ref "AWS::NoValue" ] \ No newline at end of file diff --git a/athena-timestream/athena-timestream-connection.yaml b/athena-timestream/athena-timestream-connection.yaml new file mode 100644 index 0000000000..207a1fcef3 --- /dev/null +++ b/athena-timestream/athena-timestream-connection.yaml @@ -0,0 +1,139 @@ +Transform: 'AWS::Serverless-2016-10-31' +Metadata: + 'AWS::ServerlessRepo::Application': + Name: AthenaTimestreamConnectorWithGlueConnection + Description: 'This connector enables Amazon Athena to communicate with Amazon Timestream, making your time series data accessible from Athena.' + Author: 'default author' + SpdxLicenseId: Apache-2.0 + LicenseUrl: LICENSE.txt + ReadmeUrl: README.md + Labels: + - athena-federation + HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' + SemanticVersion: 2022.47.1 + SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' +Parameters: + LambdaFunctionName: + Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' + Type: String + AllowedPattern: ^[a-z0-9-_]{1,64}$ + SpillBucket: + Description: 'The name of the bucket where this function can spill data.' + Type: String + GlueConnection: + Description: "Name of glue connection storing connection details for Federated Data source." + Type: String + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. 
Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." + Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" +Conditions: + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] +Resources: + ConnectorConfig: + Type: 'AWS::Serverless::Function' + Properties: + Environment: + Variables: + glue_connection: !Ref GlueConnection + FunctionName: !Ref LambdaFunctionName + Handler: "com.amazonaws.athena.connectors.timestream.TimestreamCompositeHandler" + CodeUri: "./target/athena-timestream-2022.47.1.jar" + Description: "Enables Amazon Athena to communicate with Amazon Timestream, making your time series data accessible from Athena." + Runtime: java11 + Timeout: 900 + MemorySize: 3008 + Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - glue:GetTableVersions + - glue:GetPartitions + - glue:GetTables + - glue:GetTableVersion + - glue:GetDatabases + - glue:GetTable + - glue:GetPartition + - glue:GetDatabase + - athena:GetQueryExecution + - timestream:Describe* + - timestream:List* + - timestream:Select* + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - 
s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-timestream/athena-timestream.yaml b/athena-timestream/athena-timestream.yaml index 470a55feb1..3ee8788678 100644 --- a/athena-timestream/athena-timestream.yaml +++ b/athena-timestream/athena-timestream.yaml @@ -13,7 +13,7 @@ Metadata: SemanticVersion: 2022.47.1 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: - LambdaFunctionName: + AthenaCatalogName: Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' Type: String AllowedPattern: ^[a-z0-9-_]{1,64}$ @@ -24,10 +24,6 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill - GlueConnection: - Description: "(Optional) Name of glue connection storing connection details for Federated Data source." 
- Type: String - Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -40,134 +36,48 @@ Parameters: Description: "WARNING: If set to 'true' encryption for spilled data is disabled." Default: 'false' Type: String - PermissionsBoundaryArn: + PermissionsBoundaryARN: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String - KmsKeyId: - Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." - Type: String - Default: "" - LambdaRoleArn: - Description: "(Optional) A custom role to be used by the Connector lambda" - Type: String - Default: "" Conditions: - HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] - NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] - CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] - HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] - NotHasGlueConnectionAndHasKmsKeyId: !And - - !Not [ !Condition HasGlueConnection ] - - !Condition HasKmsKeyId + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] Resources: ConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] - spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] - spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] - kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] - glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] - FunctionName: !Ref 
LambdaFunctionName + disable_spill_encryption: !Ref DisableSpillEncryption + spill_bucket: !Ref SpillBucket + spill_prefix: !Ref SpillPrefix + FunctionName: !Ref AthenaCatalogName Handler: "com.amazonaws.athena.connectors.timestream.TimestreamCompositeHandler" CodeUri: "./target/athena-timestream-2022.47.1.jar" Description: "Enables Amazon Athena to communicate with Amazon Timestream, making your time series data accessible from Athena." Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] - - FunctionRole: - Condition: NotHasLambdaRole - Type: AWS::IAM::Role - Properties: - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] - ManagedPolicyArns: - - "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" - AssumeRolePolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Principal: - Service: - - lambda.amazonaws.com - Action: - - "sts:AssumeRole" - - FunctionExecutionPolicy: - Condition: NotHasLambdaRole - Type: "AWS::IAM::Policy" - Properties: - Roles: - - !Ref FunctionRole - PolicyName: FunctionExecutionPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Action: - - glue:GetTableVersions - - glue:GetPartitions - - glue:GetTables - - glue:GetTableVersion - - glue:GetDatabases - - glue:GetTable - - glue:GetPartition - - glue:GetDatabase - - athena:GetQueryExecution - - timestream:Describe* - - timestream:List* - - timestream:Select* - Effect: Allow - Resource: '*' - - Action: - - s3:GetObject - - s3:ListBucket - - s3:GetBucketLocation - - s3:GetObjectVersion - - s3:PutObject - - s3:PutObjectAcl - - s3:GetLifecycleConfiguration - - s3:PutLifecycleConfiguration - - s3:DeleteObject - Effect: Allow - Resource: - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName} - - bucketName: - Ref: SpillBucket - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName}/* - - bucketName: - Ref: 
SpillBucket - - !If - - HasGlueConnection + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] + Policies: + - Statement: - Action: - - glue:GetConnection + - glue:GetTableVersions + - glue:GetPartitions + - glue:GetTables + - glue:GetTableVersion + - glue:GetDatabases + - glue:GetTable + - glue:GetPartition + - glue:GetDatabase + - athena:GetQueryExecution + - timestream:Describe* + - timestream:List* + - timestream:Select* Effect: Allow - Resource: - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' - - !Ref "AWS::NoValue" - - FunctionKmsPolicy: - Condition: CreateKmsPolicy - Type: "AWS::IAM::Policy" - Properties: - PolicyName: FunctionKmsPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Action: - - kms:GenerateRandom - Resource: '*' - - Effect: Allow - Action: - - kms:GenerateDataKey - Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" - Roles: - - !Ref FunctionRole \ No newline at end of file + Resource: '*' + Version: '2012-10-17' + #S3CrudPolicy allows our connector to spill large responses to S3. You can optionally replace this pre-made policy + #with one that is more restrictive and can only 'put' but not read,delete, or overwrite files. + - S3CrudPolicy: + BucketName: !Ref SpillBucket \ No newline at end of file diff --git a/athena-tpcds/athena-tpcds-connection.yaml b/athena-tpcds/athena-tpcds-connection.yaml new file mode 100644 index 0000000000..f2488e28d4 --- /dev/null +++ b/athena-tpcds/athena-tpcds-connection.yaml @@ -0,0 +1,130 @@ +Transform: 'AWS::Serverless-2016-10-31' +Metadata: + 'AWS::ServerlessRepo::Application': + Name: AthenaTPCDSConnectorWithGlueConnection + Description: 'This connector enables Amazon Athena to communicate with a randomly generated TPC-DS data source.' 
+ Author: 'default author' + SpdxLicenseId: Apache-2.0 + LicenseUrl: LICENSE.txt + ReadmeUrl: README.md + Labels: + - athena-federation + HomePageUrl: 'https://github.com/awslabs/aws-athena-query-federation' + SemanticVersion: 2022.47.1 + SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' +Parameters: + LambdaFunctionName: + Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' + Type: String + AllowedPattern: ^[a-z0-9-_]{1,64}$ + SpillBucket: + Description: 'The name of the bucket where this function can spill data.' + Type: String + GlueConnection: + Description: "Name of glue connection storing connection details for Federated Data source." + Type: String + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." + Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" + +Conditions: + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + +Resources: + ConnectorConfig: + Type: 'AWS::Serverless::Function' + Properties: + Environment: + Variables: + glue_connection: !Ref GlueConnection + FunctionName: !Ref LambdaFunctionName + Handler: "com.amazonaws.athena.connectors.tpcds.TPCDSCompositeHandler" + CodeUri: "./target/athena-tpcds-2022.47.1.jar" + Description: "This connector enables Amazon Athena to communicate with a randomly generated TPC-DS data source." 
+ Runtime: java11 + Timeout: 900 + MemorySize: 3008 + Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName} + - bucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${bucketName}/* + - bucketName: + Ref: SpillBucket + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-tpcds/athena-tpcds.yaml b/athena-tpcds/athena-tpcds.yaml index 2215bde558..219e0a7dd2 100644 --- 
a/athena-tpcds/athena-tpcds.yaml +++ b/athena-tpcds/athena-tpcds.yaml @@ -13,7 +13,7 @@ Metadata: SemanticVersion: 2022.47.1 SourceCodeUrl: 'https://github.com/awslabs/aws-athena-query-federation' Parameters: - LambdaFunctionName: + AthenaCatalogName: Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' Type: String AllowedPattern: ^[a-z0-9-_]{1,64}$ @@ -24,10 +24,6 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill - GlueConnection: - Description: "(Optional) Name of glue connection storing connection details for Federated Data source." - Type: String - Default: "" LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)' Default: 900 @@ -40,126 +36,37 @@ Parameters: Description: "WARNING: If set to 'true' encryption for spilled data is disabled." Default: 'false' Type: String - PermissionsBoundaryArn: + PermissionsBoundaryARN: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String - KmsKeyId: - Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
- Type: String - Default: "" - LambdaRoleArn: - Description: "(Optional) A custom role to be used by the Connector lambda" - Type: String - Default: "" - Conditions: - HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] - NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] - CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] - HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] - NotHasGlueConnectionAndHasKmsKeyId: !And - - !Not [ !Condition HasGlueConnection ] - - !Condition HasKmsKeyId - + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] Resources: ConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption ] - spill_bucket: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket ] - spill_prefix: !If [ HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix ] - kms_key_id: !If [ NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue" ] - glue_connection: !If [ HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue" ] - FunctionName: !Ref LambdaFunctionName + disable_spill_encryption: !Ref DisableSpillEncryption + spill_bucket: !Ref SpillBucket + spill_prefix: !Ref SpillPrefix + FunctionName: !Ref AthenaCatalogName Handler: "com.amazonaws.athena.connectors.tpcds.TPCDSCompositeHandler" CodeUri: "./target/athena-tpcds-2022.47.1.jar" Description: "This connector enables Amazon Athena to communicate with a randomly generated TPC-DS data source." 
Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] - - FunctionRole: - Condition: NotHasLambdaRole - Type: AWS::IAM::Role - Properties: - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] - ManagedPolicyArns: - - "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" - AssumeRolePolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Principal: - Service: - - lambda.amazonaws.com - Action: - - "sts:AssumeRole" - - FunctionExecutionPolicy: - Condition: NotHasLambdaRole - Type: "AWS::IAM::Policy" - Properties: - Roles: - - !Ref FunctionRole - PolicyName: FunctionExecutionPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Action: - - athena:GetQueryExecution - - Effect: Allow - Resource: '*' - - Action: - - s3:GetObject - - s3:ListBucket - - s3:GetBucketLocation - - s3:GetObjectVersion - - s3:PutObject - - s3:PutObjectAcl - - s3:GetLifecycleConfiguration - - s3:PutLifecycleConfiguration - - s3:DeleteObject - Effect: Allow - Resource: - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName} - - bucketName: - Ref: SpillBucket - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${bucketName}/* - - bucketName: - Ref: SpillBucket - - !If - - HasGlueConnection + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] + Policies: + - Statement: - Action: - - glue:GetConnection + - athena:GetQueryExecution Effect: Allow - Resource: - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' - - !Ref "AWS::NoValue" - - FunctionKmsPolicy: - Condition: CreateKmsPolicy - Type: "AWS::IAM::Policy" - Properties: - PolicyName: FunctionKmsPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Action: - - 
kms:GenerateRandom - Resource: '*' - - Effect: Allow - Action: - - kms:GenerateDataKey - Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" - Roles: - - !Ref FunctionRole \ No newline at end of file + Resource: '*' + Version: '2012-10-17' + #S3CrudPolicy allows our connector to spill large responses to S3. You can optionally replace this pre-made policy + #with one that is more restrictive and can only 'put' but not read, delete, or overwrite files. + - S3CrudPolicy: + BucketName: !Ref SpillBucket \ No newline at end of file diff --git a/athena-vertica/athena-vertica-connection.yaml b/athena-vertica/athena-vertica-connection.yaml new file mode 100644 index 0000000000..a3210b7df4 --- /dev/null +++ b/athena-vertica/athena-vertica-connection.yaml @@ -0,0 +1,174 @@ +Transform: 'AWS::Serverless-2016-10-31' + +Metadata: + AWS::ServerlessRepo::Application: + Name: AthenaVerticaConnectorWithGlueConnection + Description: 'This connector enables Amazon Athena to communicate with Vertica' + Author: 'default author' + SpdxLicenseId: Apache-2.0 + LicenseUrl: LICENSE.txt + ReadmeUrl: README.md + Labels: ['athena-federation'] + HomePageUrl: https://github.com/awslabs/aws-athena-query-federation + SemanticVersion: 2022.47.1 + SourceCodeUrl: https://github.com/awslabs/aws-athena-query-federation + +# Parameters are CloudFormation features to pass input +# to your template when you create a stack +Parameters: + LambdaFunctionName: + Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' + Type: String + AllowedPattern: ^[a-z0-9-_]{1,64}$ + SpillBucket: + Description: 'The name of the bucket where this function can spill data.' + Type: String + ExportBucket: + Description: "The bucket where the Vertica Query results will be exported." + Type: String + GlueConnection: + Description: "Name of glue connection storing connection details for Federated Data source." 
+ Type: String + SubnetIds: + Description: 'One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access your data source. (e.g. subnet1,subnet2)' + Type: 'List' + SecurityGroupIds: + Description: 'One or more SecurityGroup IDs corresponding to the SecurityGroup that should be applied to the Lambda function. (e.g. sg1,sg2,sg3)' + Type: 'List' + SecretName: + Description: 'The name or prefix of a set of names within Secrets Manager that this function should have access to. (e.g. vertica-*).' + Type: String + Default: "vertica-*" + KmsKeyId: + Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." + Type: String + Default: "" + LambdaRoleArn: + Description: "(Optional) A custom role to be used by the Connector lambda" + Type: String + Default: "" + +Conditions: + HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] + NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] + CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + +Resources: + ConnectorConfig: + Type: 'AWS::Serverless::Function' + Properties: + Environment: + Variables: + glue_connection: !Ref GlueConnection + FunctionName: !Ref LambdaFunctionName + Handler: "com.amazonaws.athena.connectors.vertica.VerticaCompositeHandler" + CodeUri: "./target/athena-vertica-2022.47.1.jar" + Description: "Amazon Athena Vertica Connector" + Runtime: java11 + Timeout: 900 + MemorySize: 3008 + Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] + VpcConfig: + SecurityGroupIds: !Ref SecurityGroupIds + SubnetIds: !Ref SubnetIds + + FunctionRole: + Condition: NotHasLambdaRole + Type: AWS::IAM::Role + Properties: + ManagedPolicyArns: + - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" + AssumeRolePolicyDocument: + Version: 2012-10-17 + 
Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + + FunctionExecutionPolicy: + Condition: NotHasLambdaRole + Type: "AWS::IAM::Policy" + Properties: + Roles: + - !Ref FunctionRole + PolicyName: FunctionExecutionPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Action: + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + - Action: + - s3:ListBucket + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:s3:::${ExportBucket}' + - !Sub 'arn:${AWS::Partition}:s3:::${SpillBucket}' + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:*:*:secret:${SecretName}*' + - Action: + - ec2:CreateNetworkInterface + - ec2:DeleteNetworkInterface + - ec2:DescribeNetworkInterfaces + - ec2:DetachNetworkInterface + Effect: Allow + Resource: '*' + - Action: + - s3:GetObject + - s3:ListBucket + - s3:GetBucketLocation + - s3:GetObjectVersion + - s3:PutObject + - s3:PutObjectAcl + - s3:GetLifecycleConfiguration + - s3:PutLifecycleConfiguration + - s3:DeleteObject + Effect: Allow + Resource: + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${spillBucketName} + - spillBucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${spillBucketName}/* + - spillBucketName: + Ref: SpillBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${exportBucketName} + - exportBucketName: + Ref: ExportBucket + - Fn::Sub: + - arn:${AWS::Partition}:s3:::${exportBucketName}/* + - exportBucketName: + Ref: ExportBucket + - Action: + - glue:GetConnection + Effect: Allow + Resource: + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' + - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' + + FunctionKmsPolicy: + Condition: CreateKmsPolicy + Type: "AWS::IAM::Policy" + Properties: + PolicyName: FunctionKmsPolicy + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + 
Action: + - kms:GenerateRandom + Resource: '*' + - Effect: Allow + Action: + - kms:GenerateDataKey + Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" + Roles: + - !Ref FunctionRole \ No newline at end of file diff --git a/athena-vertica/athena-vertica.yaml b/athena-vertica/athena-vertica.yaml index c5045e686d..65cb959214 100644 --- a/athena-vertica/athena-vertica.yaml +++ b/athena-vertica/athena-vertica.yaml @@ -16,24 +16,20 @@ Metadata: # Parameters are CloudFormation features to pass input # to your template when you create a stack Parameters: - LambdaFunctionName: + AthenaCatalogName: Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' Type: String AllowedPattern: ^[a-z0-9-_]{1,64}$ SpillBucket: Description: 'The name of the bucket where this function can spill data.' Type: String - ExportBucket: + VerticaExportBucket: Description: "The bucket where the Vertica Query results will be exported." Type: String SpillPrefix: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill - GlueConnection: - Description: "(Optional) Name of glue connection storing connection details for Federated Data source." - Type: String - Default: "" LambdaTimeout: Description: "Maximum Lambda invocation runtime in seconds. (min 1 - 900 max)" Default: 900 @@ -46,169 +42,88 @@ Parameters: Description: "WARNING: If set to 'true' encryption for spilled data is disabled." Default: "false" Type: String + VpcId: + Description: 'VPC ID' + Type: 'AWS::EC2::VPC::Id' SubnetIds: Description: 'One or more Subnet IDs corresponding to the Subnet that the Lambda function can use to access you data source. (e.g. subnet1,subnet2)' Type: 'List' - SecurityGroupIds: - Description: 'One or more SecurityGroup IDs corresponding to the SecurityGroup that should be applied to the Lambda function. (e.g. 
sg1,sg2,sg3)' - Type: 'List' - SecretName: + SecretNameOrPrefix: Description: 'The name or prefix of a set of names within Secrets Manager that this function should have access to. (e.g. vertica-*).' Type: String Default: "vertica-*" - DefaultConnectionString: + VerticaConnectionString: Description: 'The Vertica connection details to use by default if not catalog specific connection is defined and optionally using SecretsManager (e.g. ${secret_name}).' Type: String Default: "vertica://jdbc:vertica://:/?user=${vertica-username}&password=${vertica-password}" - PermissionsBoundaryArn: + PermissionsBoundaryARN: Description: "(Optional) An IAM policy ARN to use as the PermissionsBoundary for the created Lambda function's execution role" Default: '' Type: String - KmsKeyId: - Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
- Type: String - Default: "" - LambdaRoleArn: - Description: "(Optional) A custom role to be used by the Connector lambda" - Type: String - Default: "" Conditions: - HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] - NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] - HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryArn, "" ] ] - CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] - HasGlueConnection: !Not [ !Equals [ !Ref GlueConnection, "" ] ] - NotHasGlueConnectionAndHasKmsKeyId: !And - - !Not [ !Condition HasGlueConnection ] - - !Condition HasKmsKeyId + HasPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundaryARN, "" ] ] Resources: + LambdaSecurityGroup: + Type: 'AWS::EC2::SecurityGroup' + Properties: + GroupDescription: 'Athena Vertica Connector Lambda VPC Security Group' + VpcId: !Ref VpcId ConnectorConfig: Type: 'AWS::Serverless::Function' Properties: Environment: Variables: - disable_spill_encryption: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref DisableSpillEncryption] - spill_bucket: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillBucket] - spill_prefix: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref SpillPrefix] - kms_key_id: !If [NotHasGlueConnectionAndHasKmsKeyId, !Ref KmsKeyId, !Ref "AWS::NoValue"] - glue_connection: !If [HasGlueConnection, !Ref GlueConnection, !Ref "AWS::NoValue"] - export_bucket: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref ExportBucket] - default: !If [HasGlueConnection, !Ref "AWS::NoValue", !Ref DefaultConnectionString] - FunctionName: !Ref LambdaFunctionName + disable_spill_encryption: !Ref DisableSpillEncryption + spill_bucket: !Ref SpillBucket + spill_prefix: !Ref SpillPrefix + export_bucket: !Ref VerticaExportBucket + default: !Ref VerticaConnectionString + + FunctionName: !Sub "${AthenaCatalogName}" Handler: "com.amazonaws.athena.connectors.vertica.VerticaCompositeHandler" CodeUri: "./target/athena-vertica-2022.47.1.jar" Description: "Amazon Athena 
Vertica Connector" Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory - Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] - VpcConfig: - SecurityGroupIds: !Ref SecurityGroupIds - SubnetIds: !Ref SubnetIds - - FunctionRole: - Condition: NotHasLambdaRole - Type: AWS::IAM::Role - Properties: - PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryArn, !Ref "AWS::NoValue" ] - ManagedPolicyArns: - - "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" - AssumeRolePolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Principal: - Service: - - lambda.amazonaws.com - Action: - - "sts:AssumeRole" - - FunctionExecutionPolicy: - Condition: NotHasLambdaRole - Type: "AWS::IAM::Policy" - Properties: - Roles: - - !Ref FunctionRole - PolicyName: FunctionExecutionPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Action: - - athena:GetQueryExecution - Effect: Allow - Resource: '*' - - Action: - - s3:ListBucket - Effect: Allow - Resource: - - !Sub 'arn:${AWS::Partition}:s3:::${ExportBucket}' - - !Sub 'arn:${AWS::Partition}:s3:::${SpillBucket}' - - Action: - - secretsmanager:GetSecretValue - Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:*:*:secret:${SecretName}*' - - Action: - - ec2:CreateNetworkInterface - - ec2:DeleteNetworkInterface - - ec2:DescribeNetworkInterfaces - - ec2:DetachNetworkInterface - Effect: Allow - Resource: '*' - - Action: - - s3:GetObject - - s3:ListBucket - - s3:GetBucketLocation - - s3:GetObjectVersion - - s3:PutObject - - s3:PutObjectAcl - - s3:GetLifecycleConfiguration - - s3:PutLifecycleConfiguration - - s3:DeleteObject - Effect: Allow - Resource: - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${spillBucketName} - - spillBucketName: - Ref: SpillBucket - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${spillBucketName}/* - - spillBucketName: - Ref: SpillBucket - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${exportBucketName} - - 
exportBucketName: - Ref: ExportBucket - - Fn::Sub: - - arn:${AWS::Partition}:s3:::${exportBucketName}/* - - exportBucketName: - Ref: ExportBucket - - !If - - HasGlueConnection + PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] + Policies: + - Statement: + - Action: + - athena:GetQueryExecution + Effect: Allow + Resource: '*' + Version: '2012-10-17' + - Statement: - Action: - - glue:GetConnection + - s3:ListBucket Effect: Allow Resource: - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:connection/${GlueConnection}' - - !Sub 'arn:${AWS::Partition}:glue:${AWS::Region}:${AWS::AccountId}:catalog' - - !Ref "AWS::NoValue" - - FunctionKmsPolicy: - Condition: CreateKmsPolicy - Type: "AWS::IAM::Policy" - Properties: - PolicyName: FunctionKmsPolicy - PolicyDocument: - Version: 2012-10-17 - Statement: - - Effect: Allow - Action: - - kms:GenerateRandom - Resource: '*' - - Effect: Allow - Action: - - kms:GenerateDataKey - Resource: !Sub "arn:aws:kms:${AWS::Region}:${AWS::AccountId}:key/${KmsKeyId}" - Roles: - - !Ref FunctionRole \ No newline at end of file + - !Sub 'arn:${AWS::Partition}:s3:::${VerticaExportBucket}' + - !Sub 'arn:${AWS::Partition}:s3:::${SpillBucket}' + Version: '2012-10-17' + - Statement: + - Action: + - secretsmanager:GetSecretValue + Effect: Allow + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:*:*:secret:${SecretNameOrPrefix}' + - S3ReadPolicy: + BucketName: + Ref: SpillBucket + - S3WritePolicy: + BucketName: + Ref: SpillBucket + - S3ReadPolicy: + BucketName: + Ref: VerticaExportBucket + - S3WritePolicy: + BucketName: + Ref: VerticaExportBucket + #VPCAccessPolicy allows our connector to run in a VPC so that it can access your data source. 
+ VpcConfig: + SecurityGroupIds: + #SecurityGroup that should be applied to the Lambda function + - !Ref LambdaSecurityGroup + SubnetIds: !Ref SubnetIds \ No newline at end of file From 6e8cedc2c0b8566fba05e996ef0f0e33de6ff433 Mon Sep 17 00:00:00 2001 From: ejeffrli <144148373+ejeffrli@users.noreply.github.com> Date: Tue, 10 Sep 2024 10:19:55 -0400 Subject: [PATCH 20/87] Add rest of JDBC connectors excluding redshift (#10) * Add rest of JDBC connectors excluding redshift --- .../cloudera/HiveCompositeHandler.java | 4 +- .../cloudera/ImpalaCompositeHandler.java | 5 +- .../DataLakeGen2CompositeHandler.java | 4 +- .../db2as400/Db2As400CompositeHandler.java | 4 +- .../ClouderaHiveEnvironmentProperties.java | 64 +++++++++++++++++++ .../DataLakeGen2EnvironmentProperties.java | 31 +++++++++ .../Db2As400EnvironmentProperties.java | 49 ++++++++++++++ .../connection/Db2EnvironmentProperties.java | 43 +++++++++++++ .../connection/EnvironmentProperties.java | 2 +- .../HortonworksEnvironmentProperties.java | 31 +++++++++ .../ImpalaEnvironmentProperties.java | 31 +++++++++ .../connection/JdbcEnvironmentProperties.java | 36 ++++++++--- .../OracleEnvironmentProperties.java | 9 ++- .../SaphanaEnvironmentProperties.java | 37 +++++++++++ .../SnowflakeEnvironmentProperties.java | 46 +++++++++++++ .../SqlServerEnvironmentProperties.java | 24 +++---- .../TeradataEnvironmentProperties.java | 43 +++++++++++++ .../VerticaEnvironmentProperties.java | 31 +++++++++ .../hortonworks/HiveCompositeHandler.java | 4 +- .../saphana/SaphanaCompositeHandler.java | 4 +- .../snowflake/SnowflakeCompositeHandler.java | 4 +- .../vertica/VerticaCompositeHandler.java | 4 +- 22 files changed, 471 insertions(+), 39 deletions(-) create mode 100644 athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/ClouderaHiveEnvironmentProperties.java create mode 100644 athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/DataLakeGen2EnvironmentProperties.java 
create mode 100644 athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/Db2As400EnvironmentProperties.java create mode 100644 athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/Db2EnvironmentProperties.java create mode 100644 athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/HortonworksEnvironmentProperties.java create mode 100644 athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/ImpalaEnvironmentProperties.java create mode 100644 athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/SaphanaEnvironmentProperties.java create mode 100644 athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/SnowflakeEnvironmentProperties.java create mode 100644 athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/TeradataEnvironmentProperties.java create mode 100644 athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/VerticaEnvironmentProperties.java diff --git a/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveCompositeHandler.java b/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveCompositeHandler.java index e22a0a01af..832376bdb9 100644 --- a/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveCompositeHandler.java +++ b/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveCompositeHandler.java @@ -19,7 +19,7 @@ */ package com.amazonaws.athena.connectors.cloudera; -import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; +import com.amazonaws.athena.connector.lambda.connection.ClouderaHiveEnvironmentProperties; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -33,6 +33,6 @@ public class HiveCompositeHandler { public HiveCompositeHandler() { - super(new 
HiveMetadataHandler(GlueConnectionUtils.getGlueConnection()), new HiveRecordHandler(GlueConnectionUtils.getGlueConnection())); + super(new HiveMetadataHandler(new ClouderaHiveEnvironmentProperties().createEnvironment()), new HiveRecordHandler(new ClouderaHiveEnvironmentProperties().createEnvironment())); } } diff --git a/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaCompositeHandler.java b/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaCompositeHandler.java index 6413ca0f95..e827d4d877 100644 --- a/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaCompositeHandler.java +++ b/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaCompositeHandler.java @@ -20,7 +20,7 @@ */ package com.amazonaws.athena.connectors.cloudera; -import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; +import com.amazonaws.athena.connector.lambda.connection.ImpalaEnvironmentProperties; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -34,6 +34,7 @@ public class ImpalaCompositeHandler { public ImpalaCompositeHandler() { - super(new ImpalaMetadataHandler(GlueConnectionUtils.getGlueConnection()), new ImpalaRecordHandler(GlueConnectionUtils.getGlueConnection())); + super(new ImpalaMetadataHandler(new ImpalaEnvironmentProperties().createEnvironment()), + new ImpalaRecordHandler(new ImpalaEnvironmentProperties().createEnvironment())); } } diff --git a/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2CompositeHandler.java b/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2CompositeHandler.java index c9be952c40..8a8cd495d2 100644 --- a/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2CompositeHandler.java +++ 
b/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2CompositeHandler.java @@ -19,7 +19,7 @@ */ package com.amazonaws.athena.connectors.datalakegen2; -import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; +import com.amazonaws.athena.connector.lambda.connection.DataLakeGen2EnvironmentProperties; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -32,6 +32,6 @@ public class DataLakeGen2CompositeHandler extends CompositeHandler { public DataLakeGen2CompositeHandler() { - super(new DataLakeGen2MetadataHandler(GlueConnectionUtils.getGlueConnection()), new DataLakeGen2RecordHandler(GlueConnectionUtils.getGlueConnection())); + super(new DataLakeGen2MetadataHandler(new DataLakeGen2EnvironmentProperties().createEnvironment()), new DataLakeGen2RecordHandler(new DataLakeGen2EnvironmentProperties().createEnvironment())); } } diff --git a/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400CompositeHandler.java b/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400CompositeHandler.java index 136e1357c8..04dcb0828c 100644 --- a/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400CompositeHandler.java +++ b/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400CompositeHandler.java @@ -19,7 +19,7 @@ */ package com.amazonaws.athena.connectors.db2as400; -import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; +import com.amazonaws.athena.connector.lambda.connection.Db2As400EnvironmentProperties; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -32,6 +32,6 @@ public class Db2As400CompositeHandler extends CompositeHandler { public Db2As400CompositeHandler() { - super(new Db2As400MetadataHandler(GlueConnectionUtils.getGlueConnection()), new Db2As400RecordHandler(GlueConnectionUtils.getGlueConnection())); + super(new Db2As400MetadataHandler(new 
Db2As400EnvironmentProperties().createEnvironment()), new Db2As400RecordHandler(new Db2As400EnvironmentProperties().createEnvironment())); } } diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/ClouderaHiveEnvironmentProperties.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/ClouderaHiveEnvironmentProperties.java new file mode 100644 index 0000000000..dfc1272564 --- /dev/null +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/ClouderaHiveEnvironmentProperties.java @@ -0,0 +1,64 @@ +/*- + * #%L + * Amazon Athena Query Federation SDK + * %% + * Copyright (C) 2019 - 2024 Amazon Web Services + * %% + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * #L% + */ +package com.amazonaws.athena.connector.lambda.connection; + +import java.util.Map; + +public class ClouderaHiveEnvironmentProperties extends JdbcEnvironmentProperties +{ + private static final String SESSION_CONFS = "SESSION_CONFS"; + private static final String HIVE_CONFS = "HIVE_CONFS"; + private static final String HIVE_VARS = "HIVE_VARS"; + + @Override + protected String getConnectionStringPrefix(Map connectionProperties) + { + return "hive://jdbc:hive2://"; + } + + @Override + protected String getJdbcParameters(Map connectionProperties) + { + String params = "?" 
+ connectionProperties.getOrDefault(SESSION_CONFS, ""); + + if (connectionProperties.containsKey(HIVE_CONFS)) { + if (connectionProperties.containsKey(SESSION_CONFS)) { + params = params + ";"; + } + params = params + connectionProperties.get(HIVE_CONFS); + } + + if (connectionProperties.containsKey(HIVE_VARS)) { + if (connectionProperties.containsKey(HIVE_CONFS)) { + params = params + ";"; + } + params = params + connectionProperties.get(HIVE_VARS); + } + + if (connectionProperties.containsKey(SECRET_NAME)) { + if (connectionProperties.containsKey(HIVE_VARS)) { // need to add delimiter + params = params + ";"; + } + params = params + "${" + connectionProperties.get(SECRET_NAME) + "}"; + } + + return params; + } +} diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/DataLakeGen2EnvironmentProperties.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/DataLakeGen2EnvironmentProperties.java new file mode 100644 index 0000000000..fd929e1bc1 --- /dev/null +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/DataLakeGen2EnvironmentProperties.java @@ -0,0 +1,31 @@ +/*- + * #%L + * Amazon Athena Query Federation SDK + * %% + * Copyright (C) 2019 - 2024 Amazon Web Services + * %% + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * #L% + */ +package com.amazonaws.athena.connector.lambda.connection; + +import java.util.Map; + +public class DataLakeGen2EnvironmentProperties extends SqlServerEnvironmentProperties +{ + @Override + protected String getConnectionStringPrefix(Map connectionProperties) + { + return "datalakegentwo://jdbc:sqlserver://"; + } +} diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/Db2As400EnvironmentProperties.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/Db2As400EnvironmentProperties.java new file mode 100644 index 0000000000..09061c11ab --- /dev/null +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/Db2As400EnvironmentProperties.java @@ -0,0 +1,49 @@ +/*- + * #%L + * Amazon Athena Query Federation SDK + * %% + * Copyright (C) 2019 - 2024 Amazon Web Services + * %% + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * #L% + */ +package com.amazonaws.athena.connector.lambda.connection; + +import java.util.HashMap; +import java.util.Map; + +public class Db2As400EnvironmentProperties extends EnvironmentProperties +{ + private static final String JDBC_PARAMS = "JDBC_PARAMS"; + private static final String DEFAULT = "default"; + @Override + public Map connectionPropertiesToEnvironment(Map connectionProperties) + { + HashMap environment = new HashMap<>(); + + // now construct jdbc string + String connectionString = "db2as400://jdbc:as400://" + connectionProperties.get("HOST") + + ";" + connectionProperties.getOrDefault(JDBC_PARAMS, ""); + + if (connectionProperties.containsKey(SECRET_NAME)) { + if (connectionProperties.containsKey(JDBC_PARAMS)) { // need to add delimiter + connectionString = connectionString + ";"; + } + connectionString = connectionString + ":${" + connectionProperties.get(SECRET_NAME) + "}"; + } + + logger.debug("Constructed connection string: {}", connectionString); + environment.put(DEFAULT, connectionString); + return environment; + } +} diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/Db2EnvironmentProperties.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/Db2EnvironmentProperties.java new file mode 100644 index 0000000000..62b3e7c1c0 --- /dev/null +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/Db2EnvironmentProperties.java @@ -0,0 +1,43 @@ +/*- + * #%L + * Amazon Athena Query Federation SDK + * %% + * Copyright (C) 2019 - 2024 Amazon Web Services + * %% + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * #L% + */ +package com.amazonaws.athena.connector.lambda.connection; + +import java.util.Map; + +public class Db2EnvironmentProperties extends JdbcEnvironmentProperties +{ + @Override + protected String getConnectionStringPrefix(Map connectionProperties) + { + return "dbtwo://jdbc:db2://"; + } + + @Override + protected String getDatabase(Map connectionProperties) + { + return ":" + connectionProperties.get(DATABASE); + } + + @Override + protected String getDelimiter() + { + return ";"; + } +} diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java index 144c065abf..0df340cde2 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java @@ -41,7 +41,7 @@ public class EnvironmentProperties protected static final String SECRET_NAME = "secret_name"; protected static final String SPILL_KMS_KEY_ID = "spill_kms_key_id"; protected static final String KMS_KEY_ID = "kms_key_id"; - private static final Logger logger = LoggerFactory.getLogger(EnvironmentProperties.class); + protected static final Logger logger = LoggerFactory.getLogger(EnvironmentProperties.class); public Map createEnvironment() throws RuntimeException { diff --git 
a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/HortonworksEnvironmentProperties.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/HortonworksEnvironmentProperties.java new file mode 100644 index 0000000000..87f02c3e78 --- /dev/null +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/HortonworksEnvironmentProperties.java @@ -0,0 +1,31 @@ +/*- + * #%L + * Amazon Athena Query Federation SDK + * %% + * Copyright (C) 2019 - 2024 Amazon Web Services + * %% + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * #L% + */ +package com.amazonaws.athena.connector.lambda.connection; + +import java.util.Map; + +public class HortonworksEnvironmentProperties extends JdbcEnvironmentProperties +{ + @Override + protected String getConnectionStringPrefix(Map connectionProperties) + { + return "hive://jdbc:hive2://"; + } +} diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/ImpalaEnvironmentProperties.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/ImpalaEnvironmentProperties.java new file mode 100644 index 0000000000..f1f372e08b --- /dev/null +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/ImpalaEnvironmentProperties.java @@ -0,0 +1,31 @@ +/*- + * #%L + * Amazon Athena Query Federation SDK + * %% + * Copyright (C) 2019 - 2024 Amazon Web Services + * %% + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * #L% + */ +package com.amazonaws.athena.connector.lambda.connection; + +import java.util.Map; + +public class ImpalaEnvironmentProperties extends SaphanaEnvironmentProperties +{ + @Override + protected String getConnectionStringPrefix(Map connectionProperties) + { + return "impala://jdbc:impala://"; + } +} diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/JdbcEnvironmentProperties.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/JdbcEnvironmentProperties.java index aa6227176c..752327b1d7 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/JdbcEnvironmentProperties.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/JdbcEnvironmentProperties.java @@ -35,28 +35,46 @@ public Map connectionPropertiesToEnvironment(Map // now construct jdbc string String connectionString = getConnectionStringPrefix(connectionProperties) + connectionProperties.get("HOST") - + ":" + connectionProperties.get("PORT") + getConnectionStringSuffix(connectionProperties); + + ":" + connectionProperties.get("PORT") + getDatabase(connectionProperties) + getJdbcParameters(connectionProperties); + logger.debug("Constructed connection string: {}", connectionString); environment.put(DEFAULT, connectionString); return environment; } protected abstract String getConnectionStringPrefix(Map connectionProperties); - protected String getConnectionStringSuffix(Map connectionProperties) + protected String getDatabase(Map connectionProperties) { - String suffix = "/" + connectionProperties.get(DATABASE) + "?" 
- + connectionProperties.getOrDefault(JDBC_PARAMS, ""); + return getDatabaseSeparator() + connectionProperties.get(DATABASE); + } + + protected String getJdbcParameters(Map connectionProperties) + { + String params = getJdbcParametersSeparator() + connectionProperties.getOrDefault(JDBC_PARAMS, ""); if (connectionProperties.containsKey(SECRET_NAME)) { if (connectionProperties.containsKey(JDBC_PARAMS)) { // need to add delimiter - suffix = suffix + "&${" + connectionProperties.get(SECRET_NAME) + "}"; - } - else { - suffix = suffix + "${" + connectionProperties.get(SECRET_NAME) + "}"; + params = params + getDelimiter(); } + params = params + "${" + connectionProperties.get(SECRET_NAME) + "}"; } - return suffix; + return params; + } + + protected String getDatabaseSeparator() + { + return "/"; + } + + protected String getJdbcParametersSeparator() + { + return "?"; + } + + protected String getDelimiter() + { + return "&"; } } diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/OracleEnvironmentProperties.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/OracleEnvironmentProperties.java index 5447aa3df3..7d33ec2bcf 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/OracleEnvironmentProperties.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/OracleEnvironmentProperties.java @@ -30,13 +30,20 @@ protected String getConnectionStringPrefix(Map connectionPropert if (connectionProperties.containsKey(SECRET_NAME)) { prefix = prefix + "${" + connectionProperties.get(SECRET_NAME) + "}"; } + prefix = prefix + "@//"; return prefix; } @Override - protected String getConnectionStringSuffix(Map connectionProperties) + protected String getDatabase(Map connectionProperties) { return "/" + connectionProperties.get(DATABASE); } + + @Override + protected String getJdbcParameters(Map connectionProperties) + { + 
return ""; + } } diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/SaphanaEnvironmentProperties.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/SaphanaEnvironmentProperties.java new file mode 100644 index 0000000000..38beefa547 --- /dev/null +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/SaphanaEnvironmentProperties.java @@ -0,0 +1,37 @@ +/*- + * #%L + * Amazon Athena Query Federation SDK + * %% + * Copyright (C) 2019 - 2024 Amazon Web Services + * %% + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * #L% + */ +package com.amazonaws.athena.connector.lambda.connection; + +import java.util.Map; + +public class SaphanaEnvironmentProperties extends JdbcEnvironmentProperties +{ + @Override + protected String getConnectionStringPrefix(Map connectionProperties) + { + return "saphana://jdbc:sap://"; + } + + @Override + protected String getDatabase(Map connectionProperties) + { + return "/"; + } +} diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/SnowflakeEnvironmentProperties.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/SnowflakeEnvironmentProperties.java new file mode 100644 index 0000000000..ee8c7b39d9 --- /dev/null +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/SnowflakeEnvironmentProperties.java @@ -0,0 +1,46 @@ +/*- + * #%L + * Amazon Athena Query Federation SDK + * %% + * Copyright (C) 2019 - 2024 Amazon Web Services + * %% + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * #L% + */ +package com.amazonaws.athena.connector.lambda.connection; + +import java.util.Map; + +public class SnowflakeEnvironmentProperties extends JdbcEnvironmentProperties +{ + private static final String WAREHOUSE = "WAREHOUSE"; + private static final String SCHEMA = "SCHEMA"; + @Override + protected String getConnectionStringPrefix(Map connectionProperties) + { + return "snowflake://jdbc:snowflake://"; + } + + @Override + protected String getDatabase(Map connectionProperties) + { + if (!connectionProperties.containsKey(SCHEMA)) { + logger.debug("No schema specified in connection string"); + } + + String databaseString = "/?warehouse=" + connectionProperties.get(WAREHOUSE) + + "&db=" + connectionProperties.get(DATABASE) + + "&schema=" + connectionProperties.get(SCHEMA); + return databaseString; + } +} diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/SqlServerEnvironmentProperties.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/SqlServerEnvironmentProperties.java index 8962545e8c..33a2a3c9d4 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/SqlServerEnvironmentProperties.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/SqlServerEnvironmentProperties.java @@ -30,20 +30,20 @@ protected String getConnectionStringPrefix(Map connectionPropert } @Override - protected String getConnectionStringSuffix(Map connectionProperties) + protected String getDatabase(Map connectionProperties) { - String suffix = ";databaseName=" + connectionProperties.get(DATABASE) + ";" - + connectionProperties.getOrDefault(JDBC_PARAMS, ""); + return ";databaseName=" + connectionProperties.get(DATABASE); + } - if (connectionProperties.containsKey(SECRET_NAME)) { - if (connectionProperties.containsKey(JDBC_PARAMS)) { // need to add delimiter - suffix = suffix + ";${" + connectionProperties.get(SECRET_NAME) + 
"}"; - } - else { - suffix = suffix + "${" + connectionProperties.get(SECRET_NAME) + "}"; - } - } + @Override + protected String getJdbcParametersSeparator() + { + return ";"; + } - return suffix; + @Override + protected String getDelimiter() + { + return ";"; } } diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/TeradataEnvironmentProperties.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/TeradataEnvironmentProperties.java new file mode 100644 index 0000000000..fff460399b --- /dev/null +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/TeradataEnvironmentProperties.java @@ -0,0 +1,43 @@ +/*- + * #%L + * Amazon Athena Query Federation SDK + * %% + * Copyright (C) 2019 - 2024 Amazon Web Services + * %% + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * #L% + */ +package com.amazonaws.athena.connector.lambda.connection; + +import java.util.Map; + +public class TeradataEnvironmentProperties extends JdbcEnvironmentProperties +{ + @Override + protected String getConnectionStringPrefix(Map connectionProperties) + { + return "teradata://jdbc:teradata://"; + } + + @Override + protected String getDatabase(Map connectionProperties) + { + return "/TMODE=ANSI,CHARSET=UTF8,DATABASE=" + connectionProperties.get(DATABASE); + } + + @Override + protected String getJdbcParametersSeparator() + { + return ","; + } +} diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/VerticaEnvironmentProperties.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/VerticaEnvironmentProperties.java new file mode 100644 index 0000000000..021a202e58 --- /dev/null +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/VerticaEnvironmentProperties.java @@ -0,0 +1,31 @@ +/*- + * #%L + * Amazon Athena Query Federation SDK + * %% + * Copyright (C) 2019 - 2024 Amazon Web Services + * %% + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * #L% + */ +package com.amazonaws.athena.connector.lambda.connection; + +import java.util.Map; + +public class VerticaEnvironmentProperties extends JdbcEnvironmentProperties +{ + @Override + protected String getConnectionStringPrefix(Map connectionProperties) + { + return "vertica://jdbc:vertica://"; + } +} diff --git a/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveCompositeHandler.java b/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveCompositeHandler.java index 65e877242d..bb9b76007a 100644 --- a/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveCompositeHandler.java +++ b/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveCompositeHandler.java @@ -19,7 +19,7 @@ */ package com.amazonaws.athena.connectors.hortonworks; -import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; +import com.amazonaws.athena.connector.lambda.connection.HortonworksEnvironmentProperties; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -33,6 +33,6 @@ public class HiveCompositeHandler { public HiveCompositeHandler() { - super(new HiveMetadataHandler(GlueConnectionUtils.getGlueConnection()), new HiveRecordHandler(GlueConnectionUtils.getGlueConnection())); + super(new HiveMetadataHandler(new HortonworksEnvironmentProperties().createEnvironment()), new HiveRecordHandler(new HortonworksEnvironmentProperties().createEnvironment())); } } diff --git a/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaCompositeHandler.java b/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaCompositeHandler.java index df4f57b295..b22112a659 100644 --- a/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaCompositeHandler.java +++ b/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaCompositeHandler.java @@ -22,7 +22,7 @@ 
package com.amazonaws.athena.connectors.saphana; -import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; +import com.amazonaws.athena.connector.lambda.connection.SaphanaEnvironmentProperties; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -36,6 +36,6 @@ public class SaphanaCompositeHandler { public SaphanaCompositeHandler() { - super(new SaphanaMetadataHandler(GlueConnectionUtils.getGlueConnection()), new SaphanaRecordHandler(GlueConnectionUtils.getGlueConnection())); + super(new SaphanaMetadataHandler(new SaphanaEnvironmentProperties().createEnvironment()), new SaphanaRecordHandler(new SaphanaEnvironmentProperties().createEnvironment())); } } diff --git a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeCompositeHandler.java b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeCompositeHandler.java index 87aa9af6b0..be8f456fe3 100644 --- a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeCompositeHandler.java +++ b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeCompositeHandler.java @@ -22,7 +22,7 @@ package com.amazonaws.athena.connectors.snowflake; -import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; +import com.amazonaws.athena.connector.lambda.connection.SnowflakeEnvironmentProperties; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -36,6 +36,6 @@ public class SnowflakeCompositeHandler { public SnowflakeCompositeHandler() { - super(new SnowflakeMetadataHandler(GlueConnectionUtils.getGlueConnection()), new SnowflakeRecordHandler(GlueConnectionUtils.getGlueConnection())); + super(new SnowflakeMetadataHandler(new SnowflakeEnvironmentProperties().createEnvironment()), new SnowflakeRecordHandler(new SnowflakeEnvironmentProperties().createEnvironment())); } } diff --git 
a/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaCompositeHandler.java b/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaCompositeHandler.java index 85fe3ef67e..d7d39a3b16 100644 --- a/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaCompositeHandler.java +++ b/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaCompositeHandler.java @@ -19,7 +19,7 @@ */ package com.amazonaws.athena.connectors.vertica; -import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; +import com.amazonaws.athena.connector.lambda.connection.VerticaEnvironmentProperties; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; import java.io.IOException; @@ -39,7 +39,7 @@ public class VerticaCompositeHandler { public VerticaCompositeHandler() throws CertificateEncodingException, IOException, NoSuchAlgorithmException, KeyStoreException { - super(new VerticaMetadataHandler(GlueConnectionUtils.getGlueConnection()), new VerticaRecordHandler(GlueConnectionUtils.getGlueConnection())); + super(new VerticaMetadataHandler(new VerticaEnvironmentProperties().createEnvironment()), new VerticaRecordHandler(new VerticaEnvironmentProperties().createEnvironment())); installCaCertificate(); setupNativeEnvironmentVariables(); } From f1e066decaa08ab51337e71588b934f971114500 Mon Sep 17 00:00:00 2001 From: Aimery Methena <159072740+aimethed@users.noreply.github.com> Date: Tue, 10 Sep 2024 11:44:19 -0400 Subject: [PATCH 21/87] V2 image deployment (#2253) --- athena-aws-cmdb/Dockerfile | 9 +++++++++ athena-aws-cmdb/athena-aws-cmdb.yaml | 5 ++--- athena-clickhouse/Dockerfile | 9 +++++++++ athena-clickhouse/athena-clickhouse.yaml | 5 ++--- athena-cloudera-hive/Dockerfile | 9 +++++++++ athena-cloudera-hive/athena-cloudera-hive.yaml | 5 ++--- athena-cloudera-impala/Dockerfile | 9 +++++++++ athena-cloudera-impala/athena-cloudera-impala.yaml | 5 ++--- athena-cloudwatch-metrics/Dockerfile | 
9 +++++++++ athena-cloudwatch-metrics/athena-cloudwatch-metrics.yaml | 5 ++--- athena-cloudwatch/Dockerfile | 9 +++++++++ athena-cloudwatch/athena-cloudwatch.yaml | 5 ++--- athena-datalakegen2/Dockerfile | 9 +++++++++ athena-datalakegen2/athena-datalakegen2.yaml | 5 ++--- athena-db2-as400/Dockerfile | 9 +++++++++ athena-db2-as400/athena-db2-as400.yaml | 5 ++--- athena-db2/Dockerfile | 9 +++++++++ athena-db2/athena-db2.yaml | 5 ++--- athena-docdb/Dockerfile | 9 +++++++++ athena-docdb/athena-docdb.yaml | 5 ++--- athena-dynamodb/Dockerfile | 9 +++++++++ athena-dynamodb/athena-dynamodb.yaml | 5 ++--- athena-elasticsearch/Dockerfile | 9 +++++++++ athena-elasticsearch/athena-elasticsearch.yaml | 5 ++--- athena-gcs/Dockerfile | 9 +++++++++ athena-gcs/athena-gcs.yaml | 5 ++--- athena-google-bigquery/Dockerfile | 9 +++++++++ athena-google-bigquery/athena-google-bigquery.yaml | 5 ++--- athena-hbase/Dockerfile | 9 +++++++++ athena-hbase/athena-hbase.yaml | 5 ++--- athena-hortonworks-hive/Dockerfile | 9 +++++++++ athena-hortonworks-hive/athena-hortonworks-hive.yaml | 5 ++--- athena-kafka/Dockerfile | 9 +++++++++ athena-kafka/athena-kafka.yaml | 5 ++--- athena-msk/Dockerfile | 9 +++++++++ athena-msk/athena-msk.yaml | 5 ++--- athena-mysql/Dockerfile | 9 +++++++++ athena-mysql/athena-mysql.yaml | 5 ++--- athena-neptune/Dockerfile | 9 +++++++++ athena-neptune/athena-neptune.yaml | 5 ++--- athena-oracle/Dockerfile | 9 +++++++++ athena-oracle/athena-oracle.yaml | 5 ++--- athena-postgresql/Dockerfile | 9 +++++++++ athena-postgresql/athena-postgresql.yaml | 7 ++++--- athena-redis/Dockerfile | 9 +++++++++ athena-redis/athena-redis.yaml | 5 ++--- athena-redshift/Dockerfile | 9 +++++++++ athena-redshift/athena-redshift.yaml | 5 ++--- athena-saphana/Dockerfile | 9 +++++++++ athena-saphana/athena-saphana.yaml | 5 ++--- athena-snowflake/Dockerfile | 9 +++++++++ athena-snowflake/athena-snowflake.yaml | 5 ++--- athena-sqlserver/Dockerfile | 9 +++++++++ athena-sqlserver/athena-sqlserver.yaml | 
5 ++--- athena-synapse/Dockerfile | 9 +++++++++ athena-synapse/athena-synapse.yaml | 5 ++--- athena-teradata/Dockerfile | 9 +++++++++ athena-teradata/athena-teradata.yaml | 5 ++--- athena-timestream/Dockerfile | 9 +++++++++ athena-timestream/athena-timestream.yaml | 5 ++--- athena-tpcds/Dockerfile | 9 +++++++++ athena-tpcds/athena-tpcds.yaml | 5 ++--- athena-udfs/Dockerfile | 9 +++++++++ athena-udfs/athena-udfs.yaml | 5 ++--- athena-vertica/Dockerfile | 9 +++++++++ athena-vertica/athena-vertica.yaml | 5 ++--- 66 files changed, 365 insertions(+), 99 deletions(-) create mode 100644 athena-aws-cmdb/Dockerfile create mode 100644 athena-clickhouse/Dockerfile create mode 100644 athena-cloudera-hive/Dockerfile create mode 100644 athena-cloudera-impala/Dockerfile create mode 100644 athena-cloudwatch-metrics/Dockerfile create mode 100644 athena-cloudwatch/Dockerfile create mode 100644 athena-datalakegen2/Dockerfile create mode 100644 athena-db2-as400/Dockerfile create mode 100644 athena-db2/Dockerfile create mode 100644 athena-docdb/Dockerfile create mode 100644 athena-dynamodb/Dockerfile create mode 100644 athena-elasticsearch/Dockerfile create mode 100644 athena-gcs/Dockerfile create mode 100644 athena-google-bigquery/Dockerfile create mode 100644 athena-hbase/Dockerfile create mode 100644 athena-hortonworks-hive/Dockerfile create mode 100644 athena-kafka/Dockerfile create mode 100644 athena-msk/Dockerfile create mode 100644 athena-mysql/Dockerfile create mode 100644 athena-neptune/Dockerfile create mode 100644 athena-oracle/Dockerfile create mode 100644 athena-postgresql/Dockerfile create mode 100644 athena-redis/Dockerfile create mode 100644 athena-redshift/Dockerfile create mode 100644 athena-saphana/Dockerfile create mode 100644 athena-snowflake/Dockerfile create mode 100644 athena-sqlserver/Dockerfile create mode 100644 athena-synapse/Dockerfile create mode 100644 athena-teradata/Dockerfile create mode 100644 athena-timestream/Dockerfile create mode 100644 
athena-tpcds/Dockerfile create mode 100644 athena-udfs/Dockerfile create mode 100644 athena-vertica/Dockerfile diff --git a/athena-aws-cmdb/Dockerfile b/athena-aws-cmdb/Dockerfile new file mode 100644 index 0000000000..a599a28963 --- /dev/null +++ b/athena-aws-cmdb/Dockerfile @@ -0,0 +1,9 @@ +FROM public.ecr.aws/lambda/java:11 + +# Copy function code and runtime dependencies from Maven layout +COPY target/athena-aws-cmdb-2022.47.1.jar ${LAMBDA_TASK_ROOT} +# Unpack the jar +RUN jar xf athena-aws-cmdb-2022.47.1.jar + +# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) +CMD [ "com.amazonaws.athena.connectors.aws.cmdb.AwsCmdbCompositeHandler" ] \ No newline at end of file diff --git a/athena-aws-cmdb/athena-aws-cmdb.yaml b/athena-aws-cmdb/athena-aws-cmdb.yaml index b3265cd1eb..28640a646d 100644 --- a/athena-aws-cmdb/athena-aws-cmdb.yaml +++ b/athena-aws-cmdb/athena-aws-cmdb.yaml @@ -52,10 +52,9 @@ Resources: spill_bucket: !Ref SpillBucket spill_prefix: !Ref SpillPrefix FunctionName: !Ref AthenaCatalogName - Handler: "com.amazonaws.athena.connectors.aws.cmdb.AwsCmdbCompositeHandler" - CodeUri: "./target/athena-aws-cmdb-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-aws-cmdb:2022.47.1' Description: "Enables Amazon Athena to communicate with various AWS Services, making your resource inventories accessible via SQL." 
- Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] diff --git a/athena-clickhouse/Dockerfile b/athena-clickhouse/Dockerfile new file mode 100644 index 0000000000..a092ba28cb --- /dev/null +++ b/athena-clickhouse/Dockerfile @@ -0,0 +1,9 @@ +FROM public.ecr.aws/lambda/java:11 + +# Copy function code and runtime dependencies from Maven layout +COPY target/athena-clickhouse-2022.47.1.jar ${LAMBDA_TASK_ROOT} +# Unpack the jar +RUN jar xf athena-clickhouse-2022.47.1.jar + +# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) +CMD [ "com.amazonaws.athena.connectors.clickhouse.ClickHouseMuxCompositeHandler" ] \ No newline at end of file diff --git a/athena-clickhouse/athena-clickhouse.yaml b/athena-clickhouse/athena-clickhouse.yaml index ca6d171dcb..39b4164381 100644 --- a/athena-clickhouse/athena-clickhouse.yaml +++ b/athena-clickhouse/athena-clickhouse.yaml @@ -70,10 +70,9 @@ Resources: spill_prefix: !Ref SpillPrefix default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.clickhouse.ClickHouseMuxCompositeHandler" - CodeUri: "./target/athena-clickhouse-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-clickhouse:2022.47.1' Description: "Enables Amazon Athena to communicate with ClickHouse using JDBC" - Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] diff --git a/athena-cloudera-hive/Dockerfile b/athena-cloudera-hive/Dockerfile new file mode 100644 index 0000000000..a56019f693 --- /dev/null +++ b/athena-cloudera-hive/Dockerfile @@ -0,0 +1,9 @@ +FROM public.ecr.aws/lambda/java:11 + +# Copy function code and runtime 
dependencies from Maven layout +COPY target/athena-cloudera-hive-2022.47.1.jar ${LAMBDA_TASK_ROOT} +# Unpack the jar +RUN jar xf athena-cloudera-hive-2022.47.1.jar + +# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) +CMD [ "com.amazonaws.athena.connectors.cloudera.HiveMuxCompositeHandler" ] \ No newline at end of file diff --git a/athena-cloudera-hive/athena-cloudera-hive.yaml b/athena-cloudera-hive/athena-cloudera-hive.yaml index 0984f3e01d..23f5201623 100644 --- a/athena-cloudera-hive/athena-cloudera-hive.yaml +++ b/athena-cloudera-hive/athena-cloudera-hive.yaml @@ -65,10 +65,9 @@ Resources: spill_prefix: !Ref SpillPrefix default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.cloudera.HiveMuxCompositeHandler" - CodeUri: "./target/athena-cloudera-hive-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-cloudera-hive:2022.47.1' Description: "Enables Amazon Athena to communicate with Coludera Hive using JDBC" - Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] diff --git a/athena-cloudera-impala/Dockerfile b/athena-cloudera-impala/Dockerfile new file mode 100644 index 0000000000..2ed43aeaa9 --- /dev/null +++ b/athena-cloudera-impala/Dockerfile @@ -0,0 +1,9 @@ +FROM public.ecr.aws/lambda/java:11 + +# Copy function code and runtime dependencies from Maven layout +COPY target/athena-cloudera-impala-2022.47.1.jar ${LAMBDA_TASK_ROOT} +# Unpack the jar +RUN jar xf athena-cloudera-impala-2022.47.1.jar + +# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) +CMD [ "com.amazonaws.athena.connectors.cloudera.ImpalaMuxCompositeHandler" ] \ No newline at end of file diff --git 
a/athena-cloudera-impala/athena-cloudera-impala.yaml b/athena-cloudera-impala/athena-cloudera-impala.yaml index ee292cea3a..399cad4769 100644 --- a/athena-cloudera-impala/athena-cloudera-impala.yaml +++ b/athena-cloudera-impala/athena-cloudera-impala.yaml @@ -70,10 +70,9 @@ Resources: spill_prefix: !Ref SpillPrefix default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.cloudera.ImpalaMuxCompositeHandler" - CodeUri: "./target/athena-cloudera-impala-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-cloudera-impala:2022.47.1' Description: "Enables Amazon Athena to communicate with Cloudera Impala using JDBC" - Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] diff --git a/athena-cloudwatch-metrics/Dockerfile b/athena-cloudwatch-metrics/Dockerfile new file mode 100644 index 0000000000..b3eafc1e38 --- /dev/null +++ b/athena-cloudwatch-metrics/Dockerfile @@ -0,0 +1,9 @@ +FROM public.ecr.aws/lambda/java:11 + +# Copy function code and runtime dependencies from Maven layout +COPY target/athena-cloudwatch-metrics-2022.47.1.jar ${LAMBDA_TASK_ROOT} +# Unpack the jar +RUN jar xf athena-cloudwatch-metrics-2022.47.1.jar + +# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) +CMD [ "com.amazonaws.athena.connectors.cloudwatch.metrics.MetricsCompositeHandler" ] \ No newline at end of file diff --git a/athena-cloudwatch-metrics/athena-cloudwatch-metrics.yaml b/athena-cloudwatch-metrics/athena-cloudwatch-metrics.yaml index d1d815063c..fd66c1081a 100644 --- a/athena-cloudwatch-metrics/athena-cloudwatch-metrics.yaml +++ b/athena-cloudwatch-metrics/athena-cloudwatch-metrics.yaml @@ -52,10 +52,9 @@ Resources: spill_bucket: !Ref SpillBucket spill_prefix: !Ref 
SpillPrefix FunctionName: !Ref AthenaCatalogName - Handler: "com.amazonaws.athena.connectors.cloudwatch.metrics.MetricsCompositeHandler" - CodeUri: "./target/athena-cloudwatch-metrics-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-cloudwatch-metrics:2022.47.1' Description: "Enables Amazon Athena to communicate with Cloudwatch Metrics, making your metrics data accessible via SQL" - Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] diff --git a/athena-cloudwatch/Dockerfile b/athena-cloudwatch/Dockerfile new file mode 100644 index 0000000000..9859ff8b4c --- /dev/null +++ b/athena-cloudwatch/Dockerfile @@ -0,0 +1,9 @@ +FROM public.ecr.aws/lambda/java:11 + +# Copy function code and runtime dependencies from Maven layout +COPY target/athena-cloudwatch-2022.47.1.jar ${LAMBDA_TASK_ROOT} +# Unpack the jar +RUN jar xf athena-cloudwatch-2022.47.1.jar + +# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) +CMD [ "com.amazonaws.athena.connectors.cloudwatch.CloudwatchCompositeHandler" ] \ No newline at end of file diff --git a/athena-cloudwatch/athena-cloudwatch.yaml b/athena-cloudwatch/athena-cloudwatch.yaml index a5d69fbb2e..82a58b4e1b 100644 --- a/athena-cloudwatch/athena-cloudwatch.yaml +++ b/athena-cloudwatch/athena-cloudwatch.yaml @@ -66,10 +66,9 @@ Resources: spill_prefix: !Ref SpillPrefix kms_key_id: !If [HasKMSKeyId, !Ref KMSKeyId, !Ref "AWS::NoValue"] FunctionName: !Ref AthenaCatalogName - Handler: "com.amazonaws.athena.connectors.cloudwatch.CloudwatchCompositeHandler" - CodeUri: "./target/athena-cloudwatch-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-cloudwatch:2022.47.1' Description: "Enables Amazon Athena to 
communicate with Cloudwatch, making your log accessible via SQL" - Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRole] diff --git a/athena-datalakegen2/Dockerfile b/athena-datalakegen2/Dockerfile new file mode 100644 index 0000000000..4e1929f607 --- /dev/null +++ b/athena-datalakegen2/Dockerfile @@ -0,0 +1,9 @@ +FROM public.ecr.aws/lambda/java:11 + +# Copy function code and runtime dependencies from Maven layout +COPY target/athena-datalakegen2-2022.47.1.jar ${LAMBDA_TASK_ROOT} +# Unpack the jar +RUN jar xf athena-datalakegen2-2022.47.1.jar + +# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) +CMD [ "com.amazonaws.athena.connectors.datalakegen2.DataLakeGen2MuxCompositeHandler" ] \ No newline at end of file diff --git a/athena-datalakegen2/athena-datalakegen2.yaml b/athena-datalakegen2/athena-datalakegen2.yaml index 32da145587..0f2f19a92a 100644 --- a/athena-datalakegen2/athena-datalakegen2.yaml +++ b/athena-datalakegen2/athena-datalakegen2.yaml @@ -71,10 +71,9 @@ Resources: spill_prefix: !Ref SpillPrefix default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.datalakegen2.DataLakeGen2MuxCompositeHandler" - CodeUri: "./target/athena-datalakegen2-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-datalakegen2:2022.47.1' Description: "Enables Amazon Athena to communicate with DataLake Gen2 using JDBC" - Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] diff --git a/athena-db2-as400/Dockerfile b/athena-db2-as400/Dockerfile new file mode 100644 index 0000000000..affd37e7bb --- /dev/null +++ b/athena-db2-as400/Dockerfile @@ -0,0 +1,9 @@ +FROM 
public.ecr.aws/lambda/java:11 + +# Copy function code and runtime dependencies from Maven layout +COPY target/athena-db2-as400-2022.47.1.jar ${LAMBDA_TASK_ROOT} +# Unpack the jar +RUN jar xf athena-db2-as400-2022.47.1.jar + +# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) +CMD [ "com.amazonaws.athena.connectors.db2as400.Db2As400MuxCompositeHandler" ] \ No newline at end of file diff --git a/athena-db2-as400/athena-db2-as400.yaml b/athena-db2-as400/athena-db2-as400.yaml index c84dac623e..1d7643dae8 100644 --- a/athena-db2-as400/athena-db2-as400.yaml +++ b/athena-db2-as400/athena-db2-as400.yaml @@ -72,10 +72,9 @@ Resources: spill_prefix: !Ref SpillPrefix default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.db2as400.Db2As400MuxCompositeHandler" - CodeUri: "./target/athena-db2-as400-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-db2-as400:2022.47.1' Description: "Enables Amazon Athena to communicate with DB2 on iSeries (AS400) using JDBC" - Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] diff --git a/athena-db2/Dockerfile b/athena-db2/Dockerfile new file mode 100644 index 0000000000..0d8231fa29 --- /dev/null +++ b/athena-db2/Dockerfile @@ -0,0 +1,9 @@ +FROM public.ecr.aws/lambda/java:11 + +# Copy function code and runtime dependencies from Maven layout +COPY target/athena-db2-2022.47.1.jar ${LAMBDA_TASK_ROOT} +# Unpack the jar +RUN jar xf athena-db2-2022.47.1.jar + +# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) +CMD [ "com.amazonaws.athena.connectors.db2.Db2MuxCompositeHandler" ] \ No newline at end of file diff --git a/athena-db2/athena-db2.yaml b/athena-db2/athena-db2.yaml index 
cbaaa93af9..4a930929e8 100644 --- a/athena-db2/athena-db2.yaml +++ b/athena-db2/athena-db2.yaml @@ -72,10 +72,9 @@ Resources: spill_prefix: !Ref SpillPrefix default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.db2.Db2MuxCompositeHandler" - CodeUri: "./target/athena-db2-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-db2:2022.47.1' Description: "Enables Amazon Athena to communicate with DB2 using JDBC" - Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] diff --git a/athena-docdb/Dockerfile b/athena-docdb/Dockerfile new file mode 100644 index 0000000000..06e8a5c907 --- /dev/null +++ b/athena-docdb/Dockerfile @@ -0,0 +1,9 @@ +FROM public.ecr.aws/lambda/java:11 + +# Copy function code and runtime dependencies from Maven layout +COPY target/athena-docdb-2022.47.1.jar ${LAMBDA_TASK_ROOT} +# Unpack the jar +RUN jar xf athena-docdb-2022.47.1.jar + +# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) +CMD [ "com.amazonaws.athena.connectors.docdb.DocDBCompositeHandler" ] \ No newline at end of file diff --git a/athena-docdb/athena-docdb.yaml b/athena-docdb/athena-docdb.yaml index efb7fc0b2e..b713852b26 100644 --- a/athena-docdb/athena-docdb.yaml +++ b/athena-docdb/athena-docdb.yaml @@ -66,10 +66,9 @@ Resources: spill_prefix: !Ref SpillPrefix default_docdb: !Ref DocDBConnectionString FunctionName: !Ref AthenaCatalogName - Handler: "com.amazonaws.athena.connectors.docdb.DocDBCompositeHandler" - CodeUri: "./target/athena-docdb-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-docdb:2022.47.1' Description: "Enables Amazon Athena to communicate with DocumentDB, 
making your DocumentDB data accessible via SQL." - Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] diff --git a/athena-dynamodb/Dockerfile b/athena-dynamodb/Dockerfile new file mode 100644 index 0000000000..868346d735 --- /dev/null +++ b/athena-dynamodb/Dockerfile @@ -0,0 +1,9 @@ +FROM public.ecr.aws/lambda/java:11 + +# Copy function code and runtime dependencies from Maven layout +COPY target/athena-dynamodb-2022.47.1.jar ${LAMBDA_TASK_ROOT} +# Unpack the jar +RUN jar xf athena-dynamodb-2022.47.1.jar + +# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) +CMD [ "com.amazonaws.athena.connectors.dynamodb.DynamoDBCompositeHandler" ] \ No newline at end of file diff --git a/athena-dynamodb/athena-dynamodb.yaml b/athena-dynamodb/athena-dynamodb.yaml index 01138151e6..d460c41e14 100644 --- a/athena-dynamodb/athena-dynamodb.yaml +++ b/athena-dynamodb/athena-dynamodb.yaml @@ -66,10 +66,9 @@ Resources: spill_prefix: !Ref SpillPrefix kms_key_id: !If [HasKMSKeyId, !Ref KMSKeyId, !Ref "AWS::NoValue"] FunctionName: !Ref AthenaCatalogName - Handler: "com.amazonaws.athena.connectors.dynamodb.DynamoDBCompositeHandler" - CodeUri: "./target/athena-dynamodb-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-dynamodb:2022.47.1' Description: "Enables Amazon Athena to communicate with DynamoDB, making your tables accessible via SQL" - Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRole] diff --git a/athena-elasticsearch/Dockerfile b/athena-elasticsearch/Dockerfile new file mode 100644 index 0000000000..d153e67a95 --- /dev/null +++ b/athena-elasticsearch/Dockerfile @@ -0,0 +1,9 @@ +FROM public.ecr.aws/lambda/java:11 + +# Copy function 
code and runtime dependencies from Maven layout +COPY target/athena-elasticsearch-2022.47.1.jar ${LAMBDA_TASK_ROOT} +# Unpack the jar +RUN jar xf athena-elasticsearch-2022.47.1.jar + +# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) +CMD [ "com.amazonaws.athena.connectors.elasticsearch.ElasticsearchCompositeHandler" ] \ No newline at end of file diff --git a/athena-elasticsearch/athena-elasticsearch.yaml b/athena-elasticsearch/athena-elasticsearch.yaml index ff12e031ad..c4106b1de8 100644 --- a/athena-elasticsearch/athena-elasticsearch.yaml +++ b/athena-elasticsearch/athena-elasticsearch.yaml @@ -102,10 +102,9 @@ Resources: query_timeout_search: !Ref QueryTimeoutSearch query_scroll_timeout: !Ref QueryScrollTimeout FunctionName: !Sub "${AthenaCatalogName}" - Handler: "com.amazonaws.athena.connectors.elasticsearch.ElasticsearchCompositeHandler" - CodeUri: "./target/athena-elasticsearch-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-elasticsearch:2022.47.1' Description: "The Elasticsearch Lambda Connector provides Athena users the ability to query data stored on Elasticsearch clusters." 
- Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] diff --git a/athena-gcs/Dockerfile b/athena-gcs/Dockerfile new file mode 100644 index 0000000000..04614fe43b --- /dev/null +++ b/athena-gcs/Dockerfile @@ -0,0 +1,9 @@ +FROM public.ecr.aws/lambda/java:11 + +# Copy function code and runtime dependencies from Maven layout +COPY target/athena-gcs.zip ${LAMBDA_TASK_ROOT} +# Unpack the jar +RUN jar xf athena-gcs.zip + +# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) +CMD [ "com.amazonaws.athena.connectors.gcs.GcsCompositeHandler" ] \ No newline at end of file diff --git a/athena-gcs/athena-gcs.yaml b/athena-gcs/athena-gcs.yaml index 3c7e3b3f1b..46079ad5f7 100644 --- a/athena-gcs/athena-gcs.yaml +++ b/athena-gcs/athena-gcs.yaml @@ -59,10 +59,9 @@ Resources: spill_prefix: !Ref SpillPrefix secret_manager_gcp_creds_name: !Ref GCSSecretName FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.gcs.GcsCompositeHandler" - CodeUri: "./target/athena-gcs.zip" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-gcs:2022.47.1' Description: "Amazon Athena GCS Connector" - Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] diff --git a/athena-google-bigquery/Dockerfile b/athena-google-bigquery/Dockerfile new file mode 100644 index 0000000000..b1dbf5ef11 --- /dev/null +++ b/athena-google-bigquery/Dockerfile @@ -0,0 +1,9 @@ +FROM public.ecr.aws/lambda/java:11 + +# Copy function code and runtime dependencies from Maven layout +COPY target/athena-google-bigquery-2022.47.1.jar ${LAMBDA_TASK_ROOT} +# Unpack the jar +RUN jar xf athena-google-bigquery-2022.47.1.jar + +# Set the CMD to your 
handler (could also be done as a parameter override outside of the Dockerfile) +CMD [ "com.amazonaws.athena.connectors.google.bigquery.BigQueryCompositeHandler" ] \ No newline at end of file diff --git a/athena-google-bigquery/athena-google-bigquery.yaml b/athena-google-bigquery/athena-google-bigquery.yaml index e5dd95f9b2..294b417504 100644 --- a/athena-google-bigquery/athena-google-bigquery.yaml +++ b/athena-google-bigquery/athena-google-bigquery.yaml @@ -79,10 +79,9 @@ Resources: big_query_endpoint: !Ref BigQueryEndpoint GOOGLE_APPLICATION_CREDENTIALS: '/tmp/service-account.json' FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.google.bigquery.BigQueryCompositeHandler" - CodeUri: "./target/athena-google-bigquery-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-google-bigquery:2022.47.1' Description: "Enables Amazon Athena to communicate with BigQuery using Google SDK" - Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] diff --git a/athena-hbase/Dockerfile b/athena-hbase/Dockerfile new file mode 100644 index 0000000000..6772c2c793 --- /dev/null +++ b/athena-hbase/Dockerfile @@ -0,0 +1,9 @@ +FROM public.ecr.aws/lambda/java:11 + +# Copy function code and runtime dependencies from Maven layout +COPY target/athena-hbase-2022.47.1.jar ${LAMBDA_TASK_ROOT} +# Unpack the jar +RUN jar xf athena-hbase-2022.47.1.jar + +# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) +CMD [ "com.amazonaws.athena.connectors.hbase.HbaseCompositeHandler" ] \ No newline at end of file diff --git a/athena-hbase/athena-hbase.yaml b/athena-hbase/athena-hbase.yaml index 447f96048f..cb059e5088 100644 --- a/athena-hbase/athena-hbase.yaml +++ b/athena-hbase/athena-hbase.yaml @@ -85,10 +85,9 @@ Resources: 
principal_name: !Ref PrincipalName hbase_rpc_protection: !Ref HbaseRpcProtection FunctionName: !Ref AthenaCatalogName - Handler: "com.amazonaws.athena.connectors.hbase.HbaseCompositeHandler" - CodeUri: "./target/athena-hbase-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-hbase:2022.47.1' Description: "Enables Amazon Athena to communicate with HBase, making your HBase data accessible via SQL" - Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] diff --git a/athena-hortonworks-hive/Dockerfile b/athena-hortonworks-hive/Dockerfile new file mode 100644 index 0000000000..3a68e6d997 --- /dev/null +++ b/athena-hortonworks-hive/Dockerfile @@ -0,0 +1,9 @@ +FROM public.ecr.aws/lambda/java:11 + +# Copy function code and runtime dependencies from Maven layout +COPY target/athena-hortonworks-hive-2022.47.1.jar ${LAMBDA_TASK_ROOT} +# Unpack the jar +RUN jar xf athena-hortonworks-hive-2022.47.1.jar + +# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) +CMD [ "com.amazonaws.athena.connectors.hortonworks.HiveMuxCompositeHandler" ] \ No newline at end of file diff --git a/athena-hortonworks-hive/athena-hortonworks-hive.yaml b/athena-hortonworks-hive/athena-hortonworks-hive.yaml index 5ea4c07bce..9b32050951 100644 --- a/athena-hortonworks-hive/athena-hortonworks-hive.yaml +++ b/athena-hortonworks-hive/athena-hortonworks-hive.yaml @@ -69,10 +69,9 @@ Resources: spill_prefix: !Ref SpillPrefix default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.hortonworks.HiveMuxCompositeHandler" - CodeUri: "./target/athena-hortonworks-hive-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub 
'292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-hortonworks-hive:2022.47.1' Description: "Enables Amazon Athena to communicate with Hortonworks Hive using JDBC" - Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] diff --git a/athena-kafka/Dockerfile b/athena-kafka/Dockerfile new file mode 100644 index 0000000000..fbab927e79 --- /dev/null +++ b/athena-kafka/Dockerfile @@ -0,0 +1,9 @@ +FROM public.ecr.aws/lambda/java:11 + +# Copy function code and runtime dependencies from Maven layout +COPY target/athena-kafka-2022.47.1.jar ${LAMBDA_TASK_ROOT} +# Unpack the jar +RUN jar xf athena-kafka-2022.47.1.jar + +# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) +CMD [ "com.amazonaws.athena.connectors.kafka.KafkaCompositeHandler" ] \ No newline at end of file diff --git a/athena-kafka/athena-kafka.yaml b/athena-kafka/athena-kafka.yaml index 0d6949ee55..0c95d94491 100644 --- a/athena-kafka/athena-kafka.yaml +++ b/athena-kafka/athena-kafka.yaml @@ -101,10 +101,9 @@ Resources: schema_registry_url: !Ref SchemaRegistryUrl auth_type: !Ref AuthType FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.kafka.KafkaCompositeHandler" - CodeUri: "./target/athena-kafka-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-kafka:2022.47.1' Description: "Enables Amazon Athena to communicate with Kafka clusters" - Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleARN] diff --git a/athena-msk/Dockerfile b/athena-msk/Dockerfile new file mode 100644 index 0000000000..b9cc2149da --- /dev/null +++ b/athena-msk/Dockerfile @@ -0,0 +1,9 @@ +FROM public.ecr.aws/lambda/java:11 + +# Copy 
function code and runtime dependencies from Maven layout +COPY target/athena-msk-2022.47.1.jar ${LAMBDA_TASK_ROOT} +# Unpack the jar +RUN jar xf athena-msk-2022.47.1.jar + +# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) +CMD [ "com.amazonaws.athena.connectors.msk.AmazonMskCompositeHandler" ] \ No newline at end of file diff --git a/athena-msk/athena-msk.yaml b/athena-msk/athena-msk.yaml index a73380ab19..00b63aa85f 100644 --- a/athena-msk/athena-msk.yaml +++ b/athena-msk/athena-msk.yaml @@ -96,10 +96,9 @@ Resources: kafka_endpoint: !Ref KafkaEndpoint auth_type: !Ref AuthType FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.msk.AmazonMskCompositeHandler" - CodeUri: "./target/athena-msk-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-msk:2022.47.1' Description: "Enables Amazon Athena to communicate with MSK clusters" - Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleARN] diff --git a/athena-mysql/Dockerfile b/athena-mysql/Dockerfile new file mode 100644 index 0000000000..08f27b704d --- /dev/null +++ b/athena-mysql/Dockerfile @@ -0,0 +1,9 @@ +FROM public.ecr.aws/lambda/java:11 + +# Copy function code and runtime dependencies from Maven layout +COPY target/athena-mysql-2022.47.1.jar ${LAMBDA_TASK_ROOT} +# Unpack the jar +RUN jar xf athena-mysql-2022.47.1.jar + +# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) +CMD [ "com.amazonaws.athena.connectors.mysql.MySqlMuxCompositeHandler" ] \ No newline at end of file diff --git a/athena-mysql/athena-mysql.yaml b/athena-mysql/athena-mysql.yaml index 8ad8417a1d..49ec08fbd2 100644 --- a/athena-mysql/athena-mysql.yaml +++ b/athena-mysql/athena-mysql.yaml @@ -70,10 +70,9 @@ Resources: spill_prefix: !Ref 
SpillPrefix default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.mysql.MySqlMuxCompositeHandler" - CodeUri: "./target/athena-mysql-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-mysql:2022.47.1' Description: "Enables Amazon Athena to communicate with MySQL using JDBC" - Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] diff --git a/athena-neptune/Dockerfile b/athena-neptune/Dockerfile new file mode 100644 index 0000000000..c8573d87c1 --- /dev/null +++ b/athena-neptune/Dockerfile @@ -0,0 +1,9 @@ +FROM public.ecr.aws/lambda/java:11 + +# Copy function code and runtime dependencies from Maven layout +COPY target/athena-neptune-2022.47.1.jar ${LAMBDA_TASK_ROOT} +# Unpack the jar +RUN jar xf athena-neptune-2022.47.1.jar + +# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) +CMD [ "com.amazonaws.athena.connectors.neptune.NeptuneCompositeHandler" ] \ No newline at end of file diff --git a/athena-neptune/athena-neptune.yaml b/athena-neptune/athena-neptune.yaml index 54ebf62cbb..804d8e880c 100644 --- a/athena-neptune/athena-neptune.yaml +++ b/athena-neptune/athena-neptune.yaml @@ -96,10 +96,9 @@ Resources: SERVICE_REGION: !Ref AWS::Region enable_caseinsensitivematch: !Ref EnableCaseInsensitiveMatch FunctionName: !Ref AthenaCatalogName - Handler: "com.amazonaws.athena.connectors.neptune.NeptuneCompositeHandler" - CodeUri: "./target/athena-neptune-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-neptune:2022.47.1' Description: "Enables Amazon Athena to communicate with Neptune, making your Neptune graph data accessible via SQL." 
- Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] diff --git a/athena-oracle/Dockerfile b/athena-oracle/Dockerfile new file mode 100644 index 0000000000..e85f8c566e --- /dev/null +++ b/athena-oracle/Dockerfile @@ -0,0 +1,9 @@ +FROM public.ecr.aws/lambda/java:11 + +# Copy function code and runtime dependencies from Maven layout +COPY target/athena-oracle-2022.47.1.jar ${LAMBDA_TASK_ROOT} +# Unpack the jar +RUN jar xf athena-oracle-2022.47.1.jar + +# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) +CMD [ "com.amazonaws.athena.connectors.oracle.OracleMuxCompositeHandler" ] \ No newline at end of file diff --git a/athena-oracle/athena-oracle.yaml b/athena-oracle/athena-oracle.yaml index dd19543fff..7e9a5fe0dd 100644 --- a/athena-oracle/athena-oracle.yaml +++ b/athena-oracle/athena-oracle.yaml @@ -82,10 +82,9 @@ Resources: default: !Ref DefaultConnectionString is_FIPS_Enabled: !Ref IsFIPSEnabled FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.oracle.OracleMuxCompositeHandler" - CodeUri: "./target/athena-oracle-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-oracle:2022.47.1' Description: "Enables Amazon Athena to communicate with ORACLE using JDBC" - Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] diff --git a/athena-postgresql/Dockerfile b/athena-postgresql/Dockerfile new file mode 100644 index 0000000000..3376a994dc --- /dev/null +++ b/athena-postgresql/Dockerfile @@ -0,0 +1,9 @@ +FROM public.ecr.aws/lambda/java:11 + +# Copy function code and runtime dependencies from Maven layout +COPY target/athena-postgresql-2022.47.1.jar 
${LAMBDA_TASK_ROOT} +# Unpack the jar +RUN jar xf athena-postgresql-2022.47.1.jar + +# Command can be overwritten by providing a different command in the template directly. +# No need to specify here (already defined in athena-postgresql.yaml because has two different handlers) \ No newline at end of file diff --git a/athena-postgresql/athena-postgresql.yaml b/athena-postgresql/athena-postgresql.yaml index ab68f4b22c..beda73c0fd 100644 --- a/athena-postgresql/athena-postgresql.yaml +++ b/athena-postgresql/athena-postgresql.yaml @@ -81,10 +81,11 @@ Resources: default: !Ref DefaultConnectionString default_scale: !Ref DefaultScale FunctionName: !Ref LambdaFunctionName - Handler: !Sub "com.amazonaws.athena.connectors.postgresql.${CompositeHandler}" - CodeUri: "./target/athena-postgresql-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-postgresql:2022.47.1' + ImageConfig: + Command: [ !Sub "com.amazonaws.athena.connectors.postgresql.${CompositeHandler}" ] Description: "Enables Amazon Athena to communicate with PostgreSQL using JDBC" - Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] diff --git a/athena-redis/Dockerfile b/athena-redis/Dockerfile new file mode 100644 index 0000000000..3e9e9888f7 --- /dev/null +++ b/athena-redis/Dockerfile @@ -0,0 +1,9 @@ +FROM public.ecr.aws/lambda/java:11 + +# Copy function code and runtime dependencies from Maven layout +COPY target/athena-redis-2022.47.1.jar ${LAMBDA_TASK_ROOT} +# Unpack the jar +RUN jar xf athena-redis-2022.47.1.jar + +# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) +CMD [ "com.amazonaws.athena.connectors.redis.RedisCompositeHandler" ] \ No newline at end of file diff --git a/athena-redis/athena-redis.yaml b/athena-redis/athena-redis.yaml index 
85ffd47a79..1fabf14d1d 100644 --- a/athena-redis/athena-redis.yaml +++ b/athena-redis/athena-redis.yaml @@ -81,10 +81,9 @@ Resources: qpt_cluster: !Ref QPTConnectionCluster qpt_db_number: !Ref QPTConnectionDBNumber FunctionName: !Ref AthenaCatalogName - Handler: "com.amazonaws.athena.connectors.redis.RedisCompositeHandler" - CodeUri: "./target/athena-redis-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-redis:2022.47.1' Description: "Enables Amazon Athena to communicate with Redis, making your Redis data accessible via SQL" - Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] diff --git a/athena-redshift/Dockerfile b/athena-redshift/Dockerfile new file mode 100644 index 0000000000..0e7d808823 --- /dev/null +++ b/athena-redshift/Dockerfile @@ -0,0 +1,9 @@ +FROM public.ecr.aws/lambda/java:11 + +# Copy function code and runtime dependencies from Maven layout +COPY target/athena-redshift-2022.47.1.jar ${LAMBDA_TASK_ROOT} +# Unpack the jar +RUN jar xf athena-redshift-2022.47.1.jar + +# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) +CMD [ "com.amazonaws.athena.connectors.redshift.RedshiftMuxCompositeHandler" ] \ No newline at end of file diff --git a/athena-redshift/athena-redshift.yaml b/athena-redshift/athena-redshift.yaml index 11ad42864d..fd5f90fe5c 100644 --- a/athena-redshift/athena-redshift.yaml +++ b/athena-redshift/athena-redshift.yaml @@ -79,10 +79,9 @@ Resources: default: !Ref DefaultConnectionString kms_key_id: !If [HasKMSKeyId, !Ref KMSKeyId, !Ref "AWS::NoValue"] FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.redshift.RedshiftMuxCompositeHandler" - CodeUri: "./target/athena-redshift-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub 
'292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-redshift:2022.47.1' Description: "Enables Amazon Athena to communicate with Redshift using JDBC" - Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRole] diff --git a/athena-saphana/Dockerfile b/athena-saphana/Dockerfile new file mode 100644 index 0000000000..5e55d28a12 --- /dev/null +++ b/athena-saphana/Dockerfile @@ -0,0 +1,9 @@ +FROM public.ecr.aws/lambda/java:11 + +# Copy function code and runtime dependencies from Maven layout +COPY target/athena-saphana.zip ${LAMBDA_TASK_ROOT} +# Unpack the jar +RUN jar xf athena-saphana.zip + +# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) +CMD [ "com.amazonaws.athena.connectors.saphana.SaphanaMuxCompositeHandler" ] \ No newline at end of file diff --git a/athena-saphana/athena-saphana.yaml b/athena-saphana/athena-saphana.yaml index 13b945c42f..66d1ee30f1 100644 --- a/athena-saphana/athena-saphana.yaml +++ b/athena-saphana/athena-saphana.yaml @@ -69,10 +69,9 @@ Resources: spill_prefix: !Ref SpillPrefix default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.saphana.SaphanaMuxCompositeHandler" - CodeUri: "./target/athena-saphana.zip" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-saphana:2022.47.1' Description: "Enables Amazon Athena to communicate with Teradata using JDBC" - Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] diff --git a/athena-snowflake/Dockerfile b/athena-snowflake/Dockerfile new file mode 100644 index 0000000000..8d4d9081a6 --- /dev/null +++ b/athena-snowflake/Dockerfile @@ -0,0 +1,9 @@ +FROM 
public.ecr.aws/lambda/java:11 + +# Copy function code and runtime dependencies from Maven layout +COPY target/athena-snowflake.zip ${LAMBDA_TASK_ROOT} +# Unpack the jar +RUN jar xf athena-snowflake.zip + +# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) +CMD [ "com.amazonaws.athena.connectors.snowflake.SnowflakeMuxCompositeHandler" ] \ No newline at end of file diff --git a/athena-snowflake/athena-snowflake.yaml b/athena-snowflake/athena-snowflake.yaml index 24edcaf9f8..651883f9a5 100644 --- a/athena-snowflake/athena-snowflake.yaml +++ b/athena-snowflake/athena-snowflake.yaml @@ -69,10 +69,9 @@ Resources: spill_prefix: !Ref SpillPrefix default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.snowflake.SnowflakeMuxCompositeHandler" - CodeUri: "./target/athena-snowflake.zip" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-snowflake:2022.47.1' Description: "Enables Amazon Athena to communicate with Snowflake using JDBC" - Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] diff --git a/athena-sqlserver/Dockerfile b/athena-sqlserver/Dockerfile new file mode 100644 index 0000000000..e602b9fc50 --- /dev/null +++ b/athena-sqlserver/Dockerfile @@ -0,0 +1,9 @@ +FROM public.ecr.aws/lambda/java:11 + +# Copy function code and runtime dependencies from Maven layout +COPY target/athena-sqlserver-2022.47.1.jar ${LAMBDA_TASK_ROOT} +# Unpack the jar +RUN jar xf athena-sqlserver-2022.47.1.jar + +# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) +CMD [ "com.amazonaws.athena.connectors.sqlserver.SqlServerMuxCompositeHandler" ] \ No newline at end of file diff --git a/athena-sqlserver/athena-sqlserver.yaml 
b/athena-sqlserver/athena-sqlserver.yaml index 22c9e1b89c..59c369eedf 100644 --- a/athena-sqlserver/athena-sqlserver.yaml +++ b/athena-sqlserver/athena-sqlserver.yaml @@ -76,10 +76,9 @@ Resources: spill_prefix: !Ref SpillPrefix default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.sqlserver.SqlServerMuxCompositeHandler" - CodeUri: "./target/athena-sqlserver-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-sqlserver:2022.47.1' Description: "Enables Amazon Athena to communicate with SQLSERVER using JDBC" - Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] diff --git a/athena-synapse/Dockerfile b/athena-synapse/Dockerfile new file mode 100644 index 0000000000..2a7a05ec98 --- /dev/null +++ b/athena-synapse/Dockerfile @@ -0,0 +1,9 @@ +FROM public.ecr.aws/lambda/java:11 + +# Copy function code and runtime dependencies from Maven layout +COPY target/athena-synapse-2022.47.1.jar ${LAMBDA_TASK_ROOT} +# Unpack the jar +RUN jar xf athena-synapse-2022.47.1.jar + +# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) +CMD [ "com.amazonaws.athena.connectors.synapse.SynapseMuxCompositeHandler" ] \ No newline at end of file diff --git a/athena-synapse/athena-synapse.yaml b/athena-synapse/athena-synapse.yaml index 176c0d2631..2e3e80653e 100644 --- a/athena-synapse/athena-synapse.yaml +++ b/athena-synapse/athena-synapse.yaml @@ -78,10 +78,9 @@ Resources: spill_prefix: !Ref SpillPrefix default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.synapse.SynapseMuxCompositeHandler" - CodeUri: "./target/athena-synapse-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub 
'292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-synapse:2022.47.1' Description: "Enables Amazon Athena to communicate with SYNPASE using JDBC" - Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleARN] diff --git a/athena-teradata/Dockerfile b/athena-teradata/Dockerfile new file mode 100644 index 0000000000..8f58411065 --- /dev/null +++ b/athena-teradata/Dockerfile @@ -0,0 +1,9 @@ +FROM public.ecr.aws/lambda/java:11 + +# Copy function code and runtime dependencies from Maven layout +COPY target/athena-teradata-2022.47.1.jar ${LAMBDA_TASK_ROOT} +# Unpack the jar +RUN jar xf athena-teradata-2022.47.1.jar + +# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) +CMD [ "com.amazonaws.athena.connectors.teradata.TeradataMuxCompositeHandler" ] \ No newline at end of file diff --git a/athena-teradata/athena-teradata.yaml b/athena-teradata/athena-teradata.yaml index 3e1431c6c0..8b4760a912 100644 --- a/athena-teradata/athena-teradata.yaml +++ b/athena-teradata/athena-teradata.yaml @@ -77,12 +77,11 @@ Resources: default: !Ref DefaultConnectionString partitioncount: !Ref PartitionCount FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.teradata.TeradataMuxCompositeHandler" Layers: - !Ref LambdaJDBCLayername - CodeUri: "./target/athena-teradata-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-teradata:2022.47.1' Description: "Enables Amazon Athena to communicate with Teradata using JDBC" - Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] diff --git a/athena-timestream/Dockerfile b/athena-timestream/Dockerfile new file mode 100644 index 0000000000..8a0be2c8f9 
--- /dev/null +++ b/athena-timestream/Dockerfile @@ -0,0 +1,9 @@ +FROM public.ecr.aws/lambda/java:11 + +# Copy function code and runtime dependencies from Maven layout +COPY target/athena-timestream-2022.47.1.jar ${LAMBDA_TASK_ROOT} +# Unpack the jar +RUN jar xf athena-timestream-2022.47.1.jar + +# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) +CMD [ "com.amazonaws.athena.connectors.timestream.TimestreamCompositeHandler" ] \ No newline at end of file diff --git a/athena-timestream/athena-timestream.yaml b/athena-timestream/athena-timestream.yaml index e0e3de7840..d036577be1 100644 --- a/athena-timestream/athena-timestream.yaml +++ b/athena-timestream/athena-timestream.yaml @@ -52,10 +52,9 @@ Resources: spill_bucket: !Ref SpillBucket spill_prefix: !Ref SpillPrefix FunctionName: !Ref AthenaCatalogName - Handler: "com.amazonaws.athena.connectors.timestream.TimestreamCompositeHandler" - CodeUri: "./target/athena-timestream-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-timestream:2022.47.1' Description: "Enables Amazon Athena to communicate with Amazon Timestream, making your time series data accessible from Athena." 
- Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] diff --git a/athena-tpcds/Dockerfile b/athena-tpcds/Dockerfile new file mode 100644 index 0000000000..7c4d31ffa1 --- /dev/null +++ b/athena-tpcds/Dockerfile @@ -0,0 +1,9 @@ +FROM public.ecr.aws/lambda/java:11 + +# Copy function code and runtime dependencies from Maven layout +COPY target/athena-tpcds-2022.47.1.jar ${LAMBDA_TASK_ROOT} +# Unpack the jar +RUN jar xf athena-tpcds-2022.47.1.jar + +# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) +CMD [ "com.amazonaws.athena.connectors.tpcds.TPCDSCompositeHandler" ] \ No newline at end of file diff --git a/athena-tpcds/athena-tpcds.yaml b/athena-tpcds/athena-tpcds.yaml index 219e0a7dd2..40b76f7194 100644 --- a/athena-tpcds/athena-tpcds.yaml +++ b/athena-tpcds/athena-tpcds.yaml @@ -52,10 +52,9 @@ Resources: spill_bucket: !Ref SpillBucket spill_prefix: !Ref SpillPrefix FunctionName: !Ref AthenaCatalogName - Handler: "com.amazonaws.athena.connectors.tpcds.TPCDSCompositeHandler" - CodeUri: "./target/athena-tpcds-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-tpcds:2022.47.1' Description: "This connector enables Amazon Athena to communicate with a randomly generated TPC-DS data source." 
- Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] diff --git a/athena-udfs/Dockerfile b/athena-udfs/Dockerfile new file mode 100644 index 0000000000..d18b85ae78 --- /dev/null +++ b/athena-udfs/Dockerfile @@ -0,0 +1,9 @@ +FROM public.ecr.aws/lambda/java:11 + +# Copy function code and runtime dependencies from Maven layout +COPY target/athena-udfs-2022.47.1.jar ${LAMBDA_TASK_ROOT} +# Unpack the jar +RUN jar xf athena-udfs-2022.47.1.jar + +# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) +CMD [ "com.amazonaws.athena.connectors.udfs.AthenaUDFHandler" ] \ No newline at end of file diff --git a/athena-udfs/athena-udfs.yaml b/athena-udfs/athena-udfs.yaml index 64fd2f54ef..968582181d 100644 --- a/athena-udfs/athena-udfs.yaml +++ b/athena-udfs/athena-udfs.yaml @@ -39,10 +39,9 @@ Resources: Type: 'AWS::Serverless::Function' Properties: FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.udfs.AthenaUDFHandler" - CodeUri: "./target/athena-udfs-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-udfs:2022.47.1' Description: "This connector enables Amazon Athena to leverage common UDFs made available via Lambda." 
- Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] diff --git a/athena-vertica/Dockerfile b/athena-vertica/Dockerfile new file mode 100644 index 0000000000..c06ed8b9c2 --- /dev/null +++ b/athena-vertica/Dockerfile @@ -0,0 +1,9 @@ +FROM public.ecr.aws/lambda/java:11 + +# Copy function code and runtime dependencies from Maven layout +COPY target/athena-vertica-2022.47.1.jar ${LAMBDA_TASK_ROOT} +# Unpack the jar +RUN jar xf athena-vertica-2022.47.1.jar + +# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) +CMD [ "com.amazonaws.athena.connectors.vertica.VerticaCompositeHandler" ] \ No newline at end of file diff --git a/athena-vertica/athena-vertica.yaml b/athena-vertica/athena-vertica.yaml index 39fcb916cd..c70c97cc2b 100644 --- a/athena-vertica/athena-vertica.yaml +++ b/athena-vertica/athena-vertica.yaml @@ -82,10 +82,9 @@ Resources: default: !Ref VerticaConnectionString FunctionName: !Sub "${AthenaCatalogName}" - Handler: "com.amazonaws.athena.connectors.vertica.VerticaCompositeHandler" - CodeUri: "./target/athena-vertica-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-vertica:2022.47.1' Description: "Amazon Athena Vertica Connector" - Runtime: java11 Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ] From ef3602c110ccf7dd87d266aa03f541a0e15631e6 Mon Sep 17 00:00:00 2001 From: Jithendar Trianz <106380520+Jithendar12@users.noreply.github.com> Date: Tue, 10 Sep 2024 21:58:08 +0530 Subject: [PATCH 22/87] Migrate Elasticache to AWS SDK v2 (#2238) --- athena-redis/pom.xml | 6 +-- .../redis/integ/RedisIntegTest.java | 39 ++++++++++--------- 2 files changed, 24 insertions(+), 21 deletions(-) diff --git 
a/athena-redis/pom.xml b/athena-redis/pom.xml index 7fec21cf3d..20dace645e 100644 --- a/athena-redis/pom.xml +++ b/athena-redis/pom.xml @@ -38,9 +38,9 @@ ${slf4j-log4j.version} - com.amazonaws - aws-java-sdk-elasticache - ${aws-sdk.version} + software.amazon.awssdk + elasticache + ${aws-sdk-v2.version} test diff --git a/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/integ/RedisIntegTest.java b/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/integ/RedisIntegTest.java index 2cb870fa7c..98c81ab204 100644 --- a/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/integ/RedisIntegTest.java +++ b/athena-redis/src/test/java/com/amazonaws/athena/connectors/redis/integ/RedisIntegTest.java @@ -19,7 +19,6 @@ */ package com.amazonaws.athena.connectors.redis.integ; -import com.amazonaws.ClientConfiguration; import com.amazonaws.athena.connector.integ.ConnectorStackFactory; import com.amazonaws.athena.connector.integ.IntegrationTestBase; import com.amazonaws.athena.connector.integ.clients.CloudFormationClient; @@ -28,13 +27,6 @@ import com.amazonaws.athena.connector.integ.data.ConnectorVpcAttributes; import com.amazonaws.athena.connector.integ.data.SecretsManagerCredentials; import com.amazonaws.athena.connector.integ.providers.ConnectorPackagingAttributesProvider; -import com.amazonaws.services.elasticache.AmazonElastiCache; -import com.amazonaws.services.elasticache.AmazonElastiCacheClientBuilder; -import com.amazonaws.services.elasticache.model.DescribeCacheClustersRequest; -import com.amazonaws.services.elasticache.model.DescribeCacheClustersResult; -import com.amazonaws.services.elasticache.model.DescribeReplicationGroupsRequest; -import com.amazonaws.services.elasticache.model.DescribeReplicationGroupsResult; -import com.amazonaws.services.elasticache.model.Endpoint; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.annotations.AfterClass; @@ -56,6 +48,12 @@ import 
software.amazon.awscdk.services.s3.IBucket; import software.amazon.awssdk.http.apache.ApacheHttpClient; import software.amazon.awssdk.services.athena.model.Row; +import software.amazon.awssdk.services.elasticache.ElastiCacheClient; +import software.amazon.awssdk.services.elasticache.model.DescribeCacheClustersRequest; +import software.amazon.awssdk.services.elasticache.model.DescribeCacheClustersResponse; +import software.amazon.awssdk.services.elasticache.model.DescribeReplicationGroupsRequest; +import software.amazon.awssdk.services.elasticache.model.DescribeReplicationGroupsResponse; +import software.amazon.awssdk.services.elasticache.model.Endpoint; import software.amazon.awssdk.services.glue.GlueClient; import software.amazon.awssdk.services.glue.model.EntityNotFoundException; import software.amazon.awssdk.services.glue.model.TableInput; @@ -143,12 +141,12 @@ protected void setUp() Endpoint standaloneEndpoint = getRedisInstanceData(redisStandaloneName, false); logger.info("Got Endpoint: " + standaloneEndpoint.toString()); redisEndpoints.put(STANDALONE_KEY, String.format("%s:%s", - standaloneEndpoint.getAddress(), standaloneEndpoint.getPort())); + standaloneEndpoint.address(), standaloneEndpoint.port())); Endpoint clusterEndpoint = getRedisInstanceData(redisClusterName, true); logger.info("Got Endpoint: " + clusterEndpoint.toString()); redisEndpoints.put(CLUSTER_KEY, String.format("%s:%s:%s", - clusterEndpoint.getAddress(), clusterEndpoint.getPort(), redisPassword)); + clusterEndpoint.address(), clusterEndpoint.port(), redisPassword)); // Get endpoint information and set the connection string environment var for Lambda. 
environmentVars.put("standalone_connection", redisEndpoints.get(STANDALONE_KEY)); @@ -346,21 +344,26 @@ private Stack getRedisStack() */ private Endpoint getRedisInstanceData(String redisName, boolean isCluster) { - AmazonElastiCache elastiCacheClient = AmazonElastiCacheClientBuilder.defaultClient(); + ElastiCacheClient elastiCacheClient = ElastiCacheClient.create(); try { if (isCluster) { - DescribeReplicationGroupsResult describeResult = elastiCacheClient.describeReplicationGroups(new DescribeReplicationGroupsRequest() - .withReplicationGroupId(redisName)); - return describeResult.getReplicationGroups().get(0).getConfigurationEndpoint(); + DescribeReplicationGroupsRequest describeRequest = DescribeReplicationGroupsRequest.builder() + .replicationGroupId(redisName) + .build(); + DescribeReplicationGroupsResponse describeResponse = elastiCacheClient.describeReplicationGroups(describeRequest); + return describeResponse.replicationGroups().get(0).configurationEndpoint(); } else { - DescribeCacheClustersResult describeResult = elastiCacheClient.describeCacheClusters(new DescribeCacheClustersRequest() - .withCacheClusterId(redisName).withShowCacheNodeInfo(true)); - return describeResult.getCacheClusters().get(0).getCacheNodes().get(0).getEndpoint(); + DescribeCacheClustersRequest describeRequest = DescribeCacheClustersRequest.builder() + .cacheClusterId(redisName) + .showCacheNodeInfo(true) + .build(); + DescribeCacheClustersResponse describeResponse = elastiCacheClient.describeCacheClusters(describeRequest); + return describeResponse.cacheClusters().get(0).cacheNodes().get(0).endpoint(); } } finally { - elastiCacheClient.shutdown(); + elastiCacheClient.close(); } } From b7dd9889849a9ba93ef4ce888a42040226088979 Mon Sep 17 00:00:00 2001 From: VenkatasivareddyTR <110587813+VenkatasivareddyTR@users.noreply.github.com> Date: Tue, 10 Sep 2024 22:12:39 +0530 Subject: [PATCH 23/87] v2 migration elasticsearch (#2243) --- athena-elasticsearch/pom.xml | 8 +-- 
.../AwsElasticsearchFactory.java | 11 ++-- .../ElasticsearchDomainMapProvider.java | 31 +++++----- .../ElasticsearchDomainMapProviderTest.java | 59 +++++++++---------- 4 files changed, 53 insertions(+), 56 deletions(-) diff --git a/athena-elasticsearch/pom.xml b/athena-elasticsearch/pom.xml index 56c6d3f956..15ff6fa7b4 100644 --- a/athena-elasticsearch/pom.xml +++ b/athena-elasticsearch/pom.xml @@ -81,11 +81,11 @@ elasticsearch-rest-high-level-client 7.10.2 - + - com.amazonaws - aws-java-sdk-elasticsearch - ${aws-sdk.version} + software.amazon.awssdk + elasticsearch + ${aws-sdk-v2.version} diff --git a/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/AwsElasticsearchFactory.java b/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/AwsElasticsearchFactory.java index 1d1dd9eaa4..0cf4c0005b 100644 --- a/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/AwsElasticsearchFactory.java +++ b/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/AwsElasticsearchFactory.java @@ -7,9 +7,9 @@ * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -19,8 +19,7 @@ */ package com.amazonaws.athena.connectors.elasticsearch; -import com.amazonaws.services.elasticsearch.AWSElasticsearch; -import com.amazonaws.services.elasticsearch.AWSElasticsearchClientBuilder; +import software.amazon.awssdk.services.elasticsearch.ElasticsearchClient; /** * This factory class provides an AWS ES Client. @@ -31,8 +30,8 @@ public class AwsElasticsearchFactory * Gets a default AWS ES client. 
* @return default AWS ES client. */ - public AWSElasticsearch getClient() + public ElasticsearchClient getClient() { - return AWSElasticsearchClientBuilder.defaultClient(); + return ElasticsearchClient.create(); } } diff --git a/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchDomainMapProvider.java b/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchDomainMapProvider.java index 63df6af55c..b3051f842d 100644 --- a/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchDomainMapProvider.java +++ b/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchDomainMapProvider.java @@ -19,15 +19,14 @@ */ package com.amazonaws.athena.connectors.elasticsearch; -import com.amazonaws.services.elasticsearch.AWSElasticsearch; -import com.amazonaws.services.elasticsearch.model.DescribeElasticsearchDomainsRequest; -import com.amazonaws.services.elasticsearch.model.DescribeElasticsearchDomainsResult; -import com.amazonaws.services.elasticsearch.model.ListDomainNamesRequest; -import com.amazonaws.services.elasticsearch.model.ListDomainNamesResult; import com.google.common.base.Splitter; import org.apache.arrow.util.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.elasticsearch.ElasticsearchClient; +import software.amazon.awssdk.services.elasticsearch.model.DescribeElasticsearchDomainsRequest; +import software.amazon.awssdk.services.elasticsearch.model.DescribeElasticsearchDomainsResponse; +import software.amazon.awssdk.services.elasticsearch.model.ListDomainNamesResponse; import java.util.ArrayList; import java.util.HashMap; @@ -100,14 +99,14 @@ public Map getDomainMap(String domainMapping) private Map getDomainMapFromAmazonElasticsearch() throws RuntimeException { - final AWSElasticsearch awsEsClient = awsElasticsearchFactory.getClient(); + final 
ElasticsearchClient awsEsClient = awsElasticsearchFactory.getClient(); final Map domainMap = new HashMap<>(); try { - ListDomainNamesResult listDomainNamesResult = awsEsClient.listDomainNames(new ListDomainNamesRequest()); + ListDomainNamesResponse listDomainNamesResponse = awsEsClient.listDomainNames(); List domainNames = new ArrayList<>(); - listDomainNamesResult.getDomainNames().forEach(domainInfo -> - domainNames.add(domainInfo.getDomainName())); + listDomainNamesResponse.domainNames().forEach(domainInfo -> + domainNames.add(domainInfo.domainName())); int startDomainNameIndex = 0; int endDomainNameIndex; @@ -117,13 +116,13 @@ private Map getDomainMapFromAmazonElasticsearch() // DescribeElasticsearchDomains - Describes the domain configuration for up to five specified Amazon // ES domains. Create multiple requests when list of Domain Names > 5. endDomainNameIndex = Math.min(startDomainNameIndex + 5, maxDomainNames); - DescribeElasticsearchDomainsRequest describeDomainsRequest = new DescribeElasticsearchDomainsRequest() - .withDomainNames(domainNames.subList(startDomainNameIndex, endDomainNameIndex)); - DescribeElasticsearchDomainsResult describeDomainsResult = + DescribeElasticsearchDomainsRequest describeDomainsRequest = DescribeElasticsearchDomainsRequest + .builder().domainNames(domainNames.subList(startDomainNameIndex, endDomainNameIndex)).build(); + DescribeElasticsearchDomainsResponse describeDomainsResult = awsEsClient.describeElasticsearchDomains(describeDomainsRequest); - describeDomainsResult.getDomainStatusList().forEach(domainStatus -> { - String domainEndpoint = (domainStatus.getEndpoint() == null) ? domainStatus.getEndpoints().get("vpc") : domainStatus.getEndpoint(); - domainMap.put(domainStatus.getDomainName(), endpointPrefix + domainEndpoint); + describeDomainsResult.domainStatusList().forEach(domainStatus -> { + String domainEndpoint = (domainStatus.endpoint() == null) ? 
domainStatus.endpoints().get("vpc") : domainStatus.endpoint(); + domainMap.put(domainStatus.domainName(), endpointPrefix + domainEndpoint); }); startDomainNameIndex = endDomainNameIndex; } @@ -138,7 +137,7 @@ private Map getDomainMapFromAmazonElasticsearch() throw new RuntimeException("Unable to create domain map: " + error.getMessage(), error); } finally { - awsEsClient.shutdown(); + awsEsClient.close(); } } diff --git a/athena-elasticsearch/src/test/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchDomainMapProviderTest.java b/athena-elasticsearch/src/test/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchDomainMapProviderTest.java index 2f2895d34a..7dfd58be3d 100644 --- a/athena-elasticsearch/src/test/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchDomainMapProviderTest.java +++ b/athena-elasticsearch/src/test/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchDomainMapProviderTest.java @@ -7,9 +7,9 @@ * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
@@ -19,25 +19,24 @@ */ package com.amazonaws.athena.connectors.elasticsearch; -import com.amazonaws.services.elasticsearch.AWSElasticsearch; -import com.amazonaws.services.elasticsearch.model.DomainInfo; -import com.amazonaws.services.elasticsearch.model.DescribeElasticsearchDomainsRequest; -import com.amazonaws.services.elasticsearch.model.DescribeElasticsearchDomainsResult; -import com.amazonaws.services.elasticsearch.model.ElasticsearchDomainStatus; -import com.amazonaws.services.elasticsearch.model.ListDomainNamesResult; import com.google.common.collect.ImmutableList; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.elasticsearch.ElasticsearchClient; +import software.amazon.awssdk.services.elasticsearch.model.DescribeElasticsearchDomainsRequest; +import software.amazon.awssdk.services.elasticsearch.model.DescribeElasticsearchDomainsResponse; +import software.amazon.awssdk.services.elasticsearch.model.DomainInfo; +import software.amazon.awssdk.services.elasticsearch.model.ElasticsearchDomainStatus; +import software.amazon.awssdk.services.elasticsearch.model.ListDomainNamesResponse; import java.util.ArrayList; import java.util.List; import java.util.Map; import static org.junit.Assert.assertEquals; -import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -83,40 +82,40 @@ public void getDomainMapFromAwsElasticsearchTest() AwsElasticsearchFactory mockElasticsearchFactory = mock(AwsElasticsearchFactory.class); ElasticsearchDomainMapProvider domainProvider = new ElasticsearchDomainMapProvider(true, mockElasticsearchFactory); - AWSElasticsearch mockClient = mock(AWSElasticsearch.class); - ListDomainNamesResult mockDomainInfo = mock(ListDomainNamesResult.class); + ElasticsearchClient mockClient = 
mock(ElasticsearchClient.class); + ListDomainNamesResponse mockDomainInfo = mock(ListDomainNamesResponse.class); List domainNames = ImmutableList.of("domain1", "domain2", "domain3", "domain4", "domain5", "domain6"); List domainInfo = new ArrayList<>(); List domainStatus = new ArrayList<>(); domainNames.forEach(domainName -> { - domainInfo.add(new DomainInfo().withDomainName(domainName)); - domainStatus.add(new ElasticsearchDomainStatus() - .withDomainName(domainName) - .withEndpoint("www.domain." + domainName)); + domainInfo.add(DomainInfo.builder().domainName(domainName).build()); + domainStatus.add(ElasticsearchDomainStatus.builder() + .domainName(domainName) + .endpoint("www.domain." + domainName).build()); }); when(mockElasticsearchFactory.getClient()).thenReturn(mockClient); - when(mockClient.listDomainNames(any())).thenReturn(mockDomainInfo); - when(mockDomainInfo.getDomainNames()).thenReturn(domainInfo); - - when(mockClient.describeElasticsearchDomains(new DescribeElasticsearchDomainsRequest() - .withDomainNames(domainNames.subList(0, 5)))) - .thenReturn(new DescribeElasticsearchDomainsResult() - .withDomainStatusList(domainStatus.subList(0, 5))); - when(mockClient.describeElasticsearchDomains(new DescribeElasticsearchDomainsRequest() - .withDomainNames(domainNames.subList(5, 6)))) - .thenReturn(new DescribeElasticsearchDomainsResult() - .withDomainStatusList(domainStatus.subList(5, 6))); + when(mockClient.listDomainNames()).thenReturn(mockDomainInfo); + when(mockDomainInfo.domainNames()).thenReturn(domainInfo); + + when(mockClient.describeElasticsearchDomains(DescribeElasticsearchDomainsRequest.builder(). 
+ domainNames(domainNames.subList(0, 5)).build())) + .thenReturn(DescribeElasticsearchDomainsResponse.builder() + .domainStatusList(domainStatus.subList(0, 5)).build()); + when(mockClient.describeElasticsearchDomains(DescribeElasticsearchDomainsRequest.builder() + .domainNames(domainNames.subList(5, 6)).build())) + .thenReturn(DescribeElasticsearchDomainsResponse.builder() + .domainStatusList(domainStatus.subList(5, 6)).build()); Map domainMap = domainProvider.getDomainMap(null); logger.info("Domain Map: {}", domainMap); - verify(mockClient).describeElasticsearchDomains(new DescribeElasticsearchDomainsRequest() - .withDomainNames(domainNames.subList(0, 5))); - verify(mockClient).describeElasticsearchDomains(new DescribeElasticsearchDomainsRequest() - .withDomainNames(domainNames.subList(5, 6))); + verify(mockClient).describeElasticsearchDomains(DescribeElasticsearchDomainsRequest.builder() + .domainNames(domainNames.subList(0, 5)).build()); + verify(mockClient).describeElasticsearchDomains(DescribeElasticsearchDomainsRequest.builder() + .domainNames(domainNames.subList(5, 6)).build()); assertEquals("Invalid number of domains.", domainNames.size(), domainMap.size()); domainNames.forEach(domainName -> { From 12fede57b789ae39ca539ce41c21f239c6163032 Mon Sep 17 00:00:00 2001 From: Aimery Methena <159072740+aimethed@users.noreply.github.com> Date: Tue, 10 Sep 2024 14:52:09 -0400 Subject: [PATCH 24/87] V2 master merge 9/10 (#2255) Signed-off-by: dependabot[bot] Co-authored-by: Mario Rial Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: AbdulRehman --- athena-elasticsearch/pom.xml | 2 +- .../jdbc/manager/JdbcSplitQueryBuilder.java | 2 +- athena-msk/pom.xml | 2 +- .../PostGreSqlQueryStringBuilder.java | 54 ++++++++++- .../postgresql/PostGreSqlRecordHandler.java | 3 +- .../PostGreSqlRecordHandlerTest.java | 96 ++++++++++++++++++- 
.../redshift/RedshiftRecordHandler.java | 4 +- .../redshift/RedshiftRecordHandlerTest.java | 2 +- .../saphana/SaphanaQueryStringBuilder.java | 7 -- pom.xml | 2 +- 10 files changed, 154 insertions(+), 20 deletions(-) diff --git a/athena-elasticsearch/pom.xml b/athena-elasticsearch/pom.xml index 15ff6fa7b4..49d2cb92dd 100644 --- a/athena-elasticsearch/pom.xml +++ b/athena-elasticsearch/pom.xml @@ -73,7 +73,7 @@ org.elasticsearch.client elasticsearch-rest-client - 8.15.0 + 8.15.1 diff --git a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcSplitQueryBuilder.java b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcSplitQueryBuilder.java index a532d02aa2..104f271017 100644 --- a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcSplitQueryBuilder.java +++ b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/manager/JdbcSplitQueryBuilder.java @@ -336,7 +336,7 @@ else if (singleValues.size() > 1) { return "(" + Joiner.on(" OR ").join(disjuncts) + ")"; } - private String toPredicate(String columnName, String operator, Object value, ArrowType type, + protected String toPredicate(String columnName, String operator, Object value, ArrowType type, List accumulator) { accumulator.add(new TypeAndValue(type, value)); diff --git a/athena-msk/pom.xml b/athena-msk/pom.xml index 81881d0156..a03c099b60 100644 --- a/athena-msk/pom.xml +++ b/athena-msk/pom.xml @@ -61,7 +61,7 @@ com.amazonaws aws-java-sdk-sts - 1.12.770 + 1.12.771 software.amazon.msk diff --git a/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlQueryStringBuilder.java b/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlQueryStringBuilder.java index e36638b64b..046d7049ca 100644 --- a/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlQueryStringBuilder.java +++ 
b/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlQueryStringBuilder.java @@ -23,7 +23,10 @@ import com.amazonaws.athena.connector.lambda.domain.predicate.Constraints; import com.amazonaws.athena.connectors.jdbc.manager.FederationExpressionParser; import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder; +import com.amazonaws.athena.connectors.jdbc.manager.TypeAndValue; import com.google.common.base.Strings; +import org.apache.arrow.vector.types.Types; +import org.apache.arrow.vector.types.pojo.ArrowType; import org.apache.arrow.vector.types.pojo.Schema; import java.sql.Connection; @@ -34,6 +37,8 @@ import java.util.Objects; import java.util.stream.Collectors; +import static java.lang.String.format; + /** * Extends {@link JdbcSplitQueryBuilder} and implements PostGreSql specific SQL clauses for split. * @@ -42,9 +47,13 @@ public class PostGreSqlQueryStringBuilder extends JdbcSplitQueryBuilder { - public PostGreSqlQueryStringBuilder(final String quoteCharacters, final FederationExpressionParser federationExpressionParser) + private final java.util.Map configOptions; + private final String postgresqlCollateExperimentalFlag = "postgresql_collate_experimental_flag"; + + public PostGreSqlQueryStringBuilder(final String quoteCharacters, final FederationExpressionParser federationExpressionParser, final java.util.Map configOptions) { super(quoteCharacters, federationExpressionParser); + this.configOptions = configOptions; } @Override @@ -97,10 +106,10 @@ protected String getFromClauseWithSplit(String catalog, String schema, String ta if (PostGreSqlMetadataHandler.ALL_PARTITIONS.equals(partitionSchemaName) || PostGreSqlMetadataHandler.ALL_PARTITIONS.equals(partitionName)) { // No partitions - return String.format(" FROM %s ", tableName); + return format(" FROM %s ", tableName); } - return String.format(" FROM %s.%s ", quote(partitionSchemaName), quote(partitionName)); + return format(" FROM %s.%s ", 
quote(partitionSchemaName), quote(partitionName)); } @Override @@ -113,4 +122,43 @@ protected List getPartitionWhereClauses(final Split split) return Collections.emptyList(); } + + protected String toPredicate(String columnName, String operator, Object value, ArrowType type, + List accumulator) + { + if (isPostgresqlCollateExperimentalFlagEnabled()) { + Types.MinorType minorType = Types.getMinorTypeForArrowType(type); + //Only check for varchar; as it's the only collate-able type + //Only a range that is applicable + if (minorType.equals(Types.MinorType.VARCHAR) && isOperatorARange(operator)) { + accumulator.add(new TypeAndValue(type, value)); + return format("%s %s ? COLLATE \"C\"", quote(columnName), operator); + } + } + // Default to parent's behavior + return super.toPredicate(columnName, operator, value, type, accumulator); + } + + /** + * Flags to check if experimental flag to allow different collate for postgresql + * @return true if a flag is set; default otherwise to false; + */ + private boolean isPostgresqlCollateExperimentalFlagEnabled() + { + String flag = configOptions.getOrDefault(postgresqlCollateExperimentalFlag, "false"); + return flag.equalsIgnoreCase("true"); + } + + private boolean isOperatorARange(String operator) + { + switch (operator) { + case ">": + case "<": + case ">=": + case "<=": + return true; + default: + return false; + } + } } diff --git a/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlRecordHandler.java b/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlRecordHandler.java index 0c89828a66..98a8cd089b 100644 --- a/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlRecordHandler.java +++ b/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlRecordHandler.java @@ -69,7 +69,8 @@ public PostGreSqlRecordHandler(java.util.Map configOptions) public PostGreSqlRecordHandler(DatabaseConnectionConfig 
databaseConnectionConfig, java.util.Map configOptions) { this(databaseConnectionConfig, S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(), - new GenericJdbcConnectionFactory(databaseConnectionConfig, PostGreSqlMetadataHandler.JDBC_PROPERTIES, new DatabaseConnectionInfo(POSTGRESQL_DRIVER_CLASS, POSTGRESQL_DEFAULT_PORT)), new PostGreSqlQueryStringBuilder(POSTGRES_QUOTE_CHARACTER, new PostgreSqlFederationExpressionParser(POSTGRES_QUOTE_CHARACTER)), configOptions); + new GenericJdbcConnectionFactory(databaseConnectionConfig, PostGreSqlMetadataHandler.JDBC_PROPERTIES, new DatabaseConnectionInfo(POSTGRESQL_DRIVER_CLASS, POSTGRESQL_DEFAULT_PORT)), + new PostGreSqlQueryStringBuilder(POSTGRES_QUOTE_CHARACTER, new PostgreSqlFederationExpressionParser(POSTGRES_QUOTE_CHARACTER), configOptions), configOptions); } @VisibleForTesting diff --git a/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlRecordHandlerTest.java b/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlRecordHandlerTest.java index e54c337d8e..b11f7f965d 100644 --- a/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlRecordHandlerTest.java +++ b/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlRecordHandlerTest.java @@ -56,6 +56,7 @@ import java.sql.PreparedStatement; import java.sql.SQLException; import java.util.Collections; +import java.util.Map; import java.util.concurrent.TimeUnit; import static com.amazonaws.athena.connectors.postgresql.PostGreSqlConstants.POSTGRES_NAME; @@ -75,7 +76,7 @@ public class PostGreSqlRecordHandlerTest extends TestBase private SecretsManagerClient secretsManager; private AthenaClient athena; private MockedStatic mockedPostGreSqlMetadataHandler; - + private DatabaseConnectionConfig databaseConnectionConfig; @Before public void setup() throws Exception @@ -86,8 +87,8 @@ public void setup() this.connection = 
Mockito.mock(Connection.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); - jdbcSplitQueryBuilder = new PostGreSqlQueryStringBuilder("\"", new PostgreSqlFederationExpressionParser("\"")); - final DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog", POSTGRES_NAME, + jdbcSplitQueryBuilder = new PostGreSqlQueryStringBuilder("\"", new PostgreSqlFederationExpressionParser("\""), Collections.emptyMap()); + databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog", POSTGRES_NAME, "postgres://jdbc:postgresql://hostname/user=A&password=B"); this.postGreSqlRecordHandler = new PostGreSqlRecordHandler(databaseConnectionConfig, amazonS3, secretsManager, athena, jdbcConnectionFactory, jdbcSplitQueryBuilder, com.google.common.collect.ImmutableMap.of()); @@ -229,6 +230,95 @@ public void buildSplitSqlForDateTest() logger.info("buildSplitSqlForDateTest - exit"); } + @Test + public void buildSplitSqlCollateAwareQuery() + throws SQLException + { + logger.info("buildSplitSqlCollateAwareQuery - enter"); + + TableName tableName = new TableName("testSchema", "testTable"); + + SchemaBuilder schemaBuilder = SchemaBuilder.newBuilder(); + schemaBuilder.addField(FieldBuilder.newBuilder("testCol1", Types.MinorType.INT.getType()).build()); + schemaBuilder.addField(FieldBuilder.newBuilder("testCol2", Types.MinorType.VARCHAR.getType()).build()); + schemaBuilder.addField(FieldBuilder.newBuilder("testCol3", Types.MinorType.BIGINT.getType()).build()); + schemaBuilder.addField(FieldBuilder.newBuilder("testCol4", Types.MinorType.FLOAT4.getType()).build()); + schemaBuilder.addField(FieldBuilder.newBuilder("testCol5", Types.MinorType.SMALLINT.getType()).build()); + schemaBuilder.addField(FieldBuilder.newBuilder("testCol6", Types.MinorType.TINYINT.getType()).build()); + 
schemaBuilder.addField(FieldBuilder.newBuilder("testCol7", Types.MinorType.FLOAT8.getType()).build()); + schemaBuilder.addField(FieldBuilder.newBuilder("testCol8", Types.MinorType.BIT.getType()).build()); + schemaBuilder.addField(FieldBuilder.newBuilder("testCol9", new ArrowType.Decimal(8, 2)).build()); + schemaBuilder.addField(FieldBuilder.newBuilder("testCol10", new ArrowType.Utf8()).build()); + schemaBuilder.addField(FieldBuilder.newBuilder("partition_schema_name", Types.MinorType.VARCHAR.getType()).build()); + schemaBuilder.addField(FieldBuilder.newBuilder("partition_name", Types.MinorType.VARCHAR.getType()).build()); + Schema schema = schemaBuilder.build(); + + Split split = Mockito.mock(Split.class); + Mockito.when(split.getProperties()).thenReturn(ImmutableMap.of("partition_schema_name", "s0", "partition_name", "p0")); + Mockito.when(split.getProperty(Mockito.eq(com.amazonaws.athena.connectors.postgresql.PostGreSqlMetadataHandler.BLOCK_PARTITION_SCHEMA_COLUMN_NAME))).thenReturn("s0"); + Mockito.when(split.getProperty(Mockito.eq(com.amazonaws.athena.connectors.postgresql.PostGreSqlMetadataHandler.BLOCK_PARTITION_COLUMN_NAME))).thenReturn("p0"); + + Range range1a = Mockito.mock(Range.class, Mockito.RETURNS_DEEP_STUBS); + Mockito.when(range1a.isSingleValue()).thenReturn(true); + Mockito.when(range1a.getLow().getValue()).thenReturn(1); + Range range1b = Mockito.mock(Range.class, Mockito.RETURNS_DEEP_STUBS); + Mockito.when(range1b.isSingleValue()).thenReturn(true); + Mockito.when(range1b.getLow().getValue()).thenReturn(2); + ValueSet valueSet1 = Mockito.mock(SortedRangeSet.class, Mockito.RETURNS_DEEP_STUBS); + Mockito.when(valueSet1.getRanges().getOrderedRanges()).thenReturn(ImmutableList.of(range1a, range1b)); + + ValueSet valueSet2 = getRangeSet(Marker.Bound.EXACTLY, "1", Marker.Bound.BELOW, "10"); + ValueSet valueSet3 = getRangeSet(Marker.Bound.ABOVE, 2L, Marker.Bound.EXACTLY, 20L); + ValueSet valueSet4 = getSingleValueSet(1.1F); + ValueSet valueSet5 = 
getSingleValueSet(1); + ValueSet valueSet6 = getSingleValueSet(0); + ValueSet valueSet7 = getSingleValueSet(1.2d); + ValueSet valueSet8 = getSingleValueSet(true); + ValueSet valueSet9 = getSingleValueSet(BigDecimal.valueOf(12.34)); + ValueSet valueSet10 = getSingleValueSet("A"); + + Constraints constraints = Mockito.mock(Constraints.class); + Mockito.when(constraints.getSummary()).thenReturn(new ImmutableMap.Builder() + .put("testCol1", valueSet1) + .put("testCol2", valueSet2) + .put("testCol3", valueSet3) + .put("testCol4", valueSet4) + .put("testCol5", valueSet5) + .put("testCol6", valueSet6) + .put("testCol7", valueSet7) + .put("testCol8", valueSet8) + .put("testCol9", valueSet9) + .put("testCol10", valueSet10) + .build()); + + String expectedSql = "SELECT \"testCol1\", \"testCol2\", \"testCol3\", \"testCol4\", \"testCol5\", \"testCol6\", \"testCol7\", \"testCol8\", \"testCol9\", RTRIM(\"testCol10\") AS \"testCol10\" FROM \"s0\".\"p0\" WHERE (\"testCol1\" IN (?,?)) AND ((\"testCol2\" >= ? COLLATE \"C\" AND \"testCol2\" < ? COLLATE \"C\")) AND ((\"testCol3\" > ? AND \"testCol3\" <= ?)) AND (\"testCol4\" = ?) AND (\"testCol5\" = ?) AND (\"testCol6\" = ?) AND (\"testCol7\" = ?) AND (\"testCol8\" = ?) AND (\"testCol9\" = ?) 
AND (\"testCol10\" = ?)"; + PreparedStatement expectedPreparedStatement = Mockito.mock(PreparedStatement.class); + Mockito.when(this.connection.prepareStatement(Mockito.eq(expectedSql))).thenReturn(expectedPreparedStatement); + + //Setting Collate Aware query builder flag on + Map configOptions = ImmutableMap.of("postgresql_collate_experimental_flag", "true"); + PostGreSqlQueryStringBuilder localJdbcSplitQueryBuilder = new PostGreSqlQueryStringBuilder("\"", new PostgreSqlFederationExpressionParser("\""), configOptions); + PostGreSqlRecordHandler localPostgresqlRecordHandler = new PostGreSqlRecordHandler(databaseConnectionConfig, amazonS3, secretsManager, athena, jdbcConnectionFactory, localJdbcSplitQueryBuilder, configOptions); + PreparedStatement preparedStatement = localPostgresqlRecordHandler.buildSplitSql(this.connection, "testCatalogName", tableName, schema, constraints, split); + + Assert.assertEquals(expectedPreparedStatement, preparedStatement); + Mockito.verify(preparedStatement, Mockito.times(1)).setInt(1, 1); + Mockito.verify(preparedStatement, Mockito.times(1)).setInt(2, 2); + Mockito.verify(preparedStatement, Mockito.times(1)).setString(3, "1"); + Mockito.verify(preparedStatement, Mockito.times(1)).setString(4, "10"); + Mockito.verify(preparedStatement, Mockito.times(1)).setLong(5, 2L); + Mockito.verify(preparedStatement, Mockito.times(1)).setLong(6, 20L); + Mockito.verify(preparedStatement, Mockito.times(1)).setFloat(7, 1.1F); + Mockito.verify(preparedStatement, Mockito.times(1)).setShort(8, (short) 1); + Mockito.verify(preparedStatement, Mockito.times(1)).setByte(9, (byte) 0); + Mockito.verify(preparedStatement, Mockito.times(1)).setDouble(10, 1.2d); + Mockito.verify(preparedStatement, Mockito.times(1)).setBoolean(11, true); + Mockito.verify(preparedStatement, Mockito.times(1)).setBigDecimal(12, BigDecimal.valueOf(12.34)); + Mockito.verify(preparedStatement, Mockito.times(1)).setString(13, "A"); + + logger.info("buildSplitSqlCollateAwareQuery - 
exit"); + } + private ValueSet getSingleValueSet(Object value) { Range range = Mockito.mock(Range.class, Mockito.RETURNS_DEEP_STUBS); Mockito.when(range.isSingleValue()).thenReturn(true); diff --git a/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftRecordHandler.java b/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftRecordHandler.java index 8684595097..16de7ec46e 100644 --- a/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftRecordHandler.java +++ b/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftRecordHandler.java @@ -60,7 +60,9 @@ public RedshiftRecordHandler(java.util.Map configOptions) public RedshiftRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { super(databaseConnectionConfig, S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(), - new GenericJdbcConnectionFactory(databaseConnectionConfig, PostGreSqlMetadataHandler.JDBC_PROPERTIES, new DatabaseConnectionInfo(REDSHIFT_DRIVER_CLASS, REDSHIFT_DEFAULT_PORT)), new PostGreSqlQueryStringBuilder(POSTGRES_QUOTE_CHARACTER, new PostgreSqlFederationExpressionParser(POSTGRES_QUOTE_CHARACTER)), configOptions); + new GenericJdbcConnectionFactory(databaseConnectionConfig, PostGreSqlMetadataHandler.JDBC_PROPERTIES, + new DatabaseConnectionInfo(REDSHIFT_DRIVER_CLASS, REDSHIFT_DEFAULT_PORT)), + new PostGreSqlQueryStringBuilder(POSTGRES_QUOTE_CHARACTER, new PostgreSqlFederationExpressionParser(POSTGRES_QUOTE_CHARACTER), configOptions), configOptions); } @VisibleForTesting diff --git a/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftRecordHandlerTest.java b/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftRecordHandlerTest.java index c3e73e0e1f..32f1218358 100644 --- a/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftRecordHandlerTest.java +++ 
b/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/RedshiftRecordHandlerTest.java @@ -90,7 +90,7 @@ public void setup() this.connection = Mockito.mock(Connection.class); this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class); Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection); - jdbcSplitQueryBuilder = new PostGreSqlQueryStringBuilder("\"", new PostgreSqlFederationExpressionParser("\"")); + jdbcSplitQueryBuilder = new PostGreSqlQueryStringBuilder("\"", new PostgreSqlFederationExpressionParser("\""), Collections.emptyMap()); final DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog", REDSHIFT_NAME, "redshift://jdbc:redshift://hostname/user=A&password=B"); diff --git a/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaQueryStringBuilder.java b/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaQueryStringBuilder.java index 16ec948679..ca65ce8efe 100644 --- a/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaQueryStringBuilder.java +++ b/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaQueryStringBuilder.java @@ -349,11 +349,4 @@ else if (singleValues.size() > 1) { return "(" + Joiner.on(" OR ").join(disjuncts) + ")"; } - - private String toPredicate(String columnName, String operator, Object value, ArrowType type, - List accumulator) - { - accumulator.add(new TypeAndValue(type, value)); - return quote(columnName) + " " + operator + " ?"; - } } diff --git a/pom.xml b/pom.xml index 8e8e833541..1e9ce9a931 100644 --- a/pom.xml +++ b/pom.xml @@ -30,7 +30,7 @@ 2.17.2 3.5.0 - 2.23.1 + 2.24.0 13.0.0 33.3.0-jre 3.25.3 From bb0053f003100169026c77268cb7c8ffc0593dc8 Mon Sep 17 00:00:00 2001 From: burhan94 Date: Wed, 11 Sep 2024 13:35:09 -0400 Subject: [PATCH 25/87] fixing yaml files with correct parameters (#13) --- 
athena-gcs/athena-gcs-connection.yaml | 4 ++-- athena-google-bigquery/athena-google-bigquery-connection.yaml | 4 ++-- athena-msk/athena-msk-connection.yaml | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/athena-gcs/athena-gcs-connection.yaml b/athena-gcs/athena-gcs-connection.yaml index 55da3c8f9b..7047cef28f 100644 --- a/athena-gcs/athena-gcs-connection.yaml +++ b/athena-gcs/athena-gcs-connection.yaml @@ -24,7 +24,7 @@ Parameters: GlueConnection: Description: "Name of glue connection storing connection details for Federated Data source." Type: String - SecretManagerGcpCredsName: + SecretName: Description: 'Secret key name in the AWS Secrets Manager.' Type: String KmsKeyId: @@ -103,7 +103,7 @@ Resources: - Action: - secretsmanager:GetSecretValue Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:*:*:secret:${SecretManagerGcpCredsName}*' + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:*:*:secret:${SecretName}*' - Action: - s3:GetObject - s3:ListBucket diff --git a/athena-google-bigquery/athena-google-bigquery-connection.yaml b/athena-google-bigquery/athena-google-bigquery-connection.yaml index d5aa61df95..ed1551f422 100644 --- a/athena-google-bigquery/athena-google-bigquery-connection.yaml +++ b/athena-google-bigquery/athena-google-bigquery-connection.yaml @@ -20,7 +20,7 @@ Parameters: Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' Type: String AllowedPattern: ^[a-z0-9-_]{1,64}$ - SecretManagerGcpCredsName: + SecretName: Description: "The secret name within AWS Secrets Manager that contains your Google Cloud Platform Credentials." 
Type: String SpillBucket: @@ -102,7 +102,7 @@ Resources: - Action: - secretsmanager:GetSecretValue Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretManagerGcpCredsName}*' + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' - Action: - logs:CreateLogGroup Effect: Allow diff --git a/athena-msk/athena-msk-connection.yaml b/athena-msk/athena-msk-connection.yaml index c268d25fb7..c0a415cafe 100644 --- a/athena-msk/athena-msk-connection.yaml +++ b/athena-msk/athena-msk-connection.yaml @@ -18,7 +18,7 @@ Parameters: Description: 'This is the name of the lambda function that will be created. This name must satisfy the pattern ^[a-z0-9-_]{1,64}$' Type: String AllowedPattern: ^[a-z0-9-_]{1,64}$ - SecretsManagerSecret: + SecretName: Description: "The secret name within AWS Secrets Manager that contains your aws key and secret Credentials(Not Required for IAM AUTH)" Default: "" Type: String @@ -98,7 +98,7 @@ Resources: - Action: - secretsmanager:GetSecretValue Effect: Allow - Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretsManagerSecret}*' + Resource: !Sub 'arn:${AWS::Partition}:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:${SecretName}*' - Action: - logs:CreateLogGroup Effect: Allow From a61f1f32e77efabe54e8feba38a1a813cf31772c Mon Sep 17 00:00:00 2001 From: Aimery Methena Date: Wed, 11 Sep 2024 15:37:27 -0400 Subject: [PATCH 26/87] update connections yaml files to use ecr image --- athena-aws-cmdb/athena-aws-cmdb-connection.yaml | 5 ++--- athena-cloudera-hive/athena-cloudera-hive-connection.yaml | 5 ++--- .../athena-cloudera-impala-connection.yaml | 5 ++--- .../athena-cloudwatch-metrics-connection.yaml | 5 ++--- athena-cloudwatch/athena-cloudwatch-connection.yaml | 5 ++--- athena-datalakegen2/athena-datalakegen2-connection.yaml | 5 ++--- athena-db2-as400/athena-db2-as400-connection.yaml | 
5 ++--- athena-db2/athena-db2-connection.yaml | 5 ++--- athena-docdb/athena-docdb-connection.yaml | 5 ++--- athena-dynamodb/athena-dynamodb-connection.yaml | 5 ++--- athena-elasticsearch/athena-elasticsearch-connection.yaml | 5 ++--- athena-gcs/athena-gcs-connection.yaml | 5 ++--- .../athena-google-bigquery-connection.yaml | 5 ++--- athena-hbase/athena-hbase-connection.yaml | 5 ++--- .../athena-hortonworks-hive-connection.yaml | 5 ++--- athena-msk/athena-msk-connection.yaml | 5 ++--- athena-mysql/athena-mysql-connection.yaml | 5 ++--- athena-neptune/athena-neptune-connection.yaml | 5 ++--- athena-oracle/athena-oracle-connection.yaml | 5 ++--- athena-postgresql/athena-postgresql-connection.yaml | 4 ++-- athena-redis/athena-redis-connection.yaml | 5 ++--- athena-redshift/athena-redshift-connection.yaml | 5 ++--- athena-saphana/athena-saphana-connection.yaml | 5 ++--- athena-snowflake/athena-snowflake-connection.yaml | 5 ++--- athena-sqlserver/athena-sqlserver-connection.yaml | 5 ++--- athena-synapse/athena-synapse-connection.yaml | 5 ++--- athena-teradata/athena-teradata-connection.yaml | 5 ++--- athena-timestream/athena-timestream-connection.yaml | 5 ++--- athena-tpcds/athena-tpcds-connection.yaml | 5 ++--- athena-vertica/athena-vertica-connection.yaml | 5 ++--- 30 files changed, 60 insertions(+), 89 deletions(-) diff --git a/athena-aws-cmdb/athena-aws-cmdb-connection.yaml b/athena-aws-cmdb/athena-aws-cmdb-connection.yaml index 454119caa7..bc3b57bd76 100644 --- a/athena-aws-cmdb/athena-aws-cmdb-connection.yaml +++ b/athena-aws-cmdb/athena-aws-cmdb-connection.yaml @@ -45,10 +45,9 @@ Resources: Variables: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.aws.cmdb.AwsCmdbCompositeHandler" - CodeUri: "./target/athena-aws-cmdb-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-aws-cmdb:2022.47.1' Description: 
"Enables Amazon Athena to communicate with various AWS Services, making your resource inventories accessible via SQL." - Runtime: java11 Timeout: 900 MemorySize: 3008 Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] diff --git a/athena-cloudera-hive/athena-cloudera-hive-connection.yaml b/athena-cloudera-hive/athena-cloudera-hive-connection.yaml index 75343bd08a..ce0d4f3ba1 100644 --- a/athena-cloudera-hive/athena-cloudera-hive-connection.yaml +++ b/athena-cloudera-hive/athena-cloudera-hive-connection.yaml @@ -52,10 +52,9 @@ Resources: Variables: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.cloudera.HiveMuxCompositeHandler" - CodeUri: "./target/athena-cloudera-hive-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-cloudera-hive:2022.47.1' Description: "Enables Amazon Athena to communicate with Cloudera Hive using JDBC" - Runtime: java11 Timeout: 900 MemorySize: 3008 Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] diff --git a/athena-cloudera-impala/athena-cloudera-impala-connection.yaml b/athena-cloudera-impala/athena-cloudera-impala-connection.yaml index ea829130f1..78a8e5fc08 100644 --- a/athena-cloudera-impala/athena-cloudera-impala-connection.yaml +++ b/athena-cloudera-impala/athena-cloudera-impala-connection.yaml @@ -52,10 +52,9 @@ Resources: Variables: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.cloudera.ImpalaMuxCompositeHandler" - CodeUri: "./target/athena-cloudera-impala-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-cloudera-impala:2022.47.1' Description: "Enables Amazon Athena to communicate with Cloudera Impala using JDBC" - Runtime: java11 Timeout: 900 MemorySize: 3008 Role: 
!If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] diff --git a/athena-cloudwatch-metrics/athena-cloudwatch-metrics-connection.yaml b/athena-cloudwatch-metrics/athena-cloudwatch-metrics-connection.yaml index b967005c40..59dde9ff37 100644 --- a/athena-cloudwatch-metrics/athena-cloudwatch-metrics-connection.yaml +++ b/athena-cloudwatch-metrics/athena-cloudwatch-metrics-connection.yaml @@ -45,10 +45,9 @@ Resources: Variables: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.cloudwatch.metrics.MetricsCompositeHandler" - CodeUri: "./target/athena-cloudwatch-metrics-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-cloudwatch-metrics:2022.47.1' Description: "Enables Amazon Athena to communicate with Cloudwatch Metrics, making your metrics data accessible via SQL" - Runtime: java11 Timeout: 900 MemorySize: 3008 Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] diff --git a/athena-cloudwatch/athena-cloudwatch-connection.yaml b/athena-cloudwatch/athena-cloudwatch-connection.yaml index 7363f5dc62..14f7969ea2 100644 --- a/athena-cloudwatch/athena-cloudwatch-connection.yaml +++ b/athena-cloudwatch/athena-cloudwatch-connection.yaml @@ -45,10 +45,9 @@ Resources: Variables: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.cloudwatch.CloudwatchCompositeHandler" - CodeUri: "./target/athena-cloudwatch-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-cloudwatch:2022.47.1' Description: "Enables Amazon Athena to communicate with Cloudwatch, making your log accessible via SQL" - Runtime: java11 Timeout: 900 MemorySize: 3008 Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRole] diff --git 
a/athena-datalakegen2/athena-datalakegen2-connection.yaml b/athena-datalakegen2/athena-datalakegen2-connection.yaml index b16dcd38da..2fbcf8b4d4 100644 --- a/athena-datalakegen2/athena-datalakegen2-connection.yaml +++ b/athena-datalakegen2/athena-datalakegen2-connection.yaml @@ -60,10 +60,9 @@ Resources: Variables: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.datalakegen2.DataLakeGen2MuxCompositeHandler" - CodeUri: "./target/athena-datalakegen2-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-datalakegen2:2022.47.1' Description: "Enables Amazon Athena to communicate with DataLake Gen2 using JDBC" - Runtime: java11 Timeout: !Ref 900 MemorySize: !Ref 3008 Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] diff --git a/athena-db2-as400/athena-db2-as400-connection.yaml b/athena-db2-as400/athena-db2-as400-connection.yaml index 89ba799ef6..65729914d6 100644 --- a/athena-db2-as400/athena-db2-as400-connection.yaml +++ b/athena-db2-as400/athena-db2-as400-connection.yaml @@ -61,10 +61,9 @@ Resources: Variables: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.db2as400.Db2As400MuxCompositeHandler" - CodeUri: "./target/athena-db2-as400-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-db2-as400:2022.47.1' Description: "Enables Amazon Athena to communicate with DB2 on iSeries (AS400) using JDBC" - Runtime: java11 Timeout: 900 MemorySize: 3008 Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] diff --git a/athena-db2/athena-db2-connection.yaml b/athena-db2/athena-db2-connection.yaml index 2d6a5d9b99..1c00ad6c01 100644 --- a/athena-db2/athena-db2-connection.yaml +++ b/athena-db2/athena-db2-connection.yaml @@ 
-61,10 +61,9 @@ Resources: Variables: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.db2.Db2MuxCompositeHandler" - CodeUri: "./target/athena-db2-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-db2:2022.47.1' Description: "Enables Amazon Athena to communicate with DB2 using JDBC" - Runtime: java11 Timeout: 900 MemorySize: 3008 Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] diff --git a/athena-docdb/athena-docdb-connection.yaml b/athena-docdb/athena-docdb-connection.yaml index f4a2a435eb..4c8bb80129 100644 --- a/athena-docdb/athena-docdb-connection.yaml +++ b/athena-docdb/athena-docdb-connection.yaml @@ -54,10 +54,9 @@ Resources: Variables: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.docdb.DocDBCompositeHandler" - CodeUri: "./target/athena-docdb-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-docdb:2022.47.1' Description: "Enables Amazon Athena to communicate with DocumentDB, making your DocumentDB data accessible via SQL." 
- Runtime: java11 Timeout: 900 MemorySize: 3008 Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] diff --git a/athena-dynamodb/athena-dynamodb-connection.yaml b/athena-dynamodb/athena-dynamodb-connection.yaml index aef157678f..e91f0c2fd0 100644 --- a/athena-dynamodb/athena-dynamodb-connection.yaml +++ b/athena-dynamodb/athena-dynamodb-connection.yaml @@ -45,10 +45,9 @@ Resources: Variables: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.dynamodb.DynamoDBCompositeHandler" - CodeUri: "./target/athena-dynamodb-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-dynamodb:2022.47.1' Description: "Enables Amazon Athena to communicate with DynamoDB, making your tables accessible via SQL" - Runtime: java11 Timeout: 900 MemorySize: 3008 Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] diff --git a/athena-elasticsearch/athena-elasticsearch-connection.yaml b/athena-elasticsearch/athena-elasticsearch-connection.yaml index a2b06ada0c..9ec31ee486 100644 --- a/athena-elasticsearch/athena-elasticsearch-connection.yaml +++ b/athena-elasticsearch/athena-elasticsearch-connection.yaml @@ -62,10 +62,9 @@ Resources: Variables: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.elasticsearch.ElasticsearchCompositeHandler" - CodeUri: "./target/athena-elasticsearch-2024.18.2.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-elasticsearch:2022.47.1' Description: "The Elasticsearch Lambda Connector provides Athena users the ability to query data stored on Elasticsearch clusters." 
- Runtime: java11 Timeout: 900 MemorySize: 3008 Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] diff --git a/athena-gcs/athena-gcs-connection.yaml b/athena-gcs/athena-gcs-connection.yaml index 55da3c8f9b..de88eaa909 100644 --- a/athena-gcs/athena-gcs-connection.yaml +++ b/athena-gcs/athena-gcs-connection.yaml @@ -49,10 +49,9 @@ Resources: Variables: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.gcs.GcsCompositeHandler" - CodeUri: "./target/athena-gcs.zip" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-gcs:2022.47.1' Description: "Amazon Athena GCS Connector" - Runtime: java11 Timeout: 900 MemorySize: 3008 Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] diff --git a/athena-google-bigquery/athena-google-bigquery-connection.yaml b/athena-google-bigquery/athena-google-bigquery-connection.yaml index d5aa61df95..b89e30cfb1 100644 --- a/athena-google-bigquery/athena-google-bigquery-connection.yaml +++ b/athena-google-bigquery/athena-google-bigquery-connection.yaml @@ -62,10 +62,9 @@ Resources: glue_connection: !Ref GlueConnection GOOGLE_APPLICATION_CREDENTIALS: '/tmp/service-account.json' FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.google.bigquery.BigQueryCompositeHandler" - CodeUri: "./target/athena-google-bigquery-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-google-bigquery:2022.47.1' Description: "Enables Amazon Athena to communicate with BigQuery using Google SDK" - Runtime: java11 Timeout: 900 MemorySize: 3008 Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] diff --git a/athena-hbase/athena-hbase-connection.yaml b/athena-hbase/athena-hbase-connection.yaml index 9ba9662917..792fd6b0db 100644 --- 
a/athena-hbase/athena-hbase-connection.yaml +++ b/athena-hbase/athena-hbase-connection.yaml @@ -54,10 +54,9 @@ Resources: Variables: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.hbase.HbaseCompositeHandler" - CodeUri: "./target/athena-hbase-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-hbase:2022.47.1' Description: "Enables Amazon Athena to communicate with HBase, making your HBase data accessible via SQL" - Runtime: java11 Timeout: 900 MemorySize: 3008 Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] diff --git a/athena-hortonworks-hive/athena-hortonworks-hive-connection.yaml b/athena-hortonworks-hive/athena-hortonworks-hive-connection.yaml index 4671c4b710..44ad427f58 100644 --- a/athena-hortonworks-hive/athena-hortonworks-hive-connection.yaml +++ b/athena-hortonworks-hive/athena-hortonworks-hive-connection.yaml @@ -58,10 +58,9 @@ Resources: Variables: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.hortonworks.HiveMuxCompositeHandler" - CodeUri: "./target/athena-hortonworks-hive-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-hortonworks-hive:2022.47.1' Description: "Enables Amazon Athena to communicate with Hortonworks Hive using JDBC" - Runtime: java11 Timeout: 900 MemorySize: 3008 Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] diff --git a/athena-msk/athena-msk-connection.yaml b/athena-msk/athena-msk-connection.yaml index c268d25fb7..43b9277df9 100644 --- a/athena-msk/athena-msk-connection.yaml +++ b/athena-msk/athena-msk-connection.yaml @@ -60,10 +60,9 @@ Resources: Variables: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName - Handler: 
"com.amazonaws.athena.connectors.msk.AmazonMskCompositeHandler" - CodeUri: "./target/athena-msk-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-msk:2022.47.1' Description: "Enables Amazon Athena to communicate with MSK clusters" - Runtime: java11 Timeout: 900 MemorySize: 3008 Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] diff --git a/athena-mysql/athena-mysql-connection.yaml b/athena-mysql/athena-mysql-connection.yaml index ded479cd93..1a51271003 100644 --- a/athena-mysql/athena-mysql-connection.yaml +++ b/athena-mysql/athena-mysql-connection.yaml @@ -54,10 +54,9 @@ Resources: Variables: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.mysql.MySqlMuxCompositeHandler" - CodeUri: "./target/athena-mysql-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-mysql:2022.47.1' Description: "Enables Amazon Athena to communicate with MySQL using JDBC" - Runtime: java11 Timeout: 900 MemorySize: 3008 Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] diff --git a/athena-neptune/athena-neptune-connection.yaml b/athena-neptune/athena-neptune-connection.yaml index a0a2839cc5..f1c02b0eab 100644 --- a/athena-neptune/athena-neptune-connection.yaml +++ b/athena-neptune/athena-neptune-connection.yaml @@ -57,10 +57,9 @@ Resources: glue_connection: !Ref GlueConnection SERVICE_REGION: !Ref AWS::Region FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.neptune.NeptuneCompositeHandler" - CodeUri: "./target/athena-neptune-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-neptune:2022.47.1' Description: "Enables Amazon Athena to communicate with Neptune, making your Neptune 
graph data accessible via SQL." - Runtime: java11 Timeout: 900 MemorySize: 3008 Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] diff --git a/athena-oracle/athena-oracle-connection.yaml b/athena-oracle/athena-oracle-connection.yaml index 647dc19579..bcc3a830a3 100644 --- a/athena-oracle/athena-oracle-connection.yaml +++ b/athena-oracle/athena-oracle-connection.yaml @@ -55,10 +55,9 @@ Resources: Variables: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.oracle.OracleMuxCompositeHandler" - CodeUri: "./target/athena-oracle-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-oracle:2022.47.1' Description: "Enables Amazon Athena to communicate with ORACLE using JDBC" - Runtime: java11 Timeout: 900 MemorySize: 3008 Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] diff --git a/athena-postgresql/athena-postgresql-connection.yaml b/athena-postgresql/athena-postgresql-connection.yaml index d61372175a..94f5710312 100644 --- a/athena-postgresql/athena-postgresql-connection.yaml +++ b/athena-postgresql/athena-postgresql-connection.yaml @@ -63,8 +63,8 @@ Resources: Variables: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName - Handler: !Sub "com.amazonaws.athena.connectors.postgresql.${CompositeHandler}" - CodeUri: "./target/athena-postgresql-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-postgresql:2022.47.1' Description: "Enables Amazon Athena to communicate with PostgreSQL using JDBC" Runtime: java11 Timeout: 900 diff --git a/athena-redis/athena-redis-connection.yaml b/athena-redis/athena-redis-connection.yaml index 7a6ca5d0cf..78813117e5 100644 --- a/athena-redis/athena-redis-connection.yaml +++ b/athena-redis/athena-redis-connection.yaml @@ -52,10 
+52,9 @@ Resources: Variables: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.redis.RedisCompositeHandler" - CodeUri: "./target/athena-redis-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-redis:2022.47.1' Description: "Enables Amazon Athena to communicate with Redis, making your Redis data accessible via SQL" - Runtime: java11 Timeout: 900 MemorySize: 3008 Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] diff --git a/athena-redshift/athena-redshift-connection.yaml b/athena-redshift/athena-redshift-connection.yaml index fedef92cc0..8aec82475e 100644 --- a/athena-redshift/athena-redshift-connection.yaml +++ b/athena-redshift/athena-redshift-connection.yaml @@ -52,10 +52,9 @@ Resources: Variables: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.redshift.RedshiftMuxCompositeHandler" - CodeUri: "./target/athena-redshift-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-redshift:2022.47.1' Description: "Enables Amazon Athena to communicate with Redshift using JDBC" - Runtime: java11 Timeout: 900 MemorySize: 3008 Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] diff --git a/athena-saphana/athena-saphana-connection.yaml b/athena-saphana/athena-saphana-connection.yaml index afef3098a5..7a7cdd3dd7 100644 --- a/athena-saphana/athena-saphana-connection.yaml +++ b/athena-saphana/athena-saphana-connection.yaml @@ -58,10 +58,9 @@ Resources: Variables: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.saphana.SaphanaMuxCompositeHandler" - CodeUri: "./target/athena-saphana.zip" + PackageType: "Image" + ImageUri: !Sub 
'292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-saphana:2022.47.1' Description: "Enables Amazon Athena to communicate with Teradata using JDBC" - Runtime: java11 Timeout: 900 MemorySize: 3008 Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] diff --git a/athena-snowflake/athena-snowflake-connection.yaml b/athena-snowflake/athena-snowflake-connection.yaml index 6ddc4ef17d..e9496f04e1 100644 --- a/athena-snowflake/athena-snowflake-connection.yaml +++ b/athena-snowflake/athena-snowflake-connection.yaml @@ -58,10 +58,9 @@ Resources: Variables: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.snowflake.SnowflakeMuxCompositeHandler" - CodeUri: "./target/athena-snowflake.zip" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-snowflake:2022.47.1' Description: "Enables Amazon Athena to communicate with Snowflake using JDBC" - Runtime: java11 Timeout: 900 MemorySize: 3008 Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] diff --git a/athena-sqlserver/athena-sqlserver-connection.yaml b/athena-sqlserver/athena-sqlserver-connection.yaml index 241a35ff98..39c49754eb 100644 --- a/athena-sqlserver/athena-sqlserver-connection.yaml +++ b/athena-sqlserver/athena-sqlserver-connection.yaml @@ -58,10 +58,9 @@ Resources: Variables: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.sqlserver.SqlServerMuxCompositeHandler" - CodeUri: "./target/athena-sqlserver-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-sqlserver:2022.47.1' Description: "Enables Amazon Athena to communicate with SQLSERVER using JDBC" - Runtime: java11 Timeout: 900 MemorySize: 3008 Role: !If [NotHasLambdaRole, !GetAtt 
FunctionRole.Arn, !Ref LambdaRoleArn] diff --git a/athena-synapse/athena-synapse-connection.yaml b/athena-synapse/athena-synapse-connection.yaml index 0b3352b66c..e2779b2fb0 100644 --- a/athena-synapse/athena-synapse-connection.yaml +++ b/athena-synapse/athena-synapse-connection.yaml @@ -60,10 +60,9 @@ Resources: Variables: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.synapse.SynapseMuxCompositeHandler" - CodeUri: "./target/athena-synapse-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-synapse:2022.47.1' Description: "Enables Amazon Athena to communicate with SYNPASE using JDBC" - Runtime: java11 Timeout: 900 MemorySize: 3008 Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] diff --git a/athena-teradata/athena-teradata-connection.yaml b/athena-teradata/athena-teradata-connection.yaml index b2c8c80f56..576edd79bd 100644 --- a/athena-teradata/athena-teradata-connection.yaml +++ b/athena-teradata/athena-teradata-connection.yaml @@ -56,10 +56,9 @@ Resources: Variables: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.teradata.TeradataMuxCompositeHandler" - CodeUri: "./target/athena-teradata-2024.18.2.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-teradata:2022.47.1' Description: "Enables Amazon Athena to communicate with Teradata using JDBC" - Runtime: java11 Timeout: 900 MemorySize: 3008 Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] diff --git a/athena-timestream/athena-timestream-connection.yaml b/athena-timestream/athena-timestream-connection.yaml index 207a1fcef3..f2f3c7cd84 100644 --- a/athena-timestream/athena-timestream-connection.yaml +++ b/athena-timestream/athena-timestream-connection.yaml 
@@ -43,10 +43,9 @@ Resources: Variables: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.timestream.TimestreamCompositeHandler" - CodeUri: "./target/athena-timestream-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-timestream:2022.47.1' Description: "Enables Amazon Athena to communicate with Amazon Timestream, making your time series data accessible from Athena." - Runtime: java11 Timeout: 900 MemorySize: 3008 Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] diff --git a/athena-tpcds/athena-tpcds-connection.yaml b/athena-tpcds/athena-tpcds-connection.yaml index f2488e28d4..a91cbda763 100644 --- a/athena-tpcds/athena-tpcds-connection.yaml +++ b/athena-tpcds/athena-tpcds-connection.yaml @@ -45,10 +45,9 @@ Resources: Variables: glue_connection: Ref GlueConnection FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.tpcds.TPCDSCompositeHandler" - CodeUri: "./target/athena-tpcds-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-tpcds:2022.47.1' Description: "This connector enables Amazon Athena to communicate with a randomly generated TPC-DS data source." 
- Runtime: java11 Timeout: 900 MemorySize: 3008 Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] diff --git a/athena-vertica/athena-vertica-connection.yaml b/athena-vertica/athena-vertica-connection.yaml index a3210b7df4..71cadb8bec 100644 --- a/athena-vertica/athena-vertica-connection.yaml +++ b/athena-vertica/athena-vertica-connection.yaml @@ -61,10 +61,9 @@ Resources: Variables: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName - Handler: "com.amazonaws.athena.connectors.vertica.VerticaCompositeHandler" - CodeUri: "./target/athena-vertica-2022.47.1.jar" + PackageType: "Image" + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-vertica:2022.47.1' Description: "Amazon Athena Vertica Connector" - Runtime: java11 Timeout: 900 MemorySize: 3008 Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] From a6fc14d40ed75dc698bd676505d010a56765174d Mon Sep 17 00:00:00 2001 From: Aimery Methena <159072740+aimethed@users.noreply.github.com> Date: Thu, 12 Sep 2024 17:02:21 -0400 Subject: [PATCH 27/87] update new uses of semantic version (#2258) --- tools/bump_versions/bump_connectors_version.py | 4 ++++ tools/bump_versions/common.py | 7 +++++++ 2 files changed, 11 insertions(+) diff --git a/tools/bump_versions/bump_connectors_version.py b/tools/bump_versions/bump_connectors_version.py index d478fd1c3e..d6cd78edd4 100755 --- a/tools/bump_versions/bump_connectors_version.py +++ b/tools/bump_versions/bump_connectors_version.py @@ -49,3 +49,7 @@ # Bump the versions in the yaml files yaml_files = glob.glob(f"{connector}/*.yaml") + glob.glob(f"{connector}/*.yml") common.update_yaml(yaml_files, new_version) + + # Bump the versions in the Dockerfiles + dockerfiles = glob.glob("Dockerfile") + common.update_dockerfile(dockerfiles, new_version) diff --git a/tools/bump_versions/common.py b/tools/bump_versions/common.py index 40ba70be79..bec31d3038 100755 --- 
a/tools/bump_versions/common.py +++ b/tools/bump_versions/common.py @@ -36,6 +36,13 @@ def update_yaml(yaml_files, new_version): for yml in yaml_files: subprocess.run(["sed", "-i", f"s/\(SemanticVersion:\s*\).*/\\1{new_version}/", yml]) subprocess.run(["sed", "-i", f"s/\(CodeUri:.*-\)[0-9]*\.[0-9]*\.[0-9]*\(-\?.*\.jar\)/\\1{new_version}\\2/", yml]) + subprocess.run(["sed", "-i", f"s/\(ImageUri:.*:\)[0-9]*\.[0-9]*\.[0-9]*\(\'\)/\\1{new_version}\\2/", yml]) + + +def update_dockerfile(dockerfiles, new_version): + for file in dockerfiles: + subprocess.run(["sed", "-i", f"s/\(COPY\s.*-\)[0-9]*\.[0-9]*\.[0-9]*\(\.jar.*\)/\\1{new_version}\\2/", file]) + subprocess.run(["sed", "-i", f"s/\(RUN\sjar\sxf.*-\)[0-9]*\.[0-9]*\.[0-9]*\(\.jar\)/\\1{new_version}\\2/", file]) def update_project_version(soup, new_version): From b70cbd5c3ac7b040b9ee085de9f9b807f7ecfae0 Mon Sep 17 00:00:00 2001 From: Trianz-Akshay <108925344+Trianz-Akshay@users.noreply.github.com> Date: Sat, 14 Sep 2024 02:00:33 +0530 Subject: [PATCH 28/87] v2 changes for timestream (#2239) --- athena-timestream/pom.xml | 18 ++-- .../timestream/TimestreamClientBuilder.java | 30 +++--- .../timestream/TimestreamMetadataHandler.java | 100 +++++++++--------- .../timestream/TimestreamRecordHandler.java | 48 ++++----- .../connectors/timestream/TestUtils.java | 72 ++++++------- .../TimestreamClientBuilderTest.java | 7 +- .../TimestreamMetadataHandlerTest.java | 95 ++++++++--------- .../TimestreamRecordHandlerTest.java | 38 +++---- .../timestream/integ/TimestreamIntegTest.java | 24 ++--- .../TimestreamWriteRecordRequestBuilder.java | 34 +++--- 10 files changed, 232 insertions(+), 234 deletions(-) diff --git a/athena-timestream/pom.xml b/athena-timestream/pom.xml index a58b2c13c0..f00279ec9d 100644 --- a/athena-timestream/pom.xml +++ b/athena-timestream/pom.xml @@ -47,14 +47,14 @@ ${slf4j-log4j.version} - com.amazonaws - aws-java-sdk-timestreamwrite - ${aws-sdk.version} + software.amazon.awssdk + timestreamwrite + 
${aws-sdk-v2.version} - com.amazonaws - aws-java-sdk-timestreamquery - ${aws-sdk.version} + software.amazon.awssdk + timestreamquery + ${aws-sdk-v2.version} org.slf4j @@ -85,6 +85,12 @@ ${log4j2Version} runtime + + org.mockito + mockito-inline + ${mockito.version} + test + diff --git a/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamClientBuilder.java b/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamClientBuilder.java index 5f0a228f73..473ea85932 100644 --- a/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamClientBuilder.java +++ b/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamClientBuilder.java @@ -19,38 +19,42 @@ */ package com.amazonaws.athena.connectors.timestream; -import com.amazonaws.ClientConfiguration; -import com.amazonaws.services.timestreamquery.AmazonTimestreamQuery; -import com.amazonaws.services.timestreamquery.AmazonTimestreamQueryClientBuilder; -import com.amazonaws.services.timestreamwrite.AmazonTimestreamWrite; -import com.amazonaws.services.timestreamwrite.AmazonTimestreamWriteClientBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; +import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; +import software.amazon.awssdk.core.client.config.SdkAdvancedClientOption; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.regions.providers.DefaultAwsRegionProviderChain; +import software.amazon.awssdk.services.timestreamquery.TimestreamQueryClient; +import software.amazon.awssdk.services.timestreamwrite.TimestreamWriteClient; public class TimestreamClientBuilder { private static final Logger logger = LoggerFactory.getLogger(TimestreamClientBuilder.class); - + static Region defaultRegion = DefaultAwsRegionProviderChain.builder().build().getRegion(); private 
TimestreamClientBuilder() { // prevent instantiation with private constructor } - public static AmazonTimestreamQuery buildQueryClient(String sourceType) + public static TimestreamQueryClient buildQueryClient(String sourceType) { - return AmazonTimestreamQueryClientBuilder.standard().withClientConfiguration(buildClientConfiguration(sourceType)).build(); + return TimestreamQueryClient.builder().region(defaultRegion).credentialsProvider(DefaultCredentialsProvider.create()) + .overrideConfiguration(buildClientConfiguration(sourceType)).build(); } - public static AmazonTimestreamWrite buildWriteClient(String sourceType) + public static TimestreamWriteClient buildWriteClient(String sourceType) { - return AmazonTimestreamWriteClientBuilder.standard().withClientConfiguration(buildClientConfiguration(sourceType)).build(); + return TimestreamWriteClient.builder().region(defaultRegion).credentialsProvider(DefaultCredentialsProvider.create()) + .overrideConfiguration(buildClientConfiguration(sourceType)).build(); } - static ClientConfiguration buildClientConfiguration(String sourceType) + static ClientOverrideConfiguration buildClientConfiguration(String sourceType) { String userAgent = "aws-athena-" + sourceType + "-connector"; - ClientConfiguration clientConfiguration = new ClientConfiguration().withUserAgentPrefix(userAgent); - logger.info("Created client configuration with user agent {} for Timestream SDK", clientConfiguration.getUserAgentPrefix()); + ClientOverrideConfiguration clientConfiguration = ClientOverrideConfiguration.builder().putAdvancedOption(SdkAdvancedClientOption.USER_AGENT_PREFIX, userAgent).build(); + logger.info("Created client configuration with user agent {} for Timestream SDK is present", clientConfiguration.advancedOption(SdkAdvancedClientOption.USER_AGENT_PREFIX).isPresent()); return clientConfiguration; } } diff --git a/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandler.java 
b/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandler.java index 23fb89ad37..84cdb9fb76 100644 --- a/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandler.java +++ b/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandler.java @@ -42,16 +42,6 @@ import com.amazonaws.athena.connector.util.PaginatedRequestIterator; import com.amazonaws.athena.connectors.timestream.qpt.TimestreamQueryPassthrough; import com.amazonaws.athena.connectors.timestream.query.QueryFactory; -import com.amazonaws.services.timestreamquery.AmazonTimestreamQuery; -import com.amazonaws.services.timestreamquery.model.ColumnInfo; -import com.amazonaws.services.timestreamquery.model.Datum; -import com.amazonaws.services.timestreamquery.model.QueryRequest; -import com.amazonaws.services.timestreamquery.model.QueryResult; -import com.amazonaws.services.timestreamquery.model.Row; -import com.amazonaws.services.timestreamwrite.AmazonTimestreamWrite; -import com.amazonaws.services.timestreamwrite.model.ListDatabasesRequest; -import com.amazonaws.services.timestreamwrite.model.ListDatabasesResult; -import com.amazonaws.services.timestreamwrite.model.ListTablesResult; import com.google.common.collect.ImmutableMap; import org.apache.arrow.util.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Field; @@ -62,6 +52,16 @@ import software.amazon.awssdk.services.glue.GlueClient; import software.amazon.awssdk.services.glue.model.Table; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.timestreamquery.TimestreamQueryClient; +import software.amazon.awssdk.services.timestreamquery.model.ColumnInfo; +import software.amazon.awssdk.services.timestreamquery.model.Datum; +import software.amazon.awssdk.services.timestreamquery.model.QueryRequest; +import 
software.amazon.awssdk.services.timestreamquery.model.QueryResponse; +import software.amazon.awssdk.services.timestreamquery.model.Row; +import software.amazon.awssdk.services.timestreamwrite.TimestreamWriteClient; +import software.amazon.awssdk.services.timestreamwrite.model.Database; +import software.amazon.awssdk.services.timestreamwrite.model.ListDatabasesRequest; +import software.amazon.awssdk.services.timestreamwrite.model.ListDatabasesResponse; import java.util.Collections; import java.util.List; @@ -90,8 +90,8 @@ public class TimestreamMetadataHandler private final QueryFactory queryFactory = new QueryFactory(); private final GlueClient glue; - private final AmazonTimestreamQuery tsQuery; - private final AmazonTimestreamWrite tsMeta; + private final TimestreamQueryClient tsQuery; + private final TimestreamWriteClient tsMeta; private final TimestreamQueryPassthrough queryPassthrough; @@ -106,8 +106,8 @@ public TimestreamMetadataHandler(java.util.Map configOptions) @VisibleForTesting protected TimestreamMetadataHandler( - AmazonTimestreamQuery tsQuery, - AmazonTimestreamWrite tsMeta, + TimestreamQueryClient tsQuery, + TimestreamWriteClient tsMeta, GlueClient glue, EncryptionKeyFactory keyFactory, SecretsManagerClient secretsManager, @@ -136,9 +136,9 @@ public GetDataSourceCapabilitiesResponse doGetDataSourceCapabilities(BlockAlloca public ListSchemasResponse doListSchemaNames(BlockAllocator blockAllocator, ListSchemasRequest request) throws Exception { - List schemas = PaginatedRequestIterator.stream(this::doListSchemaNamesOnePage, ListDatabasesResult::getNextToken) - .flatMap(result -> result.getDatabases().stream()) - .map(db -> db.getDatabaseName()) + List schemas = PaginatedRequestIterator.stream(this::doListSchemaNamesOnePage, ListDatabasesResponse::nextToken) + .flatMap(result -> result.databases().stream()) + .map(Database::databaseName) .collect(Collectors.toList()); return new ListSchemasResponse( @@ -146,9 +146,9 @@ public ListSchemasResponse 
doListSchemaNames(BlockAllocator blockAllocator, List schemas); } - private ListDatabasesResult doListSchemaNamesOnePage(String nextToken) + private ListDatabasesResponse doListSchemaNamesOnePage(String nextToken) { - return tsMeta.listDatabases(new ListDatabasesRequest().withNextToken(nextToken)); + return tsMeta.listDatabases(ListDatabasesRequest.builder().nextToken(nextToken).build()); } @Override @@ -159,7 +159,7 @@ public ListTablesResponse doListTables(BlockAllocator blockAllocator, ListTables try { return doListTablesInternal(blockAllocator, request); } - catch (com.amazonaws.services.timestreamwrite.model.ResourceNotFoundException ex) { + catch (software.amazon.awssdk.services.timestreamwrite.model.ResourceNotFoundException ex) { // If it fails then we will retry after resolving the schema name by ignoring the casing String resolvedSchemaName = findSchemaNameIgnoringCase(request.getSchemaName()); request = new ListTablesRequest(request.getIdentity(), request.getQueryId(), request.getCatalogName(), resolvedSchemaName, request.getNextToken(), request.getPageSize()); @@ -191,43 +191,43 @@ private ListTablesResponse doListTablesInternal(BlockAllocator blockAllocator, L } // Otherwise don't retrieve all pages, just pass through the page token. 
- ListTablesResult timestreamResults = doListTablesOnePage(request.getSchemaName(), request.getNextToken()); - List tableNames = timestreamResults.getTables() + software.amazon.awssdk.services.timestreamwrite.model.ListTablesResponse timestreamResults = doListTablesOnePage(request.getSchemaName(), request.getNextToken()); + List tableNames = timestreamResults.tables() .stream() - .map(table -> new TableName(request.getSchemaName(), table.getTableName())) + .map(table -> new TableName(request.getSchemaName(), table.tableName())) .collect(Collectors.toList()); // Pass through whatever token we got from Glue to the user ListTablesResponse result = new ListTablesResponse( request.getCatalogName(), tableNames, - timestreamResults.getNextToken()); + timestreamResults.nextToken()); logger.debug("doListTables [paginated] result: {}", result); return result; } - private ListTablesResult doListTablesOnePage(String schemaName, String nextToken) + private software.amazon.awssdk.services.timestreamwrite.model.ListTablesResponse doListTablesOnePage(String schemaName, String nextToken) { // TODO: We should pass through the pageSize as withMaxResults(pageSize) - com.amazonaws.services.timestreamwrite.model.ListTablesRequest listTablesRequest = - new com.amazonaws.services.timestreamwrite.model.ListTablesRequest() - .withDatabaseName(schemaName) - .withNextToken(nextToken); + software.amazon.awssdk.services.timestreamwrite.model.ListTablesRequest listTablesRequest = software.amazon.awssdk.services.timestreamwrite.model.ListTablesRequest.builder() + .databaseName(schemaName) + .nextToken(nextToken) + .build(); return tsMeta.listTables(listTablesRequest); } private Stream getTableNamesInSchema(String schemaName) { - return PaginatedRequestIterator.stream((pageToken) -> doListTablesOnePage(schemaName, pageToken), ListTablesResult::getNextToken) - .flatMap(currResult -> currResult.getTables().stream()) - .map(table -> new TableName(schemaName, table.getTableName())); + return 
PaginatedRequestIterator.stream((pageToken) -> doListTablesOnePage(schemaName, pageToken), software.amazon.awssdk.services.timestreamwrite.model.ListTablesResponse::nextToken) + .flatMap(currResult -> currResult.tables().stream()) + .map(table -> new TableName(schemaName, table.tableName())); } private String findSchemaNameIgnoringCase(String schemaNameInsensitive) { - return PaginatedRequestIterator.stream(this::doListSchemaNamesOnePage, ListDatabasesResult::getNextToken) - .flatMap(result -> result.getDatabases().stream()) - .map(db -> db.getDatabaseName()) + return PaginatedRequestIterator.stream(this::doListSchemaNamesOnePage, ListDatabasesResponse::nextToken) + .flatMap(result -> result.databases().stream()) + .map(Database::databaseName) .filter(name -> name.equalsIgnoreCase(schemaNameInsensitive)) .findAny() .orElseThrow(() -> new RuntimeException(String.format("Could not find a case-insensitive match for schema name %s", schemaNameInsensitive))); @@ -238,9 +238,9 @@ private TableName findTableNameIgnoringCase(BlockAllocator blockAllocator, GetTa String caseInsenstiveSchemaNameMatch = findSchemaNameIgnoringCase(getTableRequest.getTableName().getSchemaName()); // based on AmazonMskMetadataHandler::findGlueRegistryNameIgnoringCasing - return PaginatedRequestIterator.stream((pageToken) -> doListTablesOnePage(caseInsenstiveSchemaNameMatch, pageToken), ListTablesResult::getNextToken) - .flatMap(result -> result.getTables().stream()) - .map(tbl -> new TableName(caseInsenstiveSchemaNameMatch, tbl.getTableName())) + return PaginatedRequestIterator.stream((pageToken) -> doListTablesOnePage(caseInsenstiveSchemaNameMatch, pageToken), software.amazon.awssdk.services.timestreamwrite.model.ListTablesResponse::nextToken) + .flatMap(result -> result.tables().stream()) + .map(tbl -> new TableName(caseInsenstiveSchemaNameMatch, tbl.tableName())) .filter(tbl -> tbl.getTableName().equalsIgnoreCase(getTableRequest.getTableName().getTableName())) .findAny() .orElseThrow(() -> new 
RuntimeException(String.format("Could not find a case-insensitive match for table name %s", getTableRequest.getTableName().getTableName()))); @@ -256,24 +256,24 @@ private Schema inferSchemaForTable(TableName tableName) logger.info("doGetTable: Retrieving schema for table[{}] from TimeStream using describeQuery[{}].", tableName, describeQuery); - QueryRequest queryRequest = new QueryRequest().withQueryString(describeQuery); + QueryRequest queryRequest = QueryRequest.builder().queryString(describeQuery).build(); SchemaBuilder schemaBuilder = SchemaBuilder.newBuilder(); do { - QueryResult queryResult = tsQuery.query(queryRequest); - for (Row next : queryResult.getRows()) { - List datum = next.getData(); + QueryResponse queryResult = tsQuery.query(queryRequest); + for (Row next : queryResult.rows()) { + List datum = next.data(); if (datum.size() != 3) { throw new RuntimeException("Unexpected datum size " + datum.size() + " while getting schema from datum[" + datum.toString() + "]"); } - Field nextField = TimestreamSchemaUtils.makeField(datum.get(0).getScalarValue(), datum.get(1).getScalarValue()); + Field nextField = TimestreamSchemaUtils.makeField(datum.get(0).scalarValue(), datum.get(1).scalarValue()); schemaBuilder.addField(nextField); } - queryRequest = new QueryRequest().withNextToken(queryResult.getNextToken()); + queryRequest = QueryRequest.builder().nextToken(queryResult.nextToken()).build(); } - while (queryRequest.getNextToken() != null); + while (queryRequest.nextToken() != null); return schemaBuilder.build(); } @@ -300,7 +300,7 @@ public GetTableResponse doGetTable(BlockAllocator blockAllocator, GetTableReques Schema schema = inferSchemaForTable(request.getTableName()); return new GetTableResponse(request.getCatalogName(), request.getTableName(), schema); } - catch (com.amazonaws.services.timestreamquery.model.ValidationException ex) { + catch (software.amazon.awssdk.services.timestreamquery.model.ValidationException ex) { logger.debug("Could not find 
table name matching {} in database {}. Falling back to case-insensitive lookup.", request.getTableName().getTableName(), request.getTableName().getSchemaName()); TableName resolvedTableName = findTableNameIgnoringCase(blockAllocator, request); @@ -319,13 +319,13 @@ public GetTableResponse doGetQueryPassthroughSchema(BlockAllocator allocator, Ge queryPassthrough.verify(request.getQueryPassthroughArguments()); String customerPassedQuery = request.getQueryPassthroughArguments().get(TimestreamQueryPassthrough.QUERY); - QueryRequest queryRequest = new QueryRequest().withQueryString(customerPassedQuery).withMaxRows(1); + QueryRequest queryRequest = QueryRequest.builder().queryString(customerPassedQuery).maxRows(1).build(); // Timestream Query does not provide a way to conduct a dry run or retrieve metadata results without execution. Therefore, we need to "seek" at least once before obtaining metadata. - QueryResult queryResult = tsQuery.query(queryRequest); - List columnInfo = queryResult.getColumnInfo(); + QueryResponse queryResult = tsQuery.query(queryRequest); + List columnInfo = queryResult.columnInfo(); SchemaBuilder schemaBuilder = SchemaBuilder.newBuilder(); for (ColumnInfo column : columnInfo) { - Field nextField = TimestreamSchemaUtils.makeField(column.getName(), column.getType().getScalarType().toLowerCase()); + Field nextField = TimestreamSchemaUtils.makeField(column.name(), column.type().scalarTypeAsString().toLowerCase()); schemaBuilder.addField(nextField); } diff --git a/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandler.java b/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandler.java index a8cc2be021..f25b7d7b41 100644 --- a/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandler.java +++ b/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandler.java @@ -40,12 +40,6 @@ import 
com.amazonaws.athena.connectors.timestream.qpt.TimestreamQueryPassthrough; import com.amazonaws.athena.connectors.timestream.query.QueryFactory; import com.amazonaws.athena.connectors.timestream.query.SelectQueryBuilder; -import com.amazonaws.services.timestreamquery.AmazonTimestreamQuery; -import com.amazonaws.services.timestreamquery.model.Datum; -import com.amazonaws.services.timestreamquery.model.QueryRequest; -import com.amazonaws.services.timestreamquery.model.QueryResult; -import com.amazonaws.services.timestreamquery.model.Row; -import com.amazonaws.services.timestreamquery.model.TimeSeriesDataPoint; import org.apache.arrow.util.VisibleForTesting; import org.apache.arrow.vector.FieldVector; import org.apache.arrow.vector.holders.NullableBigIntHolder; @@ -59,6 +53,12 @@ import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.timestreamquery.TimestreamQueryClient; +import software.amazon.awssdk.services.timestreamquery.model.Datum; +import software.amazon.awssdk.services.timestreamquery.model.QueryRequest; +import software.amazon.awssdk.services.timestreamquery.model.QueryResponse; +import software.amazon.awssdk.services.timestreamquery.model.Row; +import software.amazon.awssdk.services.timestreamquery.model.TimeSeriesDataPoint; import java.time.Instant; import java.time.ZoneId; @@ -85,7 +85,7 @@ public class TimestreamRecordHandler private static final String SOURCE_TYPE = "timestream"; private final QueryFactory queryFactory = new QueryFactory(); - private final AmazonTimestreamQuery tsQuery; + private final TimestreamQueryClient tsQuery; private final TimestreamQueryPassthrough queryPassthrough = new TimestreamQueryPassthrough(); public TimestreamRecordHandler(java.util.Map configOptions) @@ -99,7 +99,7 @@ public TimestreamRecordHandler(java.util.Map configOptions) } @VisibleForTesting - 
protected TimestreamRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, AmazonTimestreamQuery tsQuery, java.util.Map configOptions) + protected TimestreamRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, TimestreamQueryClient tsQuery, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, SOURCE_TYPE, configOptions); this.tsQuery = tsQuery; @@ -135,15 +135,15 @@ protected void readWithConstraint(BlockSpiller spiller, ReadRecordsRequest recor long numRows = 0; do { - QueryResult queryResult = tsQuery.query(new QueryRequest().withQueryString(query).withNextToken(nextToken)); - List data = queryResult.getRows(); + QueryResponse queryResult = tsQuery.query(QueryRequest.builder().queryString(query).nextToken(nextToken).build()); + List data = queryResult.rows(); if (data != null) { numRows += data.size(); for (Row nextRow : data) { spiller.writeRows((Block block, int rowNum) -> rowWriter.writeRow(block, rowNum, nextRow) ? 
1 : 0); } } - nextToken = queryResult.getNextToken(); + nextToken = queryResult.nextToken(); logger.info("readWithConstraint: numRows[{}]", numRows); } while (nextToken != null && !nextToken.isEmpty()); } @@ -158,7 +158,7 @@ private GeneratedRowWriter buildRowWriter(ReadRecordsRequest request) switch (Types.getMinorTypeForArrowType(nextField.getType())) { case VARCHAR: builder.withExtractor(nextField.getName(), (VarCharExtractor) (Object context, NullableVarCharHolder value) -> { - String stringValue = ((Row) context).getData().get(curFieldNum).getScalarValue(); + String stringValue = ((Row) context).data().get(curFieldNum).scalarValue(); if (stringValue != null) { value.isSet = 1; value.value = stringValue; @@ -170,7 +170,7 @@ private GeneratedRowWriter buildRowWriter(ReadRecordsRequest request) break; case FLOAT8: builder.withExtractor(nextField.getName(), (Float8Extractor) (Object context, NullableFloat8Holder value) -> { - String doubleValue = ((Row) context).getData().get(curFieldNum).getScalarValue(); + String doubleValue = ((Row) context).data().get(curFieldNum).scalarValue(); if (doubleValue != null) { value.isSet = 1; value.value = Double.valueOf(doubleValue); @@ -183,12 +183,12 @@ private GeneratedRowWriter buildRowWriter(ReadRecordsRequest request) case BIT: builder.withExtractor(nextField.getName(), (BitExtractor) (Object context, NullableBitHolder value) -> { value.isSet = 1; - value.value = Boolean.valueOf(((Row) context).getData().get(curFieldNum).getScalarValue()) == false ? 0 : 1; + value.value = Boolean.valueOf(((Row) context).data().get(curFieldNum).scalarValue()) == false ? 
0 : 1; }); break; case BIGINT: builder.withExtractor(nextField.getName(), (BigIntExtractor) (Object context, NullableBigIntHolder value) -> { - String longValue = ((Row) context).getData().get(curFieldNum).getScalarValue(); + String longValue = ((Row) context).data().get(curFieldNum).scalarValue(); if (longValue != null) { value.isSet = 1; value.value = Long.valueOf(longValue); @@ -200,7 +200,7 @@ private GeneratedRowWriter buildRowWriter(ReadRecordsRequest request) break; case DATEMILLI: builder.withExtractor(nextField.getName(), (DateMilliExtractor) (Object context, NullableDateMilliHolder value) -> { - String dateMilliValue = ((Row) context).getData().get(curFieldNum).getScalarValue(); + String dateMilliValue = ((Row) context).data().get(curFieldNum).scalarValue(); if (dateMilliValue != null) { value.isSet = 1; value.value = Instant.from(TIMESTAMP_FORMATTER.parse(dateMilliValue)).toEpochMilli(); @@ -230,30 +230,30 @@ private void buildTimeSeriesExtractor(GeneratedRowWriter.RowWriterBuilder builde (FieldVector vector, Extractor extractor, ConstraintProjector constraint) -> (Object context, int rowNum) -> { Row row = (Row) context; - Datum datum = row.getData().get(curFieldNum); + Datum datum = row.data().get(curFieldNum); Field timeField = field.getChildren().get(0).getChildren().get(0); Field valueField = field.getChildren().get(0).getChildren().get(1); - if (datum.getTimeSeriesValue() != null) { + if (datum.timeSeriesValue() != null) { List> values = new ArrayList<>(); - for (TimeSeriesDataPoint nextDatum : datum.getTimeSeriesValue()) { + for (TimeSeriesDataPoint nextDatum : datum.timeSeriesValue()) { Map eventMap = new HashMap<>(); - eventMap.put(timeField.getName(), Instant.from(TIMESTAMP_FORMATTER.parse(nextDatum.getTime())).toEpochMilli()); + eventMap.put(timeField.getName(), Instant.from(TIMESTAMP_FORMATTER.parse(nextDatum.time())).toEpochMilli()); switch (Types.getMinorTypeForArrowType(valueField.getType())) { case FLOAT8: - 
eventMap.put(valueField.getName(), Double.valueOf(nextDatum.getValue().getScalarValue())); + eventMap.put(valueField.getName(), Double.valueOf(nextDatum.value().scalarValue())); break; case BIGINT: - eventMap.put(valueField.getName(), Long.valueOf(nextDatum.getValue().getScalarValue())); + eventMap.put(valueField.getName(), Long.valueOf(nextDatum.value().scalarValue())); break; case INT: - eventMap.put(valueField.getName(), Integer.valueOf(nextDatum.getValue().getScalarValue())); + eventMap.put(valueField.getName(), Integer.valueOf(nextDatum.value().scalarValue())); break; case BIT: eventMap.put(valueField.getName(), - Boolean.valueOf(((Row) context).getData().get(curFieldNum).getScalarValue()) == false ? 0 : 1); + Boolean.valueOf(((Row) context).data().get(curFieldNum).scalarValue()) == false ? 0 : 1); break; } values.add(eventMap); diff --git a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TestUtils.java b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TestUtils.java index b09fccbbbf..5656dccb58 100644 --- a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TestUtils.java +++ b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TestUtils.java @@ -19,32 +19,21 @@ */ package com.amazonaws.athena.connectors.timestream; -import com.amazonaws.athena.connector.lambda.data.BlockUtils; -import com.amazonaws.athena.connector.lambda.data.FieldResolver; -import com.amazonaws.athena.connector.lambda.data.writers.GeneratedRowWriter; -import com.amazonaws.athena.connector.lambda.data.writers.extractors.Extractor; -import com.amazonaws.athena.connector.lambda.domain.predicate.ConstraintProjector; -import com.amazonaws.services.timestreamquery.model.Datum; -import com.amazonaws.services.timestreamquery.model.QueryResult; -import com.amazonaws.services.timestreamquery.model.Row; -import com.amazonaws.services.timestreamquery.model.TimeSeriesDataPoint; -import 
org.apache.arrow.vector.FieldVector; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.types.pojo.Schema; import org.mockito.stubbing.Answer; +import software.amazon.awssdk.services.timestreamquery.model.Datum; +import software.amazon.awssdk.services.timestreamquery.model.QueryResponse; +import software.amazon.awssdk.services.timestreamquery.model.Row; +import software.amazon.awssdk.services.timestreamquery.model.TimeSeriesDataPoint; -import java.text.SimpleDateFormat; import java.time.LocalDateTime; import java.util.ArrayList; -import java.util.Date; -import java.util.HashMap; import java.util.List; -import java.util.Map; import java.util.Random; import java.util.concurrent.atomic.AtomicLong; -import static org.apache.arrow.vector.types.Types.MinorType.FLOAT8; import static org.junit.Assert.assertEquals; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -59,17 +48,17 @@ private TestUtils() {} private static final String[] AZS = {"us-east-1a", "us-east-1b", "us-east-1c", "us-east-1d"}; - public static QueryResult makeMockQueryResult(Schema schemaForRead, int numRows) + public static QueryResponse makeMockQueryResult(Schema schemaForRead, int numRows) { return makeMockQueryResult(schemaForRead, numRows, 100, true); } - public static QueryResult makeMockQueryResult(Schema schemaForRead, int numRows, int maxDataGenerationRow, boolean isRandomAZ) + public static QueryResponse makeMockQueryResult(Schema schemaForRead, int numRows, int maxDataGenerationRow, boolean isRandomAZ) { - QueryResult mockResult = mock(QueryResult.class); + QueryResponse mockResult = mock(QueryResponse.class); final AtomicLong nextToken = new AtomicLong(0); - when(mockResult.getRows()).thenAnswer((Answer>) invocationOnMock -> { + when(mockResult.rows()).thenAnswer((Answer>) invocationOnMock -> { List rows = new ArrayList<>(); for (int i = 0; i < maxDataGenerationRow; i++) { 
nextToken.incrementAndGet(); @@ -78,15 +67,14 @@ public static QueryResult makeMockQueryResult(Schema schemaForRead, int numRows, columnData.add(makeValue(nextField, i, isRandomAZ)); } - Row row = new Row(); - row.setData(columnData); + Row row = Row.builder().data(columnData).build(); rows.add(row); } return rows; } ); - when(mockResult.getNextToken()).thenAnswer((Answer) invocationOnMock -> { + when(mockResult.nextToken()).thenAnswer((Answer) invocationOnMock -> { if (nextToken.get() < numRows) { return String.valueOf(nextToken.get()); } @@ -99,30 +87,30 @@ public static QueryResult makeMockQueryResult(Schema schemaForRead, int numRows, public static Datum makeValue(Field field, int num, boolean isRandomAZ) { - Datum datum = new Datum(); + Datum.Builder datum = Datum.builder(); switch (Types.getMinorTypeForArrowType(field.getType())) { case VARCHAR: if (field.getName().equals("az")) { - datum.setScalarValue(isRandomAZ ? AZS[RAND.nextInt(4)] : "us-east-1a"); + datum.scalarValue(isRandomAZ ? 
AZS[RAND.nextInt(4)] : "us-east-1a"); } else { - datum.setScalarValue(field.getName() + "_" + RAND.nextInt(10_000_000)); + datum.scalarValue(field.getName() + "_" + RAND.nextInt(10_000_000)); } break; case FLOAT8: - datum.setScalarValue(String.valueOf(RAND.nextFloat())); + datum.scalarValue(String.valueOf(RAND.nextFloat())); break; case INT: - datum.setScalarValue(String.valueOf(RAND.nextInt())); + datum.scalarValue(String.valueOf(RAND.nextInt())); break; case BIT: - datum.setScalarValue(String.valueOf(RAND.nextBoolean())); + datum.scalarValue(String.valueOf(RAND.nextBoolean())); break; case BIGINT: - datum.setScalarValue(String.valueOf(RAND.nextLong())); + datum.scalarValue(String.valueOf(RAND.nextLong())); break; case DATEMILLI: - datum.setScalarValue(startDate.plusDays(num).toString().replace('T', ' ')); + datum.scalarValue(startDate.plusDays(num).toString().replace('T', ' ')); break; case LIST: buildTimeSeries(field, datum, num); @@ -131,17 +119,17 @@ public static Datum makeValue(Field field, int num, boolean isRandomAZ) throw new RuntimeException("Unsupported field type[" + field.getType() + "] for field[" + field.getName() + "]"); } - return datum; + return datum.build(); } - private static void buildTimeSeries(Field field, Datum datum, int num) + private static void buildTimeSeries(Field field, Datum.Builder datum, int num) { List dataPoints = new ArrayList<>(); for (int i = 0; i < 10; i++) { - TimeSeriesDataPoint dataPoint = new TimeSeriesDataPoint(); - Datum dataPointValue = new Datum(); + TimeSeriesDataPoint.Builder dataPoint = TimeSeriesDataPoint.builder(); + Datum.Builder dataPointValue = Datum.builder(); - dataPoint.setTime(startDate.plusDays(num).toString().replace('T', ' ')); + dataPoint.time(startDate.plusDays(num).toString().replace('T', ' ')); /** * Presently we only support TimeSeries as LIST> @@ -152,22 +140,22 @@ private static void buildTimeSeries(Field field, Datum datum, int num) switch 
(Types.getMinorTypeForArrowType(baseSeriesType.getType())) { case FLOAT8: - dataPointValue.setScalarValue(String.valueOf(RAND.nextFloat())); + dataPointValue.scalarValue(String.valueOf(RAND.nextFloat())); break; case BIT: - dataPointValue.setScalarValue(String.valueOf(RAND.nextBoolean())); + dataPointValue.scalarValue(String.valueOf(RAND.nextBoolean())); break; case INT: - dataPointValue.setScalarValue(String.valueOf(RAND.nextInt())); + dataPointValue.scalarValue(String.valueOf(RAND.nextInt())); break; case BIGINT: - dataPointValue.setScalarValue(String.valueOf(RAND.nextLong())); + dataPointValue.scalarValue(String.valueOf(RAND.nextLong())); break; } - dataPoint.setValue(dataPointValue); - dataPoints.add(dataPoint); + dataPoint.value(dataPointValue.build()); + dataPoints.add(dataPoint.build()); } - datum.setTimeSeriesValue(dataPoints); + datum.timeSeriesValue(dataPoints); } } diff --git a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamClientBuilderTest.java b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamClientBuilderTest.java index c3c4d4a486..de4f83b0bb 100644 --- a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamClientBuilderTest.java +++ b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamClientBuilderTest.java @@ -19,8 +19,9 @@ */ package com.amazonaws.athena.connectors.timestream; -import com.amazonaws.ClientConfiguration; import org.junit.Test; +import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; +import software.amazon.awssdk.core.client.config.SdkAdvancedClientOption; import static org.junit.Assert.assertEquals; @@ -29,7 +30,7 @@ public class TimestreamClientBuilderTest { @Test public void testUserAgentField() { - ClientConfiguration clientConfiguration = TimestreamClientBuilder.buildClientConfiguration("timestream"); - assertEquals("aws-athena-timestream-connector", 
clientConfiguration.getUserAgentPrefix()); + ClientOverrideConfiguration clientConfiguration = TimestreamClientBuilder.buildClientConfiguration("timestream"); + assertEquals("aws-athena-timestream-connector", clientConfiguration.advancedOption(SdkAdvancedClientOption.USER_AGENT_PREFIX).get()); } } diff --git a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandlerTest.java b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandlerTest.java index 0744aae186..9262c45f68 100644 --- a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandlerTest.java +++ b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandlerTest.java @@ -40,17 +40,6 @@ import com.amazonaws.athena.connector.lambda.metadata.MetadataResponse; import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; -import com.amazonaws.services.timestreamquery.AmazonTimestreamQuery; -import com.amazonaws.services.timestreamquery.model.Datum; -import com.amazonaws.services.timestreamquery.model.QueryRequest; -import com.amazonaws.services.timestreamquery.model.QueryResult; -import com.amazonaws.services.timestreamquery.model.Row; -import com.amazonaws.services.timestreamwrite.AmazonTimestreamWrite; -import com.amazonaws.services.timestreamwrite.model.Database; -import com.amazonaws.services.timestreamwrite.model.ListDatabasesRequest; -import com.amazonaws.services.timestreamwrite.model.ListDatabasesResult; -import com.amazonaws.services.timestreamwrite.model.ListTablesResult; -import com.amazonaws.services.timestreamwrite.model.Table; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.types.pojo.Schema; @@ -68,6 +57,16 @@ import software.amazon.awssdk.services.glue.model.Column; import 
software.amazon.awssdk.services.glue.model.StorageDescriptor; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.timestreamquery.TimestreamQueryClient; +import software.amazon.awssdk.services.timestreamquery.model.Datum; +import software.amazon.awssdk.services.timestreamquery.model.QueryRequest; +import software.amazon.awssdk.services.timestreamquery.model.QueryResponse; +import software.amazon.awssdk.services.timestreamquery.model.Row; +import software.amazon.awssdk.services.timestreamwrite.TimestreamWriteClient; +import software.amazon.awssdk.services.timestreamwrite.model.Database; +import software.amazon.awssdk.services.timestreamwrite.model.ListDatabasesRequest; +import software.amazon.awssdk.services.timestreamwrite.model.ListDatabasesResponse; +import software.amazon.awssdk.services.timestreamwrite.model.Table; import java.util.ArrayList; import java.util.Collections; @@ -77,9 +76,9 @@ import static com.amazonaws.athena.connector.lambda.domain.predicate.Constraints.DEFAULT_NO_LIMIT; import static com.amazonaws.athena.connector.lambda.handlers.GlueMetadataHandler.VIEW_METADATA_FIELD; import static com.amazonaws.athena.connector.lambda.metadata.ListTablesRequest.UNLIMITED_PAGE_SIZE_VALUE; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import static org.mockito.ArgumentMatchers.nullable; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -99,9 +98,9 @@ public class TimestreamMetadataHandlerTest @Mock protected AthenaClient mockAthena; @Mock - protected AmazonTimestreamQuery mockTsQuery; + protected TimestreamQueryClient mockTsQuery; @Mock - protected AmazonTimestreamWrite mockTsMeta; + protected TimestreamWriteClient mockTsMeta; @Mock protected GlueClient mockGlue; @@ -140,26 +139,26 @@ public void 
doListSchemaNames() String newNextToken = null; List databases = new ArrayList<>(); - if (request.getNextToken() == null) { + if (request.nextToken() == null) { for (int i = 0; i < 10; i++) { - databases.add(new Database().withDatabaseName("database_" + i)); + databases.add(Database.builder().databaseName("database_" + i).build()); } newNextToken = "1"; } - else if (request.getNextToken().equals("1")) { + else if (request.nextToken().equals("1")) { for (int i = 10; i < 100; i++) { - databases.add(new Database().withDatabaseName("database_" + i)); + databases.add(Database.builder().databaseName("database_" + i).build()); } newNextToken = "2"; } - else if (request.getNextToken().equals("2")) { + else if (request.nextToken().equals("2")) { for (int i = 100; i < 1000; i++) { - databases.add(new Database().withDatabaseName("database_" + i)); + databases.add(Database.builder().databaseName("database_" + i).build()); } newNextToken = null; } - return new ListDatabasesResult().withDatabases(databases).withNextToken(newNextToken); + return ListDatabasesResponse.builder().databases(databases).nextToken(newNextToken).build(); }); ListSchemasRequest req = new ListSchemasRequest(identity, "queryId", "default"); @@ -182,33 +181,33 @@ public void doListTables() { logger.info("doListTables - enter"); - when(mockTsMeta.listTables(nullable(com.amazonaws.services.timestreamwrite.model.ListTablesRequest.class))) + when(mockTsMeta.listTables(nullable(software.amazon.awssdk.services.timestreamwrite.model.ListTablesRequest.class))) .thenAnswer((InvocationOnMock invocation) -> { - com.amazonaws.services.timestreamwrite.model.ListTablesRequest request = - invocation.getArgument(0, com.amazonaws.services.timestreamwrite.model.ListTablesRequest.class); + software.amazon.awssdk.services.timestreamwrite.model.ListTablesRequest request = + invocation.getArgument(0, software.amazon.awssdk.services.timestreamwrite.model.ListTablesRequest.class); String newNextToken = null; List
tables = new ArrayList<>(); - if (request.getNextToken() == null) { + if (request.nextToken() == null) { for (int i = 0; i < 10; i++) { - tables.add(new Table().withDatabaseName(request.getDatabaseName()).withTableName("table_" + i)); + tables.add(Table.builder().databaseName(request.databaseName()).tableName("table_" + i).build()); } newNextToken = "1"; } - else if (request.getNextToken().equals("1")) { + else if (request.nextToken().equals("1")) { for (int i = 10; i < 100; i++) { - tables.add(new Table().withDatabaseName(request.getDatabaseName()).withTableName("table_" + i)); + tables.add(Table.builder().databaseName(request.databaseName()).tableName("table_" + i).build()); } newNextToken = "2"; } - else if (request.getNextToken().equals("2")) { + else if (request.nextToken().equals("2")) { for (int i = 100; i < 1000; i++) { - tables.add(new Table().withDatabaseName(request.getDatabaseName()).withTableName("table_" + i)); + tables.add(Table.builder().databaseName(request.databaseName()).tableName("table_" + i).build()); } newNextToken = null; } - return new ListTablesResult().withTables(tables).withNextToken(newNextToken); + return software.amazon.awssdk.services.timestreamwrite.model.ListTablesResponse.builder().tables(tables).nextToken(newNextToken).build(); }); ListTablesRequest req = new ListTablesRequest(identity, "queryId", "default", defaultSchema, @@ -218,7 +217,7 @@ else if (request.getNextToken().equals("2")) { assertEquals(1000, res.getTables().size()); verify(mockTsMeta, times(3)) - .listTables(nullable(com.amazonaws.services.timestreamwrite.model.ListTablesRequest.class)); + .listTables(nullable(software.amazon.awssdk.services.timestreamwrite.model.ListTablesRequest.class)); Iterator schemaItr = res.getTables().iterator(); for (int i = 0; i < 1000; i++) { @@ -241,24 +240,24 @@ public void doGetTable() when(mockTsQuery.query(nullable(QueryRequest.class))).thenAnswer((InvocationOnMock invocation) -> { QueryRequest request = invocation.getArgument(0, 
QueryRequest.class); - assertEquals("DESCRIBE \"default\".\"table1\"", request.getQueryString()); + assertEquals("DESCRIBE \"default\".\"table1\"", request.queryString()); List rows = new ArrayList<>(); //TODO: Add types here - rows.add(new Row().withData(new Datum().withScalarValue("availability_zone"), - new Datum().withScalarValue("varchar"), - new Datum().withScalarValue("dimension"))); - rows.add(new Row().withData(new Datum().withScalarValue("measure_value"), - new Datum().withScalarValue("double"), - new Datum().withScalarValue("measure_value"))); - rows.add(new Row().withData(new Datum().withScalarValue("measure_name"), - new Datum().withScalarValue("varchar"), - new Datum().withScalarValue("measure_name"))); - rows.add(new Row().withData(new Datum().withScalarValue("time"), - new Datum().withScalarValue("timestamp"), - new Datum().withScalarValue("timestamp"))); - - return new QueryResult().withRows(rows); + rows.add(Row.builder().data(Datum.builder().scalarValue("availability_zone").build(), + Datum.builder().scalarValue("varchar").build(), + Datum.builder().scalarValue("dimension").build()).build()); + rows.add(Row.builder().data(Datum.builder().scalarValue("measure_value").build(), + Datum.builder().scalarValue("double").build(), + Datum.builder().scalarValue("measure_value").build()).build()); + rows.add(Row.builder().data(Datum.builder().scalarValue("measure_name").build(), + Datum.builder().scalarValue("varchar").build(), + Datum.builder().scalarValue("measure_name").build()).build()); + rows.add(Row.builder().data(Datum.builder().scalarValue("time").build(), + Datum.builder().scalarValue("timestamp").build(), + Datum.builder().scalarValue("timestamp").build()).build()); + + return QueryResponse.builder().rows(rows).build(); }); GetTableRequest req = new GetTableRequest(identity, diff --git a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandlerTest.java 
b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandlerTest.java index d7ad28e816..f3daeaff80 100644 --- a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandlerTest.java +++ b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandlerTest.java @@ -40,9 +40,6 @@ import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory; import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; -import com.amazonaws.services.timestreamquery.AmazonTimestreamQuery; -import com.amazonaws.services.timestreamquery.model.QueryRequest; -import com.amazonaws.services.timestreamquery.model.QueryResult; import com.google.common.io.ByteStreams; import org.apache.arrow.vector.complex.reader.FieldReader; import org.apache.arrow.vector.types.Types; @@ -68,6 +65,9 @@ import software.amazon.awssdk.services.s3.model.PutObjectRequest; import software.amazon.awssdk.services.s3.model.PutObjectResponse; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.timestreamquery.TimestreamQueryClient; +import software.amazon.awssdk.services.timestreamquery.model.QueryRequest; +import software.amazon.awssdk.services.timestreamquery.model.QueryResponse; import java.io.ByteArrayInputStream; import java.io.IOException; @@ -115,7 +115,7 @@ public class TimestreamRecordHandlerTest public TestName testName = new TestName(); @Mock - private AmazonTimestreamQuery mockClient; + private TimestreamQueryClient mockClient; @Mock private SecretsManagerClient mockSecretsManager; @@ -198,11 +198,11 @@ public void doReadRecordsNoSpill() int numRowsGenerated = 1_000; String expectedQuery = "SELECT measure_name, measure_value::double, az, time, hostname, region FROM \"my_schema\".\"my_table\" WHERE (\"az\" IN ('us-east-1a','us-east-1b'))"; - 
QueryResult mockResult = makeMockQueryResult(schemaForRead, numRowsGenerated); + QueryResponse mockResult = makeMockQueryResult(schemaForRead, numRowsGenerated); when(mockClient.query(nullable(QueryRequest.class))) - .thenAnswer((Answer) invocationOnMock -> { + .thenAnswer((Answer) invocationOnMock -> { QueryRequest request = (QueryRequest) invocationOnMock.getArguments()[0]; - assertEquals(expectedQuery, request.getQueryString().replace("\n", "")); + assertEquals(expectedQuery, request.queryString().replace("\n", "")); return mockResult; } ); @@ -253,11 +253,11 @@ public void doReadRecordsSpill() { String expectedQuery = "SELECT measure_name, measure_value::double, az, time, hostname, region FROM \"my_schema\".\"my_table\" WHERE (\"az\" IN ('us-east-1a','us-east-1b'))"; - QueryResult mockResult = makeMockQueryResult(schemaForRead, 100_000); + QueryResponse mockResult = makeMockQueryResult(schemaForRead, 100_000); when(mockClient.query(nullable(QueryRequest.class))) - .thenAnswer((Answer) invocationOnMock -> { + .thenAnswer((Answer) invocationOnMock -> { QueryRequest request = (QueryRequest) invocationOnMock.getArguments()[0]; - assertEquals(expectedQuery, request.getQueryString().replace("\n", "")); + assertEquals(expectedQuery, request.queryString().replace("\n", "")); return mockResult; } ); @@ -327,11 +327,11 @@ public void readRecordsView() String expectedQuery = "WITH t1 AS ( select measure_name, az,sum(\"measure_value::double\") as value, count(*) as num_samples from \"my_schema\".\"my_table\" group by measure_name, az ) SELECT measure_name, az, value, num_samples FROM t1 WHERE (\"az\" IN ('us-east-1a','us-east-1b'))"; - QueryResult mockResult = makeMockQueryResult(schemaForReadView, 1_000); + QueryResponse mockResult = makeMockQueryResult(schemaForReadView, 1_000); when(mockClient.query(nullable(QueryRequest.class))) - .thenAnswer((Answer) invocationOnMock -> { + .thenAnswer((Answer) invocationOnMock -> { QueryRequest request = (QueryRequest) 
invocationOnMock.getArguments()[0]; - assertEquals(expectedQuery, request.getQueryString().replace("\n", "")); + assertEquals(expectedQuery, request.queryString().replace("\n", "")); return mockResult; } ); @@ -394,11 +394,11 @@ public void readRecordsTimeSeriesView() String expectedQuery = "WITH t1 AS ( select az, hostname, region, CREATE_TIME_SERIES(time, measure_value::double) as cpu_utilization from \"my_schema\".\"my_table\" WHERE measure_name = 'cpu_utilization' GROUP BY measure_name, az, hostname, region ) SELECT region, az, hostname, cpu_utilization FROM t1 WHERE (\"az\" IN ('us-east-1a','us-east-1b'))"; - QueryResult mockResult = makeMockQueryResult(schemaForReadView, 1_000); + QueryResponse mockResult = makeMockQueryResult(schemaForReadView, 1_000); when(mockClient.query(nullable(QueryRequest.class))) - .thenAnswer((Answer) invocationOnMock -> { + .thenAnswer((Answer) invocationOnMock -> { QueryRequest request = (QueryRequest) invocationOnMock.getArguments()[0]; - assertEquals("actual: " + request.getQueryString(), expectedQuery, request.getQueryString().replace("\n", "")); + assertEquals("actual: " + request.queryString(), expectedQuery, request.queryString().replace("\n", "")); return mockResult; } ); @@ -449,11 +449,11 @@ public void doReadRecordsNoSpillValidateTimeStamp() int numRows = 10; String expectedQuery = "SELECT measure_name, measure_value::double, az, time, hostname, region FROM \"my_schema\".\"my_table\" WHERE (\"az\" IN ('us-east-1a'))"; - QueryResult mockResult = makeMockQueryResult(schemaForRead, numRows, numRows, false); + QueryResponse mockResult = makeMockQueryResult(schemaForRead, numRows, numRows, false); when(mockClient.query(nullable(QueryRequest.class))) - .thenAnswer((Answer) invocationOnMock -> { + .thenAnswer((Answer) invocationOnMock -> { QueryRequest request = (QueryRequest) invocationOnMock.getArguments()[0]; - assertEquals(expectedQuery, request.getQueryString().replace("\n", "")); + assertEquals(expectedQuery, 
request.queryString().replace("\n", "")); return mockResult; } ); diff --git a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/integ/TimestreamIntegTest.java b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/integ/TimestreamIntegTest.java index 66dfd428ec..10f6575220 100644 --- a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/integ/TimestreamIntegTest.java +++ b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/integ/TimestreamIntegTest.java @@ -21,10 +21,6 @@ import com.amazonaws.athena.connector.integ.IntegrationTestBase; import com.amazonaws.athena.connectors.timestream.TimestreamClientBuilder; -import com.amazonaws.services.timestreamwrite.AmazonTimestreamWrite; -import com.amazonaws.services.timestreamwrite.model.CreateTableRequest; -import com.amazonaws.services.timestreamwrite.model.DeleteTableRequest; -import com.amazonaws.services.timestreamwrite.model.MeasureValueType; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.slf4j.Logger; @@ -38,6 +34,10 @@ import software.amazon.awscdk.services.iam.PolicyStatement; import software.amazon.awscdk.services.timestream.CfnDatabase; import software.amazon.awssdk.services.athena.model.Row; +import software.amazon.awssdk.services.timestreamwrite.TimestreamWriteClient; +import software.amazon.awssdk.services.timestreamwrite.model.CreateTableRequest; +import software.amazon.awssdk.services.timestreamwrite.model.DeleteTableRequest; +import software.amazon.awssdk.services.timestreamwrite.model.MeasureValueType; import java.util.ArrayList; import java.util.List; @@ -58,7 +58,7 @@ public class TimestreamIntegTest extends IntegrationTestBase private final String jokePunchline; private final String lambdaFunctionName; private final long[] timeStream; - private final AmazonTimestreamWrite timestreamWriteClient; + private final TimestreamWriteClient 
timestreamWriteClient; public TimestreamIntegTest() { @@ -123,9 +123,9 @@ private void createTimestreamTable() logger.info("Creating the Timestream table: {}", timestreamTableName); logger.info("----------------------------------------------------"); - timestreamWriteClient.createTable(new CreateTableRequest() - .withDatabaseName(timestreamDbName) - .withTableName(timestreamTableName)); + timestreamWriteClient.createTable(CreateTableRequest.builder() + .databaseName(timestreamDbName) + .tableName(timestreamTableName).build()); } /** @@ -138,16 +138,16 @@ private void deleteTimstreamTable() logger.info("----------------------------------------------------"); try { - timestreamWriteClient.deleteTable(new DeleteTableRequest() - .withDatabaseName(timestreamDbName) - .withTableName(timestreamTableName)); + timestreamWriteClient.deleteTable(DeleteTableRequest.builder() + .databaseName(timestreamDbName) + .tableName(timestreamTableName).build()); } catch (Exception e) { // Do not rethrow here. 
logger.error("Unable to delete Timestream table: " + e.getMessage(), e); } finally { - timestreamWriteClient.shutdown(); + timestreamWriteClient.close(); } } diff --git a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/integ/TimestreamWriteRecordRequestBuilder.java b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/integ/TimestreamWriteRecordRequestBuilder.java index 6c9b8acddc..73a9e63bd5 100644 --- a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/integ/TimestreamWriteRecordRequestBuilder.java +++ b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/integ/TimestreamWriteRecordRequestBuilder.java @@ -19,11 +19,11 @@ */ package com.amazonaws.athena.connectors.timestream.integ; -import com.amazonaws.services.timestreamwrite.model.Dimension; -import com.amazonaws.services.timestreamwrite.model.MeasureValueType; -import com.amazonaws.services.timestreamwrite.model.Record; -import com.amazonaws.services.timestreamwrite.model.TimeUnit; -import com.amazonaws.services.timestreamwrite.model.WriteRecordsRequest; +import software.amazon.awssdk.services.timestreamwrite.model.Dimension; +import software.amazon.awssdk.services.timestreamwrite.model.MeasureValueType; +import software.amazon.awssdk.services.timestreamwrite.model.Record; +import software.amazon.awssdk.services.timestreamwrite.model.TimeUnit; +import software.amazon.awssdk.services.timestreamwrite.model.WriteRecordsRequest; import java.util.ArrayList; import java.util.List; @@ -104,14 +104,14 @@ public TimestreamWriteRecordRequestBuilder withRecord(Map column long timeMillis) { List dimensions = new ArrayList<>(); - columns.forEach((k, v) -> dimensions.add(new Dimension().withName(k).withValue(v))); - records.add(new Record() - .withDimensions(dimensions) - .withMeasureName(measureName) - .withMeasureValue(measureValue) - .withMeasureValueType(measureValueType) - .withTime(String.valueOf(timeMillis)) - 
.withTimeUnit(TimeUnit.MILLISECONDS)); + columns.forEach((k, v) -> dimensions.add(Dimension.builder().name(k).value(v).build())); + records.add(Record.builder() + .dimensions(dimensions) + .measureName(measureName) + .measureValue(measureValue) + .measureValueType(measureValueType) + .time(String.valueOf(timeMillis)) + .timeUnit(TimeUnit.MILLISECONDS).build()); return this; } @@ -121,9 +121,9 @@ public TimestreamWriteRecordRequestBuilder withRecord(Map column */ public WriteRecordsRequest build() { - return new WriteRecordsRequest() - .withDatabaseName(databaseName) - .withTableName(tableName) - .withRecords(records); + return WriteRecordsRequest.builder() + .databaseName(databaseName) + .tableName(tableName) + .records(records).build(); } } From 164ad928369ed8cba16dda74c033a8267ddcdcd4 Mon Sep 17 00:00:00 2001 From: ejeffrli <144148373+ejeffrli@users.noreply.github.com> Date: Wed, 18 Sep 2024 10:08:52 -0400 Subject: [PATCH 29/87] Implement EnvironmentProperties for remaining non-jdbc connectors (#12) * Implement EnvironmentProperties for remaining non-jdbc connectors --- .../aws/cmdb/AwsCmdbCompositeHandler.java | 4 +- .../ClouderaHiveEnvironmentProperties.java | 13 +++-- .../cloudera/HiveCompositeHandler.java | 1 - .../cloudera/ImpalaCompositeHandler.java | 1 - .../ImpalaEnvironmentProperties.java | 12 ++++- .../metrics/MetricsCompositeHandler.java | 4 +- .../CloudwatchCompositeHandler.java | 4 +- .../DataLakeGen2CompositeHandler.java | 1 - .../DataLakeGen2EnvironmentProperties.java | 26 ++++++++- .../db2as400/Db2As400CompositeHandler.java | 1 - .../Db2As400EnvironmentProperties.java | 13 +++-- .../connectors/db2/Db2CompositeHandler.java | 3 +- .../db2}/Db2EnvironmentProperties.java | 6 ++- .../docdb/DocDBCompositeHandler.java | 3 +- .../docdb/DocDBEnvironmentProperties.java | 46 ++++++++++++++++ .../dynamodb/DynamoDBCompositeHandler.java | 4 +- .../ElasticsearchCompositeHandler.java | 4 +- .../connection/EnvironmentConstants.java | 54 +++++++++++++++++++ 
.../connection/EnvironmentProperties.java | 11 ++-- .../connectors/gcs/GcsCompositeHandler.java | 5 +- .../gcs/GcsEnvironmentProperties.java | 40 ++++++++++++++ .../bigquery/BigQueryCompositeHandler.java | 5 +- .../BigQueryEnvironmentProperties.java | 45 ++++++++++++++++ .../hbase/HbaseCompositeHandler.java | 3 +- .../hbase/HbaseConnectionFactory.java | 3 +- .../hbase/HbaseEnvironmentProperties.java | 44 +++++++++++++++ .../connection/HbaseConnectionFactory.java | 4 +- .../hbase/integ/HbaseTableUtils.java | 4 +- .../hortonworks/HiveCompositeHandler.java | 1 - .../HortonworksEnvironmentProperties.java | 4 +- .../jdbc}/JdbcEnvironmentProperties.java | 20 ++++--- .../msk/AmazonMskCompositeHandler.java | 3 +- .../msk/AmazonMskEnvironmentProperties.java | 50 +++++++++++++++++ .../mysql/MySqlCompositeHandler.java | 1 - .../mysql}/MySqlEnvironmentProperties.java | 4 +- .../athena/connectors/neptune/Constants.java | 1 + .../neptune/NeptuneCompositeHandler.java | 3 +- .../neptune/NeptuneEnvironmentProperties.java | 49 +++++++++++++++++ .../oracle/OracleCompositeHandler.java | 1 - .../oracle}/OracleEnvironmentProperties.java | 7 ++- .../PostGreSqlCompositeHandler.java | 1 - .../PostGreSqlEnvironmentProperties.java | 4 +- .../redis/RedisCompositeHandler.java | 4 +- .../saphana/SaphanaCompositeHandler.java | 1 - .../SaphanaEnvironmentProperties.java | 4 +- .../snowflake/SnowflakeCompositeHandler.java | 1 - .../SnowflakeEnvironmentProperties.java | 10 ++-- .../sqlserver/SqlServerCompositeHandler.java | 1 - .../SqlServerEnvironmentProperties.java | 6 ++- .../synapse/SynapseCompositeHandler.java | 1 - .../SynapseEnvironmentProperties.java | 26 ++++++++- .../teradata/TeradataCompositeHandler.java | 3 +- .../TeradataEnvironmentProperties.java | 6 ++- .../TimestreamCompositeHandler.java | 4 +- .../tpcds/TPCDSCompositeHandler.java | 4 +- .../vertica/VerticaCompositeHandler.java | 1 - .../VerticaEnvironmentProperties.java | 4 +- 57 files changed, 496 insertions(+), 93 deletions(-) 
rename {athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection => athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera}/ClouderaHiveEnvironmentProperties.java (79%) rename {athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection => athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera}/ImpalaEnvironmentProperties.java (73%) rename {athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection => athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2}/DataLakeGen2EnvironmentProperties.java (57%) rename {athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection => athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400}/Db2As400EnvironmentProperties.java (75%) rename {athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection => athena-db2/src/main/java/com/amazonaws/athena/connectors/db2}/Db2EnvironmentProperties.java (84%) create mode 100644 athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBEnvironmentProperties.java create mode 100644 athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentConstants.java create mode 100644 athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsEnvironmentProperties.java create mode 100644 athena-google-bigquery/src/main/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryEnvironmentProperties.java create mode 100644 athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseEnvironmentProperties.java rename {athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection => athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks}/HortonworksEnvironmentProperties.java (88%) rename {athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection => 
athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc}/JdbcEnvironmentProperties.java (71%) create mode 100644 athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/AmazonMskEnvironmentProperties.java rename {athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection => athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql}/MySqlEnvironmentProperties.java (88%) create mode 100644 athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/NeptuneEnvironmentProperties.java rename {athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection => athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle}/OracleEnvironmentProperties.java (82%) rename {athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection => athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql}/PostGreSqlEnvironmentProperties.java (88%) rename {athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection => athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana}/SaphanaEnvironmentProperties.java (89%) rename {athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection => athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake}/SnowflakeEnvironmentProperties.java (77%) rename {athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection => athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver}/SqlServerEnvironmentProperties.java (85%) rename {athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection => athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse}/SynapseEnvironmentProperties.java (57%) rename {athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection => athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata}/TeradataEnvironmentProperties.java 
(85%) rename {athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection => athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica}/VerticaEnvironmentProperties.java (88%) diff --git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbCompositeHandler.java b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbCompositeHandler.java index a7d58bf7ae..02549fff2a 100644 --- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbCompositeHandler.java +++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbCompositeHandler.java @@ -19,7 +19,7 @@ */ package com.amazonaws.athena.connectors.aws.cmdb; -import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; +import com.amazonaws.athena.connector.lambda.connection.EnvironmentProperties; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -31,6 +31,6 @@ public class AwsCmdbCompositeHandler { public AwsCmdbCompositeHandler() { - super(new AwsCmdbMetadataHandler(GlueConnectionUtils.getGlueConnection()), new AwsCmdbRecordHandler(GlueConnectionUtils.getGlueConnection())); + super(new AwsCmdbMetadataHandler(new EnvironmentProperties().createEnvironment()), new AwsCmdbRecordHandler(new EnvironmentProperties().createEnvironment())); } } diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/ClouderaHiveEnvironmentProperties.java b/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/ClouderaHiveEnvironmentProperties.java similarity index 79% rename from athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/ClouderaHiveEnvironmentProperties.java rename to athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/ClouderaHiveEnvironmentProperties.java index dfc1272564..f53c6001bb 100644 --- 
a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/ClouderaHiveEnvironmentProperties.java +++ b/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/ClouderaHiveEnvironmentProperties.java @@ -17,16 +17,19 @@ * limitations under the License. * #L% */ -package com.amazonaws.athena.connector.lambda.connection; +package com.amazonaws.athena.connectors.cloudera; + +import com.amazonaws.athena.connectors.jdbc.JdbcEnvironmentProperties; import java.util.Map; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.HIVE_CONFS; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.HIVE_VARS; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.SECRET_NAME; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.SESSION_CONFS; + public class ClouderaHiveEnvironmentProperties extends JdbcEnvironmentProperties { - private static final String SESSION_CONFS = "SESSION_CONFS"; - private static final String HIVE_CONFS = "HIVE_CONFS"; - private static final String HIVE_VARS = "HIVE_VARS"; - @Override protected String getConnectionStringPrefix(Map connectionProperties) { diff --git a/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveCompositeHandler.java b/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveCompositeHandler.java index 832376bdb9..505bcc0e67 100644 --- a/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveCompositeHandler.java +++ b/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveCompositeHandler.java @@ -19,7 +19,6 @@ */ package com.amazonaws.athena.connectors.cloudera; -import com.amazonaws.athena.connector.lambda.connection.ClouderaHiveEnvironmentProperties; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** diff --git 
a/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaCompositeHandler.java b/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaCompositeHandler.java index e827d4d877..45664b1be3 100644 --- a/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaCompositeHandler.java +++ b/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaCompositeHandler.java @@ -20,7 +20,6 @@ */ package com.amazonaws.athena.connectors.cloudera; -import com.amazonaws.athena.connector.lambda.connection.ImpalaEnvironmentProperties; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/ImpalaEnvironmentProperties.java b/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaEnvironmentProperties.java similarity index 73% rename from athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/ImpalaEnvironmentProperties.java rename to athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaEnvironmentProperties.java index f1f372e08b..a450056740 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/ImpalaEnvironmentProperties.java +++ b/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaEnvironmentProperties.java @@ -17,15 +17,23 @@ * limitations under the License. 
* #L% */ -package com.amazonaws.athena.connector.lambda.connection; +package com.amazonaws.athena.connectors.cloudera; + +import com.amazonaws.athena.connectors.jdbc.JdbcEnvironmentProperties; import java.util.Map; -public class ImpalaEnvironmentProperties extends SaphanaEnvironmentProperties +public class ImpalaEnvironmentProperties extends JdbcEnvironmentProperties { @Override protected String getConnectionStringPrefix(Map connectionProperties) { return "impala://jdbc:impala://"; } + + @Override + protected String getDatabase(Map connectionProperties) + { + return "/"; + } } diff --git a/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsCompositeHandler.java b/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsCompositeHandler.java index 2fe68ea0d0..ebee6b84e6 100644 --- a/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsCompositeHandler.java +++ b/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsCompositeHandler.java @@ -19,7 +19,7 @@ */ package com.amazonaws.athena.connectors.cloudwatch.metrics; -import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; +import com.amazonaws.athena.connector.lambda.connection.EnvironmentProperties; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -31,6 +31,6 @@ public class MetricsCompositeHandler { public MetricsCompositeHandler() { - super(new MetricsMetadataHandler(GlueConnectionUtils.getGlueConnection()), new MetricsRecordHandler(GlueConnectionUtils.getGlueConnection())); + super(new MetricsMetadataHandler(new EnvironmentProperties().createEnvironment()), new MetricsRecordHandler(new EnvironmentProperties().createEnvironment())); } } diff --git a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchCompositeHandler.java 
b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchCompositeHandler.java index 561a7a25cd..99719f1098 100644 --- a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchCompositeHandler.java +++ b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchCompositeHandler.java @@ -19,7 +19,7 @@ */ package com.amazonaws.athena.connectors.cloudwatch; -import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; +import com.amazonaws.athena.connector.lambda.connection.EnvironmentProperties; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -31,6 +31,6 @@ public class CloudwatchCompositeHandler { public CloudwatchCompositeHandler() { - super(new CloudwatchMetadataHandler(GlueConnectionUtils.getGlueConnection()), new CloudwatchRecordHandler(GlueConnectionUtils.getGlueConnection())); + super(new CloudwatchMetadataHandler(new EnvironmentProperties().createEnvironment()), new CloudwatchRecordHandler(new EnvironmentProperties().createEnvironment())); } } diff --git a/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2CompositeHandler.java b/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2CompositeHandler.java index 8a8cd495d2..9eb6d61bc9 100644 --- a/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2CompositeHandler.java +++ b/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2CompositeHandler.java @@ -19,7 +19,6 @@ */ package com.amazonaws.athena.connectors.datalakegen2; -import com.amazonaws.athena.connector.lambda.connection.DataLakeGen2EnvironmentProperties; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/DataLakeGen2EnvironmentProperties.java 
b/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2EnvironmentProperties.java similarity index 57% rename from athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/DataLakeGen2EnvironmentProperties.java rename to athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2EnvironmentProperties.java index fd929e1bc1..dfecee2fff 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/DataLakeGen2EnvironmentProperties.java +++ b/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2EnvironmentProperties.java @@ -17,15 +17,37 @@ * limitations under the License. * #L% */ -package com.amazonaws.athena.connector.lambda.connection; +package com.amazonaws.athena.connectors.datalakegen2; + +import com.amazonaws.athena.connectors.jdbc.JdbcEnvironmentProperties; import java.util.Map; -public class DataLakeGen2EnvironmentProperties extends SqlServerEnvironmentProperties +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.DATABASE; + +public class DataLakeGen2EnvironmentProperties extends JdbcEnvironmentProperties { @Override protected String getConnectionStringPrefix(Map connectionProperties) { return "datalakegentwo://jdbc:sqlserver://"; } + + @Override + protected String getDatabase(Map connectionProperties) + { + return ";databaseName=" + connectionProperties.get(DATABASE); + } + + @Override + protected String getJdbcParametersSeparator() + { + return ";"; + } + + @Override + protected String getDelimiter() + { + return ";"; + } } diff --git a/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400CompositeHandler.java b/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400CompositeHandler.java index 04dcb0828c..f0e0a8ea04 100644 --- 
a/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400CompositeHandler.java +++ b/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400CompositeHandler.java @@ -19,7 +19,6 @@ */ package com.amazonaws.athena.connectors.db2as400; -import com.amazonaws.athena.connector.lambda.connection.Db2As400EnvironmentProperties; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/Db2As400EnvironmentProperties.java b/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400EnvironmentProperties.java similarity index 75% rename from athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/Db2As400EnvironmentProperties.java rename to athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400EnvironmentProperties.java index 09061c11ab..46dcde3b14 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/Db2As400EnvironmentProperties.java +++ b/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400EnvironmentProperties.java @@ -17,22 +17,27 @@ * limitations under the License. 
* #L% */ -package com.amazonaws.athena.connector.lambda.connection; +package com.amazonaws.athena.connectors.db2as400; + +import com.amazonaws.athena.connector.lambda.connection.EnvironmentProperties; import java.util.HashMap; import java.util.Map; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.DEFAULT; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.HOST; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.JDBC_PARAMS; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.SECRET_NAME; + public class Db2As400EnvironmentProperties extends EnvironmentProperties { - private static final String JDBC_PARAMS = "JDBC_PARAMS"; - private static final String DEFAULT = "default"; @Override public Map connectionPropertiesToEnvironment(Map connectionProperties) { HashMap environment = new HashMap<>(); // now construct jdbc string - String connectionString = "db2as400://jdbc:as400://" + connectionProperties.get("HOST") + String connectionString = "db2as400://jdbc:as400://" + connectionProperties.get(HOST) + ";" + connectionProperties.getOrDefault(JDBC_PARAMS, ""); if (connectionProperties.containsKey(SECRET_NAME)) { diff --git a/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2CompositeHandler.java b/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2CompositeHandler.java index 9c0ccec19a..0bc8c1a332 100644 --- a/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2CompositeHandler.java +++ b/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2CompositeHandler.java @@ -19,7 +19,6 @@ */ package com.amazonaws.athena.connectors.db2; -import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -32,6 +31,6 @@ public class Db2CompositeHandler extends CompositeHandler { public Db2CompositeHandler() { - super(new 
Db2MetadataHandler(GlueConnectionUtils.getGlueConnection()), new Db2RecordHandler(GlueConnectionUtils.getGlueConnection())); + super(new Db2MetadataHandler(new Db2EnvironmentProperties().createEnvironment()), new Db2RecordHandler(new Db2EnvironmentProperties().createEnvironment())); } } diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/Db2EnvironmentProperties.java b/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2EnvironmentProperties.java similarity index 84% rename from athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/Db2EnvironmentProperties.java rename to athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2EnvironmentProperties.java index 62b3e7c1c0..06c0f6ca4e 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/Db2EnvironmentProperties.java +++ b/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2EnvironmentProperties.java @@ -17,10 +17,14 @@ * limitations under the License. 
* #L% */ -package com.amazonaws.athena.connector.lambda.connection; +package com.amazonaws.athena.connectors.db2; + +import com.amazonaws.athena.connectors.jdbc.JdbcEnvironmentProperties; import java.util.Map; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.DATABASE; + public class Db2EnvironmentProperties extends JdbcEnvironmentProperties { @Override diff --git a/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBCompositeHandler.java b/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBCompositeHandler.java index 0d64948410..e88dfdd57e 100644 --- a/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBCompositeHandler.java +++ b/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBCompositeHandler.java @@ -19,7 +19,6 @@ */ package com.amazonaws.athena.connectors.docdb; -import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -31,6 +30,6 @@ public class DocDBCompositeHandler { public DocDBCompositeHandler() { - super(new DocDBMetadataHandler(GlueConnectionUtils.getGlueConnection()), new DocDBRecordHandler(GlueConnectionUtils.getGlueConnection())); + super(new DocDBMetadataHandler(new DocDBEnvironmentProperties().createEnvironment()), new DocDBRecordHandler(new DocDBEnvironmentProperties().createEnvironment())); } } diff --git a/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBEnvironmentProperties.java b/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBEnvironmentProperties.java new file mode 100644 index 0000000000..50c378b676 --- /dev/null +++ b/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBEnvironmentProperties.java @@ -0,0 +1,46 @@ +/*- + * #%L + * Amazon Athena Query Federation SDK + * %% + * Copyright (C) 2019 - 2024 Amazon Web Services + * %% + * Licensed under the Apache License, Version 2.0 (the 
"License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * #L% + */ +package com.amazonaws.athena.connectors.docdb; + +import com.amazonaws.athena.connector.lambda.connection.EnvironmentProperties; + +import java.util.HashMap; +import java.util.Map; + +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.DEFAULT_DOCDB; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.HOST; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.JDBC_PARAMS; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.PORT; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.SECRET_NAME; + +public class DocDBEnvironmentProperties extends EnvironmentProperties +{ + @Override + public Map connectionPropertiesToEnvironment(Map connectionProperties) + { + Map environment = new HashMap<>(); + + String connectionString = "mongodb://${" + connectionProperties.get(SECRET_NAME) + "}@" + + connectionProperties.get(HOST) + connectionProperties.get(PORT) + "/?" 
+ + connectionProperties.getOrDefault(JDBC_PARAMS, ""); + environment.put(DEFAULT_DOCDB, connectionString); + return environment; + } +} diff --git a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBCompositeHandler.java b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBCompositeHandler.java index 079ac4283c..23a5887535 100644 --- a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBCompositeHandler.java +++ b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBCompositeHandler.java @@ -19,7 +19,7 @@ */ package com.amazonaws.athena.connectors.dynamodb; -import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; +import com.amazonaws.athena.connector.lambda.connection.EnvironmentProperties; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -31,6 +31,6 @@ public class DynamoDBCompositeHandler { public DynamoDBCompositeHandler() { - super(new DynamoDBMetadataHandler(GlueConnectionUtils.getGlueConnection()), new DynamoDBRecordHandler(GlueConnectionUtils.getGlueConnection())); + super(new DynamoDBMetadataHandler(new EnvironmentProperties().createEnvironment()), new DynamoDBRecordHandler(new EnvironmentProperties().createEnvironment())); } } diff --git a/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchCompositeHandler.java b/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchCompositeHandler.java index 1b0ba6906d..3e20875455 100644 --- a/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchCompositeHandler.java +++ b/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchCompositeHandler.java @@ -19,7 +19,7 @@ */ package com.amazonaws.athena.connectors.elasticsearch; -import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; +import 
com.amazonaws.athena.connector.lambda.connection.EnvironmentProperties; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -31,6 +31,6 @@ public class ElasticsearchCompositeHandler { public ElasticsearchCompositeHandler() { - super(new ElasticsearchMetadataHandler(GlueConnectionUtils.getGlueConnection()), new ElasticsearchRecordHandler(GlueConnectionUtils.getGlueConnection())); + super(new ElasticsearchMetadataHandler(new EnvironmentProperties().createEnvironment()), new ElasticsearchRecordHandler(new EnvironmentProperties().createEnvironment())); } } diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentConstants.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentConstants.java new file mode 100644 index 0000000000..33687e029a --- /dev/null +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentConstants.java @@ -0,0 +1,54 @@ +/*- + * #%L + * Amazon Athena Query Federation SDK + * %% + * Copyright (C) 2019 - 2024 Amazon Web Services + * %% + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * #L% + */ +package com.amazonaws.athena.connector.lambda.connection; + +public final class EnvironmentConstants +{ + private EnvironmentConstants() {} + + public static final int CONNECT_TIMEOUT = 250; + + // Lambda environment variable keys + public static final String DEFAULT_GLUE_CONNECTION = "glue_connection"; + public static final String SECRET_NAME = "secret_name"; + public static final String SPILL_KMS_KEY_ID = "spill_kms_key_id"; + public static final String KMS_KEY_ID = "kms_key_id"; + public static final String DEFAULT = "default"; + public static final String DEFAULT_DOCDB = "default_docdb"; + public static final String DEFAULT_HBASE = "default_hbase"; + + // glue connection property names + public static final String HOST = "HOST"; + public static final String PORT = "PORT"; + public static final String JDBC_PARAMS = "JDBC_PARAMS"; + public static final String DATABASE = "DATABASE"; + public static final String SESSION_CONFS = "SESSION_CONFS"; + public static final String HIVE_CONFS = "HIVE_CONFS"; + public static final String HIVE_VARS = "HIVE_VARS"; + public static final String WAREHOUSE = "WAREHOUSE"; + public static final String SCHEMA = "SCHEMA"; + public static final String PROJECT_ID = "PROJECT_ID"; + public static final String CLUSTER_RES_ID = "CLUSTER_RESOURCE_ID"; + public static final String GRAPH_TYPE = "GRAPH_TYPE"; + public static final String HBASE_PORT = "HBASE_PORT"; + public static final String ZOOKEEPER_PORT = "ZOOKEEPER_PORT"; + public static final String CUSTOM_AUTH_TYPE = "CUSTOM_AUTH_TYPE"; + public static final String GLUE_CERTIFICATES_S3_REFERENCE = "CERTIFICATE_S3_REFERENCE"; +} diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java index 0df340cde2..3aa27b449f 100644 --- 
a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java @@ -34,13 +34,14 @@ import java.util.HashMap; import java.util.Map; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.CONNECT_TIMEOUT; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.DEFAULT_GLUE_CONNECTION; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.KMS_KEY_ID; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.SECRET_NAME; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.SPILL_KMS_KEY_ID; + public class EnvironmentProperties { - public static final String DEFAULT_GLUE_CONNECTION = "glue_connection"; - private static final int CONNECT_TIMEOUT = 250; - protected static final String SECRET_NAME = "secret_name"; - protected static final String SPILL_KMS_KEY_ID = "spill_kms_key_id"; - protected static final String KMS_KEY_ID = "kms_key_id"; protected static final Logger logger = LoggerFactory.getLogger(EnvironmentProperties.class); public Map createEnvironment() throws RuntimeException diff --git a/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsCompositeHandler.java b/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsCompositeHandler.java index 651878eeca..668f8e3a2d 100644 --- a/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsCompositeHandler.java +++ b/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsCompositeHandler.java @@ -19,7 +19,6 @@ */ package com.amazonaws.athena.connectors.gcs; -import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; import org.apache.arrow.memory.BufferAllocator; import 
org.apache.arrow.memory.RootAllocator; @@ -47,9 +46,9 @@ public class GcsCompositeHandler */ public GcsCompositeHandler() throws IOException, CertificateEncodingException, NoSuchAlgorithmException, KeyStoreException { - super(new GcsMetadataHandler(allocator, GlueConnectionUtils.getGlueConnection()), new GcsRecordHandler(allocator, GlueConnectionUtils.getGlueConnection())); + super(new GcsMetadataHandler(allocator, new GcsEnvironmentProperties().createEnvironment()), new GcsRecordHandler(allocator, new GcsEnvironmentProperties().createEnvironment())); installCaCertificate(); - installGoogleCredentialsJsonFile(GlueConnectionUtils.getGlueConnection()); + installGoogleCredentialsJsonFile(new GcsEnvironmentProperties().createEnvironment()); setupNativeEnvironmentVariables(); } } diff --git a/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsEnvironmentProperties.java b/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsEnvironmentProperties.java new file mode 100644 index 0000000000..0308591318 --- /dev/null +++ b/athena-gcs/src/main/java/com/amazonaws/athena/connectors/gcs/GcsEnvironmentProperties.java @@ -0,0 +1,40 @@ +/*- + * #%L + * athena-gcs + * %% + * Copyright (C) 2019 - 2024 Amazon Web Services + * %% + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * #L% + */ +package com.amazonaws.athena.connectors.gcs; + +import com.amazonaws.athena.connector.lambda.connection.EnvironmentProperties; + +import java.util.HashMap; +import java.util.Map; + +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.SECRET_NAME; +import static com.amazonaws.athena.connectors.gcs.GcsConstants.GCS_SECRET_KEY_ENV_VAR; + +public class GcsEnvironmentProperties extends EnvironmentProperties +{ + @Override + public Map connectionPropertiesToEnvironment(Map connectionProperties) + { + Map environment = new HashMap<>(); + + environment.put(GCS_SECRET_KEY_ENV_VAR, connectionProperties.get(SECRET_NAME)); + return environment; + } +} diff --git a/athena-google-bigquery/src/main/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryCompositeHandler.java b/athena-google-bigquery/src/main/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryCompositeHandler.java index ba94b34315..0694c48aa2 100644 --- a/athena-google-bigquery/src/main/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryCompositeHandler.java +++ b/athena-google-bigquery/src/main/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryCompositeHandler.java @@ -20,7 +20,6 @@ */ package com.amazonaws.athena.connectors.google.bigquery; -import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; import org.apache.arrow.memory.BufferAllocator; import org.apache.arrow.memory.RootAllocator; @@ -42,8 +41,8 @@ public class BigQueryCompositeHandler public BigQueryCompositeHandler() throws IOException { - super(new BigQueryMetadataHandler(GlueConnectionUtils.getGlueConnection()), new BigQueryRecordHandler(GlueConnectionUtils.getGlueConnection(), allocator)); - installGoogleCredentialsJsonFile(GlueConnectionUtils.getGlueConnection()); + super(new BigQueryMetadataHandler(new BigQueryEnvironmentProperties().createEnvironment()), new BigQueryRecordHandler(new 
BigQueryEnvironmentProperties().createEnvironment(), allocator)); + installGoogleCredentialsJsonFile(new BigQueryEnvironmentProperties().createEnvironment()); setupNativeEnvironmentVariables(); logger.info("Inside BigQueryCompositeHandler()"); } diff --git a/athena-google-bigquery/src/main/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryEnvironmentProperties.java b/athena-google-bigquery/src/main/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryEnvironmentProperties.java new file mode 100644 index 0000000000..0efc606a25 --- /dev/null +++ b/athena-google-bigquery/src/main/java/com/amazonaws/athena/connectors/google/bigquery/BigQueryEnvironmentProperties.java @@ -0,0 +1,45 @@ +/*- + * #%L + * Amazon Athena Query Federation SDK + * %% + * Copyright (C) 2019 - 2024 Amazon Web Services + * %% + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * #L% + */ +package com.amazonaws.athena.connectors.google.bigquery; + +import com.amazonaws.athena.connector.lambda.connection.EnvironmentProperties; + +import java.util.HashMap; +import java.util.Map; + +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.PROJECT_ID; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.SECRET_NAME; +import static com.amazonaws.athena.connectors.google.bigquery.BigQueryConstants.ENV_BIG_QUERY_CREDS_SM_ID; +import static com.amazonaws.athena.connectors.google.bigquery.BigQueryConstants.GCP_PROJECT_ID; + +public class BigQueryEnvironmentProperties extends EnvironmentProperties +{ + @Override + public Map connectionPropertiesToEnvironment(Map connectionProperties) + { + Map environment = new HashMap<>(); + + if (connectionProperties.containsKey(PROJECT_ID)) { + environment.put(GCP_PROJECT_ID, connectionProperties.get(PROJECT_ID)); + } + environment.put(ENV_BIG_QUERY_CREDS_SM_ID, connectionProperties.get(SECRET_NAME)); + return environment; + } +} diff --git a/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseCompositeHandler.java b/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseCompositeHandler.java index c61470c4c4..e02af8ecb4 100644 --- a/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseCompositeHandler.java +++ b/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseCompositeHandler.java @@ -19,7 +19,6 @@ */ package com.amazonaws.athena.connectors.hbase; -import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -31,6 +30,6 @@ public class HbaseCompositeHandler { public HbaseCompositeHandler() { - super(new HbaseMetadataHandler(GlueConnectionUtils.getGlueConnection()), new HbaseRecordHandler(GlueConnectionUtils.getGlueConnection())); + super(new HbaseMetadataHandler(new 
HbaseEnvironmentProperties().createEnvironment()), new HbaseRecordHandler(new HbaseEnvironmentProperties().createEnvironment())); } } diff --git a/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseConnectionFactory.java b/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseConnectionFactory.java index dab76e2531..e96a8cef38 100644 --- a/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseConnectionFactory.java +++ b/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseConnectionFactory.java @@ -19,7 +19,6 @@ */ package com.amazonaws.athena.connectors.hbase; -import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import org.apache.arrow.util.VisibleForTesting; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; @@ -126,7 +125,7 @@ private Connection createConnection(String host, String masterPort, String zooke config.set(nextConfig.getKey(), nextConfig.getValue()); } - Map configOptions = GlueConnectionUtils.getGlueConnection(); + Map configOptions = new HbaseEnvironmentProperties().createEnvironment(); boolean kerberosAuthEnabled = configOptions.get(KERBEROS_AUTH_ENABLED) != null && "true".equalsIgnoreCase(configOptions.get(KERBEROS_AUTH_ENABLED)); logger.info("Kerberos Authentication Enabled: " + kerberosAuthEnabled); if (kerberosAuthEnabled) { diff --git a/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseEnvironmentProperties.java b/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseEnvironmentProperties.java new file mode 100644 index 0000000000..cfd435ea1c --- /dev/null +++ b/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/HbaseEnvironmentProperties.java @@ -0,0 +1,44 @@ +/*- + * #%L + * Amazon Athena Query Federation SDK + * %% + * Copyright (C) 2019 - 2024 Amazon Web Services + * %% + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file 
except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * #L% + */ +package com.amazonaws.athena.connectors.hbase; + +import com.amazonaws.athena.connector.lambda.connection.EnvironmentProperties; + +import java.util.HashMap; +import java.util.Map; + +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.DEFAULT_HBASE; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.HBASE_PORT; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.HOST; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.ZOOKEEPER_PORT; + +public class HbaseEnvironmentProperties extends EnvironmentProperties +{ + @Override + public Map connectionPropertiesToEnvironment(Map connectionProperties) + { + Map environment = new HashMap<>(); + + environment.put(DEFAULT_HBASE, connectionProperties.get(HOST) + + ":" + connectionProperties.get(HBASE_PORT) + + ":" + connectionProperties.get(ZOOKEEPER_PORT)); + return environment; + } +} diff --git a/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/connection/HbaseConnectionFactory.java b/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/connection/HbaseConnectionFactory.java index 20e8b1b371..5708e4266b 100644 --- a/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/connection/HbaseConnectionFactory.java +++ b/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/connection/HbaseConnectionFactory.java @@ -19,7 +19,7 @@ */ package 
com.amazonaws.athena.connectors.hbase.connection; -import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; +import com.amazonaws.athena.connectors.hbase.HbaseEnvironmentProperties; import org.apache.arrow.util.VisibleForTesting; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; @@ -128,7 +128,7 @@ private HBaseConnection createConnection(String host, String masterPort, String config.set(nextConfig.getKey(), nextConfig.getValue()); } - Map configOptions = GlueConnectionUtils.getGlueConnection(); + Map configOptions = new HbaseEnvironmentProperties().createEnvironment(); boolean kerberosAuthEnabled = configOptions.get(KERBEROS_AUTH_ENABLED) != null && "true".equalsIgnoreCase(configOptions.get(KERBEROS_AUTH_ENABLED)); logger.info("Kerberos Authentication Enabled: " + kerberosAuthEnabled); if (kerberosAuthEnabled) { diff --git a/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/integ/HbaseTableUtils.java b/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/integ/HbaseTableUtils.java index 715eb0d908..2745037afb 100644 --- a/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/integ/HbaseTableUtils.java +++ b/athena-hbase/src/main/java/com/amazonaws/athena/connectors/hbase/integ/HbaseTableUtils.java @@ -19,7 +19,7 @@ */ package com.amazonaws.athena.connectors.hbase.integ; -import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; +import com.amazonaws.athena.connectors.hbase.HbaseEnvironmentProperties; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -100,7 +100,7 @@ private Configuration getHbaseConfiguration(String connectionStr) configuration.set("hbase.client.pause", "500"); configuration.set("zookeeper.recovery.retry", "2"); - java.util.Map configOptions = GlueConnectionUtils.getGlueConnection(); + java.util.Map configOptions = new 
HbaseEnvironmentProperties().createEnvironment(); boolean kerberosAuthEnabled = configOptions.get(KERBEROS_AUTH_ENABLED) != null && "true".equalsIgnoreCase(configOptions.get(KERBEROS_AUTH_ENABLED)); logger.info("Kerberos Authentication Enabled: " + kerberosAuthEnabled); if (kerberosAuthEnabled) { diff --git a/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveCompositeHandler.java b/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveCompositeHandler.java index bb9b76007a..cc03c1dad0 100644 --- a/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveCompositeHandler.java +++ b/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HiveCompositeHandler.java @@ -19,7 +19,6 @@ */ package com.amazonaws.athena.connectors.hortonworks; -import com.amazonaws.athena.connector.lambda.connection.HortonworksEnvironmentProperties; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/HortonworksEnvironmentProperties.java b/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HortonworksEnvironmentProperties.java similarity index 88% rename from athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/HortonworksEnvironmentProperties.java rename to athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HortonworksEnvironmentProperties.java index 87f02c3e78..522073b2f0 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/HortonworksEnvironmentProperties.java +++ b/athena-hortonworks-hive/src/main/java/com/amazonaws/athena/connectors/hortonworks/HortonworksEnvironmentProperties.java @@ -17,7 +17,9 @@ * limitations under the License. 
* #L% */ -package com.amazonaws.athena.connector.lambda.connection; +package com.amazonaws.athena.connectors.hortonworks; + +import com.amazonaws.athena.connectors.jdbc.JdbcEnvironmentProperties; import java.util.Map; diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/JdbcEnvironmentProperties.java b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/JdbcEnvironmentProperties.java similarity index 71% rename from athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/JdbcEnvironmentProperties.java rename to athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/JdbcEnvironmentProperties.java index 752327b1d7..db4a349155 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/JdbcEnvironmentProperties.java +++ b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/JdbcEnvironmentProperties.java @@ -17,27 +17,31 @@ * limitations under the License. 
* #L% */ -package com.amazonaws.athena.connector.lambda.connection; +package com.amazonaws.athena.connectors.jdbc; + +import com.amazonaws.athena.connector.lambda.connection.EnvironmentProperties; import java.util.HashMap; import java.util.Map; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.DATABASE; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.DEFAULT; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.HOST; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.JDBC_PARAMS; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.PORT; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.SECRET_NAME; + public abstract class JdbcEnvironmentProperties extends EnvironmentProperties { - protected static final String DEFAULT = "default"; - protected static final String JDBC_PARAMS = "JDBC_PARAMS"; - protected static final String DATABASE = "DATABASE"; - @Override public Map connectionPropertiesToEnvironment(Map connectionProperties) { HashMap environment = new HashMap<>(); // now construct jdbc string - String connectionString = getConnectionStringPrefix(connectionProperties) + connectionProperties.get("HOST") - + ":" + connectionProperties.get("PORT") + getDatabase(connectionProperties) + getJdbcParameters(connectionProperties); + String connectionString = getConnectionStringPrefix(connectionProperties) + connectionProperties.get(HOST) + + ":" + connectionProperties.get(PORT) + getDatabase(connectionProperties) + getJdbcParameters(connectionProperties); - logger.debug("Constructed connection string: {}", connectionString); environment.put(DEFAULT, connectionString); return environment; } diff --git a/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/AmazonMskCompositeHandler.java 
b/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/AmazonMskCompositeHandler.java index 9879638c38..bdf3f21eab 100644 --- a/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/AmazonMskCompositeHandler.java +++ b/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/AmazonMskCompositeHandler.java @@ -19,7 +19,6 @@ */ package com.amazonaws.athena.connectors.msk; -import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; public class AmazonMskCompositeHandler @@ -27,6 +26,6 @@ public class AmazonMskCompositeHandler { public AmazonMskCompositeHandler() throws Exception { - super(new AmazonMskMetadataHandler(GlueConnectionUtils.getGlueConnection()), new AmazonMskRecordHandler(GlueConnectionUtils.getGlueConnection())); + super(new AmazonMskMetadataHandler(new AmazonMskEnvironmentProperties().createEnvironment()), new AmazonMskRecordHandler(new AmazonMskEnvironmentProperties().createEnvironment())); } } diff --git a/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/AmazonMskEnvironmentProperties.java b/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/AmazonMskEnvironmentProperties.java new file mode 100644 index 0000000000..c281bf0479 --- /dev/null +++ b/athena-msk/src/main/java/com/amazonaws/athena/connectors/msk/AmazonMskEnvironmentProperties.java @@ -0,0 +1,50 @@ +/*- + * #%L + * Amazon Athena Query Federation SDK + * %% + * Copyright (C) 2019 - 2024 Amazon Web Services + * %% + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * #L% + */ +package com.amazonaws.athena.connectors.msk; + +import com.amazonaws.athena.connector.lambda.connection.EnvironmentProperties; + +import java.util.HashMap; +import java.util.Map; + +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.CUSTOM_AUTH_TYPE; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.GLUE_CERTIFICATES_S3_REFERENCE; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.HOST; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.PORT; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.SECRET_NAME; +import static com.amazonaws.athena.connectors.msk.AmazonMskConstants.AUTH_TYPE; +import static com.amazonaws.athena.connectors.msk.AmazonMskConstants.CERTIFICATES_S3_REFERENCE; +import static com.amazonaws.athena.connectors.msk.AmazonMskConstants.ENV_KAFKA_ENDPOINT; +import static com.amazonaws.athena.connectors.msk.AmazonMskConstants.SECRET_MANAGER_MSK_CREDS_NAME; + +public class AmazonMskEnvironmentProperties extends EnvironmentProperties +{ + @Override + public Map connectionPropertiesToEnvironment(Map connectionProperties) + { + Map environment = new HashMap<>(); + + environment.put(AUTH_TYPE, connectionProperties.get(CUSTOM_AUTH_TYPE)); + environment.put(CERTIFICATES_S3_REFERENCE, connectionProperties.getOrDefault(GLUE_CERTIFICATES_S3_REFERENCE, "")); + environment.put(SECRET_MANAGER_MSK_CREDS_NAME, connectionProperties.getOrDefault(SECRET_NAME, "")); + environment.put(ENV_KAFKA_ENDPOINT, connectionProperties.get(HOST) + ":" + connectionProperties.get(PORT)); + return environment; + } +} diff --git a/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlCompositeHandler.java 
b/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlCompositeHandler.java index 0b5b878167..01519cd9c1 100644 --- a/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlCompositeHandler.java +++ b/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlCompositeHandler.java @@ -19,7 +19,6 @@ */ package com.amazonaws.athena.connectors.mysql; -import com.amazonaws.athena.connector.lambda.connection.MySqlEnvironmentProperties; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/MySqlEnvironmentProperties.java b/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlEnvironmentProperties.java similarity index 88% rename from athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/MySqlEnvironmentProperties.java rename to athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlEnvironmentProperties.java index bce78dbe45..d43758930c 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/MySqlEnvironmentProperties.java +++ b/athena-mysql/src/main/java/com/amazonaws/athena/connectors/mysql/MySqlEnvironmentProperties.java @@ -17,7 +17,9 @@ * limitations under the License. 
* #L% */ -package com.amazonaws.athena.connector.lambda.connection; +package com.amazonaws.athena.connectors.mysql; + +import com.amazonaws.athena.connectors.jdbc.JdbcEnvironmentProperties; import java.util.Map; diff --git a/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/Constants.java b/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/Constants.java index 41215afa55..542aac9f89 100644 --- a/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/Constants.java +++ b/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/Constants.java @@ -32,6 +32,7 @@ protected Constants() public static final String CFG_PORT = "neptune_port"; public static final String CFG_IAM = "iam_enabled"; public static final String CFG_REGION = "AWS_REGION"; + public static final String CFG_ClUSTER_RES_ID = "neptune_cluster_res_id"; public static final String SCHEMA_QUERY = "query"; public static final String SCHEMA_CASE_INSEN = "enable_caseinsensitivematch"; diff --git a/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/NeptuneCompositeHandler.java b/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/NeptuneCompositeHandler.java index 0ae4fbc607..1890b9460d 100644 --- a/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/NeptuneCompositeHandler.java +++ b/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/NeptuneCompositeHandler.java @@ -19,7 +19,6 @@ */ package com.amazonaws.athena.connectors.neptune; -import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -31,6 +30,6 @@ public class NeptuneCompositeHandler { public NeptuneCompositeHandler() { - super(new NeptuneMetadataHandler(GlueConnectionUtils.getGlueConnection()), new NeptuneRecordHandler(GlueConnectionUtils.getGlueConnection())); + super(new NeptuneMetadataHandler(new 
NeptuneEnvironmentProperties().createEnvironment()), new NeptuneRecordHandler(new NeptuneEnvironmentProperties().createEnvironment())); } } diff --git a/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/NeptuneEnvironmentProperties.java b/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/NeptuneEnvironmentProperties.java new file mode 100644 index 0000000000..7ddff52a4e --- /dev/null +++ b/athena-neptune/src/main/java/com/amazonaws/athena/connectors/neptune/NeptuneEnvironmentProperties.java @@ -0,0 +1,49 @@ +/*- + * #%L + * Amazon Athena Query Federation SDK + * %% + * Copyright (C) 2019 - 2024 Amazon Web Services + * %% + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * #L% + */ +package com.amazonaws.athena.connectors.neptune; + +import com.amazonaws.athena.connector.lambda.connection.EnvironmentProperties; + +import java.util.HashMap; +import java.util.Map; + +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.CLUSTER_RES_ID; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.GRAPH_TYPE; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.HOST; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.PORT; +import static com.amazonaws.athena.connectors.neptune.Constants.CFG_ClUSTER_RES_ID; +import static com.amazonaws.athena.connectors.neptune.Constants.CFG_ENDPOINT; +import static com.amazonaws.athena.connectors.neptune.Constants.CFG_GRAPH_TYPE; +import static com.amazonaws.athena.connectors.neptune.Constants.CFG_PORT; + +public class NeptuneEnvironmentProperties extends EnvironmentProperties +{ + @Override + public Map connectionPropertiesToEnvironment(Map connectionProperties) + { + Map environment = new HashMap<>(); + + environment.put(CFG_ENDPOINT, connectionProperties.get(HOST)); + environment.put(CFG_PORT, connectionProperties.get(PORT)); + environment.put(CFG_ClUSTER_RES_ID, connectionProperties.get(CLUSTER_RES_ID)); + environment.put(CFG_GRAPH_TYPE, connectionProperties.get(GRAPH_TYPE)); + return environment; + } +} diff --git a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleCompositeHandler.java b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleCompositeHandler.java index 86a399601e..78a779d000 100644 --- a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleCompositeHandler.java +++ b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleCompositeHandler.java @@ -20,7 +20,6 @@ */ package com.amazonaws.athena.connectors.oracle; -import com.amazonaws.athena.connector.lambda.connection.OracleEnvironmentProperties; 
import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/OracleEnvironmentProperties.java b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleEnvironmentProperties.java similarity index 82% rename from athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/OracleEnvironmentProperties.java rename to athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleEnvironmentProperties.java index 7d33ec2bcf..1bdd16ca65 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/OracleEnvironmentProperties.java +++ b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleEnvironmentProperties.java @@ -17,10 +17,15 @@ * limitations under the License. * #L% */ -package com.amazonaws.athena.connector.lambda.connection; +package com.amazonaws.athena.connectors.oracle; + +import com.amazonaws.athena.connectors.jdbc.JdbcEnvironmentProperties; import java.util.Map; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.DATABASE; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.SECRET_NAME; + public class OracleEnvironmentProperties extends JdbcEnvironmentProperties { @Override diff --git a/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlCompositeHandler.java b/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlCompositeHandler.java index 16fe622869..c5829cbb0a 100644 --- a/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlCompositeHandler.java +++ b/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlCompositeHandler.java @@ -19,7 +19,6 @@ */ package com.amazonaws.athena.connectors.postgresql; -import 
com.amazonaws.athena.connector.lambda.connection.PostGreSqlEnvironmentProperties; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/PostGreSqlEnvironmentProperties.java b/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlEnvironmentProperties.java similarity index 88% rename from athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/PostGreSqlEnvironmentProperties.java rename to athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlEnvironmentProperties.java index fbd0e16487..d2e49cd53d 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/PostGreSqlEnvironmentProperties.java +++ b/athena-postgresql/src/main/java/com/amazonaws/athena/connectors/postgresql/PostGreSqlEnvironmentProperties.java @@ -17,7 +17,9 @@ * limitations under the License. 
* #L% */ -package com.amazonaws.athena.connector.lambda.connection; +package com.amazonaws.athena.connectors.postgresql; + +import com.amazonaws.athena.connectors.jdbc.JdbcEnvironmentProperties; import java.util.Map; diff --git a/athena-redis/src/main/java/com/amazonaws/athena/connectors/redis/RedisCompositeHandler.java b/athena-redis/src/main/java/com/amazonaws/athena/connectors/redis/RedisCompositeHandler.java index 341516839b..42e12a0615 100644 --- a/athena-redis/src/main/java/com/amazonaws/athena/connectors/redis/RedisCompositeHandler.java +++ b/athena-redis/src/main/java/com/amazonaws/athena/connectors/redis/RedisCompositeHandler.java @@ -19,7 +19,7 @@ */ package com.amazonaws.athena.connectors.redis; -import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; +import com.amazonaws.athena.connector.lambda.connection.EnvironmentProperties; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -31,6 +31,6 @@ public class RedisCompositeHandler { public RedisCompositeHandler() { - super(new RedisMetadataHandler(GlueConnectionUtils.getGlueConnection()), new RedisRecordHandler(GlueConnectionUtils.getGlueConnection())); + super(new RedisMetadataHandler(new EnvironmentProperties().createEnvironment()), new RedisRecordHandler(new EnvironmentProperties().createEnvironment())); } } diff --git a/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaCompositeHandler.java b/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaCompositeHandler.java index b22112a659..304e20fb19 100644 --- a/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaCompositeHandler.java +++ b/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaCompositeHandler.java @@ -22,7 +22,6 @@ package com.amazonaws.athena.connectors.saphana; -import com.amazonaws.athena.connector.lambda.connection.SaphanaEnvironmentProperties; import 
com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/SaphanaEnvironmentProperties.java b/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaEnvironmentProperties.java similarity index 89% rename from athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/SaphanaEnvironmentProperties.java rename to athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaEnvironmentProperties.java index 38beefa547..98951d18e7 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/SaphanaEnvironmentProperties.java +++ b/athena-saphana/src/main/java/com/amazonaws/athena/connectors/saphana/SaphanaEnvironmentProperties.java @@ -17,7 +17,9 @@ * limitations under the License. * #L% */ -package com.amazonaws.athena.connector.lambda.connection; +package com.amazonaws.athena.connectors.saphana; + +import com.amazonaws.athena.connectors.jdbc.JdbcEnvironmentProperties; import java.util.Map; diff --git a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeCompositeHandler.java b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeCompositeHandler.java index be8f456fe3..c7e9c4b64f 100644 --- a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeCompositeHandler.java +++ b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeCompositeHandler.java @@ -22,7 +22,6 @@ package com.amazonaws.athena.connectors.snowflake; -import com.amazonaws.athena.connector.lambda.connection.SnowflakeEnvironmentProperties; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/SnowflakeEnvironmentProperties.java 
b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeEnvironmentProperties.java similarity index 77% rename from athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/SnowflakeEnvironmentProperties.java rename to athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeEnvironmentProperties.java index ee8c7b39d9..d90502132d 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/SnowflakeEnvironmentProperties.java +++ b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeEnvironmentProperties.java @@ -17,14 +17,18 @@ * limitations under the License. * #L% */ -package com.amazonaws.athena.connector.lambda.connection; +package com.amazonaws.athena.connectors.snowflake; + +import com.amazonaws.athena.connectors.jdbc.JdbcEnvironmentProperties; import java.util.Map; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.DATABASE; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.SCHEMA; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.WAREHOUSE; + public class SnowflakeEnvironmentProperties extends JdbcEnvironmentProperties { - private static final String WAREHOUSE = "WAREHOUSE"; - private static final String SCHEMA = "SCHEMA"; @Override protected String getConnectionStringPrefix(Map connectionProperties) { diff --git a/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerCompositeHandler.java b/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerCompositeHandler.java index 86789e4cc2..b56a72f774 100644 --- a/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerCompositeHandler.java +++ b/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerCompositeHandler.java @@ -19,7 +19,6 @@ */ package 
com.amazonaws.athena.connectors.sqlserver; -import com.amazonaws.athena.connector.lambda.connection.SqlServerEnvironmentProperties; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; public class SqlServerCompositeHandler extends CompositeHandler diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/SqlServerEnvironmentProperties.java b/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerEnvironmentProperties.java similarity index 85% rename from athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/SqlServerEnvironmentProperties.java rename to athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerEnvironmentProperties.java index 33a2a3c9d4..d7672db050 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/SqlServerEnvironmentProperties.java +++ b/athena-sqlserver/src/main/java/com/amazonaws/athena/connectors/sqlserver/SqlServerEnvironmentProperties.java @@ -17,10 +17,14 @@ * limitations under the License. 
* #L% */ -package com.amazonaws.athena.connector.lambda.connection; +package com.amazonaws.athena.connectors.sqlserver; + +import com.amazonaws.athena.connectors.jdbc.JdbcEnvironmentProperties; import java.util.Map; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.DATABASE; + public class SqlServerEnvironmentProperties extends JdbcEnvironmentProperties { @Override diff --git a/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseCompositeHandler.java b/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseCompositeHandler.java index 4c319877f4..ccf952b241 100644 --- a/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseCompositeHandler.java +++ b/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseCompositeHandler.java @@ -19,7 +19,6 @@ */ package com.amazonaws.athena.connectors.synapse; -import com.amazonaws.athena.connector.lambda.connection.SynapseEnvironmentProperties; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; public class SynapseCompositeHandler extends CompositeHandler diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/SynapseEnvironmentProperties.java b/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseEnvironmentProperties.java similarity index 57% rename from athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/SynapseEnvironmentProperties.java rename to athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseEnvironmentProperties.java index 149c5ec056..3c42e4fdce 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/SynapseEnvironmentProperties.java +++ b/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseEnvironmentProperties.java @@ -17,15 +17,37 @@ * limitations under the License. 
* #L% */ -package com.amazonaws.athena.connector.lambda.connection; +package com.amazonaws.athena.connectors.synapse; + +import com.amazonaws.athena.connectors.jdbc.JdbcEnvironmentProperties; import java.util.Map; -public class SynapseEnvironmentProperties extends SqlServerEnvironmentProperties +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.DATABASE; + +public class SynapseEnvironmentProperties extends JdbcEnvironmentProperties { @Override protected String getConnectionStringPrefix(Map connectionProperties) { return "synapse://jdbc:synapse://"; } + + @Override + protected String getDatabase(Map connectionProperties) + { + return ";databaseName=" + connectionProperties.get(DATABASE); + } + + @Override + protected String getJdbcParametersSeparator() + { + return ";"; + } + + @Override + protected String getDelimiter() + { + return ";"; + } } diff --git a/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataCompositeHandler.java b/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataCompositeHandler.java index c39f7e8ca4..0990873960 100644 --- a/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataCompositeHandler.java +++ b/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataCompositeHandler.java @@ -21,7 +21,6 @@ package com.amazonaws.athena.connectors.teradata; -import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; /** @@ -35,6 +34,6 @@ public class TeradataCompositeHandler { public TeradataCompositeHandler() { - super(new TeradataMetadataHandler(GlueConnectionUtils.getGlueConnection()), new TeradataRecordHandler(GlueConnectionUtils.getGlueConnection())); + super(new TeradataMetadataHandler(new TeradataEnvironmentProperties().createEnvironment()), new TeradataRecordHandler(new TeradataEnvironmentProperties().createEnvironment())); } } diff 
--git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/TeradataEnvironmentProperties.java b/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataEnvironmentProperties.java similarity index 85% rename from athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/TeradataEnvironmentProperties.java rename to athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataEnvironmentProperties.java index fff460399b..0b41bbcb0f 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/TeradataEnvironmentProperties.java +++ b/athena-teradata/src/main/java/com/amazonaws/athena/connectors/teradata/TeradataEnvironmentProperties.java @@ -17,10 +17,14 @@ * limitations under the License. * #L% */ -package com.amazonaws.athena.connector.lambda.connection; +package com.amazonaws.athena.connectors.teradata; + +import com.amazonaws.athena.connectors.jdbc.JdbcEnvironmentProperties; import java.util.Map; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.DATABASE; + public class TeradataEnvironmentProperties extends JdbcEnvironmentProperties { @Override diff --git a/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamCompositeHandler.java b/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamCompositeHandler.java index 502da41d72..3101f0197d 100644 --- a/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamCompositeHandler.java +++ b/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamCompositeHandler.java @@ -19,7 +19,7 @@ */ package com.amazonaws.athena.connectors.timestream; -import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; +import com.amazonaws.athena.connector.lambda.connection.EnvironmentProperties; import 
com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; public class TimestreamCompositeHandler @@ -27,6 +27,6 @@ public class TimestreamCompositeHandler { public TimestreamCompositeHandler() { - super(new TimestreamMetadataHandler(GlueConnectionUtils.getGlueConnection()), new TimestreamRecordHandler(GlueConnectionUtils.getGlueConnection())); + super(new TimestreamMetadataHandler(new EnvironmentProperties().createEnvironment()), new TimestreamRecordHandler(new EnvironmentProperties().createEnvironment())); } } diff --git a/athena-tpcds/src/main/java/com/amazonaws/athena/connectors/tpcds/TPCDSCompositeHandler.java b/athena-tpcds/src/main/java/com/amazonaws/athena/connectors/tpcds/TPCDSCompositeHandler.java index 420a623de8..ce8fe93952 100644 --- a/athena-tpcds/src/main/java/com/amazonaws/athena/connectors/tpcds/TPCDSCompositeHandler.java +++ b/athena-tpcds/src/main/java/com/amazonaws/athena/connectors/tpcds/TPCDSCompositeHandler.java @@ -19,7 +19,7 @@ */ package com.amazonaws.athena.connectors.tpcds; -import com.amazonaws.athena.connector.lambda.GlueConnectionUtils; +import com.amazonaws.athena.connector.lambda.connection.EnvironmentProperties; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; public class TPCDSCompositeHandler @@ -27,6 +27,6 @@ public class TPCDSCompositeHandler { public TPCDSCompositeHandler() { - super(new TPCDSMetadataHandler(GlueConnectionUtils.getGlueConnection()), new TPCDSRecordHandler(GlueConnectionUtils.getGlueConnection())); + super(new TPCDSMetadataHandler(new EnvironmentProperties().createEnvironment()), new TPCDSRecordHandler(new EnvironmentProperties().createEnvironment())); } } diff --git a/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaCompositeHandler.java b/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaCompositeHandler.java index d7d39a3b16..38212f254b 100644 --- 
a/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaCompositeHandler.java +++ b/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaCompositeHandler.java @@ -19,7 +19,6 @@ */ package com.amazonaws.athena.connectors.vertica; -import com.amazonaws.athena.connector.lambda.connection.VerticaEnvironmentProperties; import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; import java.io.IOException; diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/VerticaEnvironmentProperties.java b/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaEnvironmentProperties.java similarity index 88% rename from athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/VerticaEnvironmentProperties.java rename to athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaEnvironmentProperties.java index 021a202e58..5a9ec76437 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/VerticaEnvironmentProperties.java +++ b/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaEnvironmentProperties.java @@ -17,7 +17,9 @@ * limitations under the License. 
* #L% */ -package com.amazonaws.athena.connector.lambda.connection; +package com.amazonaws.athena.connectors.vertica; + +import com.amazonaws.athena.connectors.jdbc.JdbcEnvironmentProperties; import java.util.Map; From 323dd36f39c2670a75ff7e721171dfc4dea321c9 Mon Sep 17 00:00:00 2001 From: Aimery Methena Date: Fri, 20 Sep 2024 11:29:56 -0400 Subject: [PATCH 30/87] update image uri to not include partition --- athena-aws-cmdb/athena-aws-cmdb-connection.yaml | 2 +- athena-aws-cmdb/athena-aws-cmdb.yaml | 2 +- athena-clickhouse/athena-clickhouse.yaml | 2 +- athena-cloudera-hive/athena-cloudera-hive-connection.yaml | 2 +- athena-cloudera-hive/athena-cloudera-hive.yaml | 2 +- athena-cloudera-impala/athena-cloudera-impala-connection.yaml | 2 +- athena-cloudera-impala/athena-cloudera-impala.yaml | 2 +- .../athena-cloudwatch-metrics-connection.yaml | 2 +- athena-cloudwatch-metrics/athena-cloudwatch-metrics.yaml | 2 +- athena-cloudwatch/athena-cloudwatch-connection.yaml | 2 +- athena-cloudwatch/athena-cloudwatch.yaml | 2 +- athena-datalakegen2/athena-datalakegen2-connection.yaml | 2 +- athena-datalakegen2/athena-datalakegen2.yaml | 2 +- athena-db2-as400/athena-db2-as400-connection.yaml | 2 +- athena-db2-as400/athena-db2-as400.yaml | 2 +- athena-db2/athena-db2-connection.yaml | 2 +- athena-db2/athena-db2.yaml | 2 +- athena-docdb/athena-docdb-connection.yaml | 2 +- athena-docdb/athena-docdb.yaml | 2 +- athena-dynamodb/athena-dynamodb-connection.yaml | 2 +- athena-dynamodb/athena-dynamodb.yaml | 2 +- athena-elasticsearch/athena-elasticsearch-connection.yaml | 2 +- athena-elasticsearch/athena-elasticsearch.yaml | 2 +- athena-gcs/athena-gcs-connection.yaml | 2 +- athena-gcs/athena-gcs.yaml | 2 +- athena-google-bigquery/athena-google-bigquery-connection.yaml | 2 +- athena-google-bigquery/athena-google-bigquery.yaml | 2 +- athena-hbase/athena-hbase-connection.yaml | 2 +- athena-hbase/athena-hbase.yaml | 2 +- athena-hortonworks-hive/athena-hortonworks-hive-connection.yaml | 2 +- 
athena-hortonworks-hive/athena-hortonworks-hive.yaml | 2 +- athena-kafka/athena-kafka.yaml | 2 +- athena-msk/athena-msk-connection.yaml | 2 +- athena-msk/athena-msk.yaml | 2 +- athena-mysql/athena-mysql-connection.yaml | 2 +- athena-mysql/athena-mysql.yaml | 2 +- athena-neptune/athena-neptune-connection.yaml | 2 +- athena-neptune/athena-neptune.yaml | 2 +- athena-oracle/athena-oracle-connection.yaml | 2 +- athena-oracle/athena-oracle.yaml | 2 +- athena-postgresql/athena-postgresql-connection.yaml | 2 +- athena-postgresql/athena-postgresql.yaml | 2 +- athena-redis/athena-redis-connection.yaml | 2 +- athena-redis/athena-redis.yaml | 2 +- athena-redshift/athena-redshift-connection.yaml | 2 +- athena-redshift/athena-redshift.yaml | 2 +- athena-saphana/athena-saphana-connection.yaml | 2 +- athena-saphana/athena-saphana.yaml | 2 +- athena-snowflake/athena-snowflake-connection.yaml | 2 +- athena-snowflake/athena-snowflake.yaml | 2 +- athena-sqlserver/athena-sqlserver-connection.yaml | 2 +- athena-sqlserver/athena-sqlserver.yaml | 2 +- athena-synapse/athena-synapse-connection.yaml | 2 +- athena-synapse/athena-synapse.yaml | 2 +- athena-teradata/athena-teradata-connection.yaml | 2 +- athena-teradata/athena-teradata.yaml | 2 +- athena-timestream/athena-timestream-connection.yaml | 2 +- athena-timestream/athena-timestream.yaml | 2 +- athena-tpcds/athena-tpcds-connection.yaml | 2 +- athena-tpcds/athena-tpcds.yaml | 2 +- athena-udfs/athena-udfs.yaml | 2 +- athena-vertica/athena-vertica-connection.yaml | 2 +- athena-vertica/athena-vertica.yaml | 2 +- 63 files changed, 63 insertions(+), 63 deletions(-) diff --git a/athena-aws-cmdb/athena-aws-cmdb-connection.yaml b/athena-aws-cmdb/athena-aws-cmdb-connection.yaml index bc3b57bd76..e4ec86f8bc 100644 --- a/athena-aws-cmdb/athena-aws-cmdb-connection.yaml +++ b/athena-aws-cmdb/athena-aws-cmdb-connection.yaml @@ -46,7 +46,7 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - 
ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-aws-cmdb:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-aws-cmdb:2022.47.1' Description: "Enables Amazon Athena to communicate with various AWS Services, making your resource inventories accessible via SQL." Timeout: 900 MemorySize: 3008 diff --git a/athena-aws-cmdb/athena-aws-cmdb.yaml b/athena-aws-cmdb/athena-aws-cmdb.yaml index 28640a646d..4365e6781d 100644 --- a/athena-aws-cmdb/athena-aws-cmdb.yaml +++ b/athena-aws-cmdb/athena-aws-cmdb.yaml @@ -53,7 +53,7 @@ Resources: spill_prefix: !Ref SpillPrefix FunctionName: !Ref AthenaCatalogName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-aws-cmdb:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-aws-cmdb:2022.47.1' Description: "Enables Amazon Athena to communicate with various AWS Services, making your resource inventories accessible via SQL." 
Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-clickhouse/athena-clickhouse.yaml b/athena-clickhouse/athena-clickhouse.yaml index 39b4164381..259aae7198 100644 --- a/athena-clickhouse/athena-clickhouse.yaml +++ b/athena-clickhouse/athena-clickhouse.yaml @@ -71,7 +71,7 @@ Resources: default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-clickhouse:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-clickhouse:2022.47.1' Description: "Enables Amazon Athena to communicate with ClickHouse using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-cloudera-hive/athena-cloudera-hive-connection.yaml b/athena-cloudera-hive/athena-cloudera-hive-connection.yaml index ce0d4f3ba1..b37e5ef1b1 100644 --- a/athena-cloudera-hive/athena-cloudera-hive-connection.yaml +++ b/athena-cloudera-hive/athena-cloudera-hive-connection.yaml @@ -53,7 +53,7 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-cloudera-hive:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-cloudera-hive:2022.47.1' Description: "Enables Amazon Athena to communicate with Cloudera Hive using JDBC" Timeout: 900 MemorySize: 3008 diff --git a/athena-cloudera-hive/athena-cloudera-hive.yaml b/athena-cloudera-hive/athena-cloudera-hive.yaml index dc5d33b343..3095ec1407 100644 --- a/athena-cloudera-hive/athena-cloudera-hive.yaml +++ b/athena-cloudera-hive/athena-cloudera-hive.yaml @@ -66,7 +66,7 @@ Resources: default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub 
'292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-cloudera-hive:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-cloudera-hive:2022.47.1' Description: "Enables Amazon Athena to communicate with Coludera Hive using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-cloudera-impala/athena-cloudera-impala-connection.yaml b/athena-cloudera-impala/athena-cloudera-impala-connection.yaml index 78a8e5fc08..717dc5fc5f 100644 --- a/athena-cloudera-impala/athena-cloudera-impala-connection.yaml +++ b/athena-cloudera-impala/athena-cloudera-impala-connection.yaml @@ -53,7 +53,7 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-cloudera-impala:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-cloudera-impala:2022.47.1' Description: "Enables Amazon Athena to communicate with Cloudera Impala using JDBC" Timeout: 900 MemorySize: 3008 diff --git a/athena-cloudera-impala/athena-cloudera-impala.yaml b/athena-cloudera-impala/athena-cloudera-impala.yaml index efcd4a0a8e..812045c764 100644 --- a/athena-cloudera-impala/athena-cloudera-impala.yaml +++ b/athena-cloudera-impala/athena-cloudera-impala.yaml @@ -71,7 +71,7 @@ Resources: default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-cloudera-impala:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-cloudera-impala:2022.47.1' Description: "Enables Amazon Athena to communicate with Cloudera Impala using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git 
a/athena-cloudwatch-metrics/athena-cloudwatch-metrics-connection.yaml b/athena-cloudwatch-metrics/athena-cloudwatch-metrics-connection.yaml index 59dde9ff37..c5bae85d66 100644 --- a/athena-cloudwatch-metrics/athena-cloudwatch-metrics-connection.yaml +++ b/athena-cloudwatch-metrics/athena-cloudwatch-metrics-connection.yaml @@ -46,7 +46,7 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-cloudwatch-metrics:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-cloudwatch-metrics:2022.47.1' Description: "Enables Amazon Athena to communicate with Cloudwatch Metrics, making your metrics data accessible via SQL" Timeout: 900 MemorySize: 3008 diff --git a/athena-cloudwatch-metrics/athena-cloudwatch-metrics.yaml b/athena-cloudwatch-metrics/athena-cloudwatch-metrics.yaml index fd66c1081a..974b979e37 100644 --- a/athena-cloudwatch-metrics/athena-cloudwatch-metrics.yaml +++ b/athena-cloudwatch-metrics/athena-cloudwatch-metrics.yaml @@ -53,7 +53,7 @@ Resources: spill_prefix: !Ref SpillPrefix FunctionName: !Ref AthenaCatalogName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-cloudwatch-metrics:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-cloudwatch-metrics:2022.47.1' Description: "Enables Amazon Athena to communicate with Cloudwatch Metrics, making your metrics data accessible via SQL" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-cloudwatch/athena-cloudwatch-connection.yaml b/athena-cloudwatch/athena-cloudwatch-connection.yaml index 14f7969ea2..6be26a2409 100644 --- a/athena-cloudwatch/athena-cloudwatch-connection.yaml +++ b/athena-cloudwatch/athena-cloudwatch-connection.yaml @@ -46,7 +46,7 @@ 
Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-cloudwatch:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-cloudwatch:2022.47.1' Description: "Enables Amazon Athena to communicate with Cloudwatch, making your log accessible via SQL" Timeout: 900 MemorySize: 3008 diff --git a/athena-cloudwatch/athena-cloudwatch.yaml b/athena-cloudwatch/athena-cloudwatch.yaml index 8e7e7901a8..15eef67930 100644 --- a/athena-cloudwatch/athena-cloudwatch.yaml +++ b/athena-cloudwatch/athena-cloudwatch.yaml @@ -67,7 +67,7 @@ Resources: kms_key_id: !If [HasKMSKeyId, !Ref KMSKeyId, !Ref "AWS::NoValue"] FunctionName: !Ref AthenaCatalogName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-cloudwatch:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-cloudwatch:2022.47.1' Description: "Enables Amazon Athena to communicate with Cloudwatch, making your log accessible via SQL" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-datalakegen2/athena-datalakegen2-connection.yaml b/athena-datalakegen2/athena-datalakegen2-connection.yaml index 2fbcf8b4d4..6978dbc462 100644 --- a/athena-datalakegen2/athena-datalakegen2-connection.yaml +++ b/athena-datalakegen2/athena-datalakegen2-connection.yaml @@ -61,7 +61,7 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-datalakegen2:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-datalakegen2:2022.47.1' Description: "Enables Amazon Athena to communicate with DataLake Gen2 
using JDBC" Timeout: 900 MemorySize: 3008 diff --git a/athena-datalakegen2/athena-datalakegen2.yaml b/athena-datalakegen2/athena-datalakegen2.yaml index 0f2f19a92a..5890402513 100644 --- a/athena-datalakegen2/athena-datalakegen2.yaml +++ b/athena-datalakegen2/athena-datalakegen2.yaml @@ -72,7 +72,7 @@ Resources: default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-datalakegen2:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-datalakegen2:2022.47.1' Description: "Enables Amazon Athena to communicate with DataLake Gen2 using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-db2-as400/athena-db2-as400-connection.yaml b/athena-db2-as400/athena-db2-as400-connection.yaml index 65729914d6..17ed8ee54e 100644 --- a/athena-db2-as400/athena-db2-as400-connection.yaml +++ b/athena-db2-as400/athena-db2-as400-connection.yaml @@ -62,7 +62,7 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-db2-as400:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-db2-as400:2022.47.1' Description: "Enables Amazon Athena to communicate with DB2 on iSeries (AS400) using JDBC" Timeout: 900 MemorySize: 3008 diff --git a/athena-db2-as400/athena-db2-as400.yaml b/athena-db2-as400/athena-db2-as400.yaml index 1d7643dae8..ea0a331051 100644 --- a/athena-db2-as400/athena-db2-as400.yaml +++ b/athena-db2-as400/athena-db2-as400.yaml @@ -73,7 +73,7 @@ Resources: default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub
'292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-db2-as400:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-db2-as400:2022.47.1' Description: "Enables Amazon Athena to communicate with DB2 on iSeries (AS400) using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-db2/athena-db2-connection.yaml b/athena-db2/athena-db2-connection.yaml index 1c00ad6c01..27222f077c 100644 --- a/athena-db2/athena-db2-connection.yaml +++ b/athena-db2/athena-db2-connection.yaml @@ -62,7 +62,7 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-db2:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-db2:2022.47.1' Description: "Enables Amazon Athena to communicate with DB2 using JDBC" Timeout: 900 MemorySize: 3008 diff --git a/athena-db2/athena-db2.yaml b/athena-db2/athena-db2.yaml index 4a930929e8..7508f16712 100644 --- a/athena-db2/athena-db2.yaml +++ b/athena-db2/athena-db2.yaml @@ -73,7 +73,7 @@ Resources: default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-db2:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-db2:2022.47.1' Description: "Enables Amazon Athena to communicate with DB2 using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-docdb/athena-docdb-connection.yaml b/athena-docdb/athena-docdb-connection.yaml index 4c8bb80129..6caa42b15e 100644 --- a/athena-docdb/athena-docdb-connection.yaml +++ b/athena-docdb/athena-docdb-connection.yaml @@ -55,7 +55,7 @@ Resources: glue_connection: !Ref GlueConnection 
FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-docdb:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-docdb:2022.47.1' Description: "Enables Amazon Athena to communicate with DocumentDB, making your DocumentDB data accessible via SQL." Timeout: 900 MemorySize: 3008 diff --git a/athena-docdb/athena-docdb.yaml b/athena-docdb/athena-docdb.yaml index b713852b26..588b05f52e 100644 --- a/athena-docdb/athena-docdb.yaml +++ b/athena-docdb/athena-docdb.yaml @@ -67,7 +67,7 @@ Resources: default_docdb: !Ref DocDBConnectionString FunctionName: !Ref AthenaCatalogName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-docdb:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-docdb:2022.47.1' Description: "Enables Amazon Athena to communicate with DocumentDB, making your DocumentDB data accessible via SQL." 
Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-dynamodb/athena-dynamodb-connection.yaml b/athena-dynamodb/athena-dynamodb-connection.yaml index e91f0c2fd0..505d1b1e7f 100644 --- a/athena-dynamodb/athena-dynamodb-connection.yaml +++ b/athena-dynamodb/athena-dynamodb-connection.yaml @@ -46,7 +46,7 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-dynamodb:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-dynamodb:2022.47.1' Description: "Enables Amazon Athena to communicate with DynamoDB, making your tables accessible via SQL" Timeout: 900 MemorySize: 3008 diff --git a/athena-dynamodb/athena-dynamodb.yaml b/athena-dynamodb/athena-dynamodb.yaml index 4e7e91f81c..fe07b4c27f 100644 --- a/athena-dynamodb/athena-dynamodb.yaml +++ b/athena-dynamodb/athena-dynamodb.yaml @@ -67,7 +67,7 @@ Resources: kms_key_id: !If [HasKMSKeyId, !Ref KMSKeyId, !Ref "AWS::NoValue"] FunctionName: !Ref AthenaCatalogName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-dynamodb:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-dynamodb:2022.47.1' Description: "Enables Amazon Athena to communicate with DynamoDB, making your tables accessible via SQL" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-elasticsearch/athena-elasticsearch-connection.yaml b/athena-elasticsearch/athena-elasticsearch-connection.yaml index 9ec31ee486..6e58f5e8f1 100644 --- a/athena-elasticsearch/athena-elasticsearch-connection.yaml +++ b/athena-elasticsearch/athena-elasticsearch-connection.yaml @@ -63,7 +63,7 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - 
ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-elasticsearch:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-elasticsearch:2022.47.1' Description: "The Elasticsearch Lambda Connector provides Athena users the ability to query data stored on Elasticsearch clusters." Timeout: 900 MemorySize: 3008 diff --git a/athena-elasticsearch/athena-elasticsearch.yaml b/athena-elasticsearch/athena-elasticsearch.yaml index c50e97a7aa..7330324293 100644 --- a/athena-elasticsearch/athena-elasticsearch.yaml +++ b/athena-elasticsearch/athena-elasticsearch.yaml @@ -103,7 +103,7 @@ Resources: query_scroll_timeout: !Ref QueryScrollTimeout FunctionName: !Sub "${AthenaCatalogName}" PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-elasticsearch:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-elasticsearch:2022.47.1' Description: "The Elasticsearch Lambda Connector provides Athena users the ability to query data stored on Elasticsearch clusters." 
Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-gcs/athena-gcs-connection.yaml b/athena-gcs/athena-gcs-connection.yaml index de88eaa909..a0048e7076 100644 --- a/athena-gcs/athena-gcs-connection.yaml +++ b/athena-gcs/athena-gcs-connection.yaml @@ -50,7 +50,7 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-gcs:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-gcs:2022.47.1' Description: "Amazon Athena GCS Connector" Timeout: 900 MemorySize: 3008 diff --git a/athena-gcs/athena-gcs.yaml b/athena-gcs/athena-gcs.yaml index 427dee6221..a12e60d369 100644 --- a/athena-gcs/athena-gcs.yaml +++ b/athena-gcs/athena-gcs.yaml @@ -60,7 +60,7 @@ Resources: secret_manager_gcp_creds_name: !Ref GCSSecretName FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-gcs:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-gcs:2022.47.1' Description: "Amazon Athena GCS Connector" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-google-bigquery/athena-google-bigquery-connection.yaml b/athena-google-bigquery/athena-google-bigquery-connection.yaml index b89e30cfb1..b69b52d7bb 100644 --- a/athena-google-bigquery/athena-google-bigquery-connection.yaml +++ b/athena-google-bigquery/athena-google-bigquery-connection.yaml @@ -63,7 +63,7 @@ Resources: GOOGLE_APPLICATION_CREDENTIALS: '/tmp/service-account.json' FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-google-bigquery:2022.47.1' + ImageUri: !Sub 
'292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-google-bigquery:2022.47.1' Description: "Enables Amazon Athena to communicate with BigQuery using Google SDK" Timeout: 900 MemorySize: 3008 diff --git a/athena-google-bigquery/athena-google-bigquery.yaml b/athena-google-bigquery/athena-google-bigquery.yaml index 83c481350e..f75b53334f 100644 --- a/athena-google-bigquery/athena-google-bigquery.yaml +++ b/athena-google-bigquery/athena-google-bigquery.yaml @@ -80,7 +80,7 @@ Resources: GOOGLE_APPLICATION_CREDENTIALS: '/tmp/service-account.json' FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-google-bigquery:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-google-bigquery:2022.47.1' Description: "Enables Amazon Athena to communicate with BigQuery using Google SDK" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-hbase/athena-hbase-connection.yaml b/athena-hbase/athena-hbase-connection.yaml index 792fd6b0db..e8b9e8bd52 100644 --- a/athena-hbase/athena-hbase-connection.yaml +++ b/athena-hbase/athena-hbase-connection.yaml @@ -55,7 +55,7 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-hbase:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-hbase:2022.47.1' Description: "Enables Amazon Athena to communicate with HBase, making your HBase data accessible via SQL" Timeout: 900 MemorySize: 3008 diff --git a/athena-hbase/athena-hbase.yaml b/athena-hbase/athena-hbase.yaml index cb059e5088..c9d70a24e0 100644 --- a/athena-hbase/athena-hbase.yaml +++ b/athena-hbase/athena-hbase.yaml @@ -86,7 +86,7 @@ Resources: hbase_rpc_protection: !Ref 
HbaseRpcProtection FunctionName: !Ref AthenaCatalogName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-hbase:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-hbase:2022.47.1' Description: "Enables Amazon Athena to communicate with HBase, making your HBase data accessible via SQL" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-hortonworks-hive/athena-hortonworks-hive-connection.yaml b/athena-hortonworks-hive/athena-hortonworks-hive-connection.yaml index 44ad427f58..397a08c04b 100644 --- a/athena-hortonworks-hive/athena-hortonworks-hive-connection.yaml +++ b/athena-hortonworks-hive/athena-hortonworks-hive-connection.yaml @@ -59,7 +59,7 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-hortonworks-hive:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-hortonworks-hive:2022.47.1' Description: "Enables Amazon Athena to communicate with Hortonworks Hive using JDBC" Timeout: 900 MemorySize: 3008 diff --git a/athena-hortonworks-hive/athena-hortonworks-hive.yaml b/athena-hortonworks-hive/athena-hortonworks-hive.yaml index 138c5b9883..4180c48d15 100644 --- a/athena-hortonworks-hive/athena-hortonworks-hive.yaml +++ b/athena-hortonworks-hive/athena-hortonworks-hive.yaml @@ -70,7 +70,7 @@ Resources: default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-hortonworks-hive:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-hortonworks-hive:2022.47.1' Description: "Enables Amazon Athena to communicate with 
Hortonworks Hive using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-kafka/athena-kafka.yaml b/athena-kafka/athena-kafka.yaml index ddc96bf785..4b88028448 100644 --- a/athena-kafka/athena-kafka.yaml +++ b/athena-kafka/athena-kafka.yaml @@ -102,7 +102,7 @@ Resources: auth_type: !Ref AuthType FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-kafka:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-kafka:2022.47.1' Description: "Enables Amazon Athena to communicate with Kafka clusters" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-msk/athena-msk-connection.yaml b/athena-msk/athena-msk-connection.yaml index 43b9277df9..8f18bbbe35 100644 --- a/athena-msk/athena-msk-connection.yaml +++ b/athena-msk/athena-msk-connection.yaml @@ -61,7 +61,7 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-msk:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-msk:2022.47.1' Description: "Enables Amazon Athena to communicate with MSK clusters" Timeout: 900 MemorySize: 3008 diff --git a/athena-msk/athena-msk.yaml b/athena-msk/athena-msk.yaml index 90beba4f76..a6dd3b0f6a 100644 --- a/athena-msk/athena-msk.yaml +++ b/athena-msk/athena-msk.yaml @@ -97,7 +97,7 @@ Resources: auth_type: !Ref AuthType FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-msk:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-msk:2022.47.1' Description: "Enables Amazon Athena to communicate with MSK 
clusters" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-mysql/athena-mysql-connection.yaml b/athena-mysql/athena-mysql-connection.yaml index 1a51271003..0134e1af00 100644 --- a/athena-mysql/athena-mysql-connection.yaml +++ b/athena-mysql/athena-mysql-connection.yaml @@ -55,7 +55,7 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-mysql:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-mysql:2022.47.1' Description: "Enables Amazon Athena to communicate with MySQL using JDBC" Timeout: 900 MemorySize: 3008 diff --git a/athena-mysql/athena-mysql.yaml b/athena-mysql/athena-mysql.yaml index 9908f13da3..31c9e11f69 100644 --- a/athena-mysql/athena-mysql.yaml +++ b/athena-mysql/athena-mysql.yaml @@ -71,7 +71,7 @@ Resources: default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-mysql:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-mysql:2022.47.1' Description: "Enables Amazon Athena to communicate with MySQL using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-neptune/athena-neptune-connection.yaml b/athena-neptune/athena-neptune-connection.yaml index f1c02b0eab..46705c4938 100644 --- a/athena-neptune/athena-neptune-connection.yaml +++ b/athena-neptune/athena-neptune-connection.yaml @@ -58,7 +58,7 @@ Resources: SERVICE_REGION: !Ref AWS::Region FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-neptune:2022.47.1' + ImageUri: !Sub 
'292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-neptune:2022.47.1' Description: "Enables Amazon Athena to communicate with Neptune, making your Neptune graph data accessible via SQL." Timeout: 900 MemorySize: 3008 diff --git a/athena-neptune/athena-neptune.yaml b/athena-neptune/athena-neptune.yaml index 10a5d36a1e..b1b92e2c9c 100644 --- a/athena-neptune/athena-neptune.yaml +++ b/athena-neptune/athena-neptune.yaml @@ -97,7 +97,7 @@ Resources: enable_caseinsensitivematch: !Ref EnableCaseInsensitiveMatch FunctionName: !Ref AthenaCatalogName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-neptune:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-neptune:2022.47.1' Description: "Enables Amazon Athena to communicate with Neptune, making your Neptune graph data accessible via SQL." Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-oracle/athena-oracle-connection.yaml b/athena-oracle/athena-oracle-connection.yaml index bcc3a830a3..e6f47fce93 100644 --- a/athena-oracle/athena-oracle-connection.yaml +++ b/athena-oracle/athena-oracle-connection.yaml @@ -56,7 +56,7 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-oracle:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-oracle:2022.47.1' Description: "Enables Amazon Athena to communicate with ORACLE using JDBC" Timeout: 900 MemorySize: 3008 diff --git a/athena-oracle/athena-oracle.yaml b/athena-oracle/athena-oracle.yaml index 7e9a5fe0dd..e086cf82cb 100644 --- a/athena-oracle/athena-oracle.yaml +++ b/athena-oracle/athena-oracle.yaml @@ -83,7 +83,7 @@ Resources: is_FIPS_Enabled: !Ref IsFIPSEnabled FunctionName: !Ref 
LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-oracle:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-oracle:2022.47.1' Description: "Enables Amazon Athena to communicate with ORACLE using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-postgresql/athena-postgresql-connection.yaml b/athena-postgresql/athena-postgresql-connection.yaml index 94f5710312..59905e0528 100644 --- a/athena-postgresql/athena-postgresql-connection.yaml +++ b/athena-postgresql/athena-postgresql-connection.yaml @@ -64,7 +64,7 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-postgresql:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-postgresql:2022.47.1' Description: "Enables Amazon Athena to communicate with PostgreSQL using JDBC" Runtime: java11 Timeout: 900 diff --git a/athena-postgresql/athena-postgresql.yaml b/athena-postgresql/athena-postgresql.yaml index 27449a022d..dccd609e78 100644 --- a/athena-postgresql/athena-postgresql.yaml +++ b/athena-postgresql/athena-postgresql.yaml @@ -82,7 +82,7 @@ Resources: default_scale: !Ref DefaultScale FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-postgresql:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-postgresql:2022.47.1' ImageConfig: Command: [ !Sub "com.amazonaws.athena.connectors.postgresql.${CompositeHandler}" ] Description: "Enables Amazon Athena to communicate with PostgreSQL using JDBC" diff --git a/athena-redis/athena-redis-connection.yaml 
b/athena-redis/athena-redis-connection.yaml index 78813117e5..b901cfb39b 100644 --- a/athena-redis/athena-redis-connection.yaml +++ b/athena-redis/athena-redis-connection.yaml @@ -53,7 +53,7 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-redis:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-redis:2022.47.1' Description: "Enables Amazon Athena to communicate with Redis, making your Redis data accessible via SQL" Timeout: 900 MemorySize: 3008 diff --git a/athena-redis/athena-redis.yaml b/athena-redis/athena-redis.yaml index 1fabf14d1d..c3bc541752 100644 --- a/athena-redis/athena-redis.yaml +++ b/athena-redis/athena-redis.yaml @@ -82,7 +82,7 @@ Resources: qpt_db_number: !Ref QPTConnectionDBNumber FunctionName: !Ref AthenaCatalogName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-redis:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-redis:2022.47.1' Description: "Enables Amazon Athena to communicate with Redis, making your Redis data accessible via SQL" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-redshift/athena-redshift-connection.yaml b/athena-redshift/athena-redshift-connection.yaml index 8aec82475e..806954ed0b 100644 --- a/athena-redshift/athena-redshift-connection.yaml +++ b/athena-redshift/athena-redshift-connection.yaml @@ -53,7 +53,7 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-redshift:2022.47.1' + ImageUri: !Sub 
'292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-redshift:2022.47.1' Description: "Enables Amazon Athena to communicate with Redshift using JDBC" Timeout: 900 MemorySize: 3008 diff --git a/athena-redshift/athena-redshift.yaml b/athena-redshift/athena-redshift.yaml index 2d950373fb..4a4df3c79b 100644 --- a/athena-redshift/athena-redshift.yaml +++ b/athena-redshift/athena-redshift.yaml @@ -80,7 +80,7 @@ Resources: kms_key_id: !If [HasKMSKeyId, !Ref KMSKeyId, !Ref "AWS::NoValue"] FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-redshift:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-redshift:2022.47.1' Description: "Enables Amazon Athena to communicate with Redshift using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-saphana/athena-saphana-connection.yaml b/athena-saphana/athena-saphana-connection.yaml index 7a7cdd3dd7..9d6440d57f 100644 --- a/athena-saphana/athena-saphana-connection.yaml +++ b/athena-saphana/athena-saphana-connection.yaml @@ -59,7 +59,7 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-saphana:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-saphana:2022.47.1' Description: "Enables Amazon Athena to communicate with SAP HANA using JDBC" Timeout: 900 MemorySize: 3008 diff --git a/athena-saphana/athena-saphana.yaml b/athena-saphana/athena-saphana.yaml index 68a4bdd254..7918bb1b32 100644 --- a/athena-saphana/athena-saphana.yaml +++ b/athena-saphana/athena-saphana.yaml @@ -70,7 +70,7 @@ Resources: default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName PackageType: "Image" -
ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-saphana:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-saphana:2022.47.1' Description: "Enables Amazon Athena to communicate with SAP HANA using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-snowflake/athena-snowflake-connection.yaml b/athena-snowflake/athena-snowflake-connection.yaml index e9496f04e1..c3273eddac 100644 --- a/athena-snowflake/athena-snowflake-connection.yaml +++ b/athena-snowflake/athena-snowflake-connection.yaml @@ -59,7 +59,7 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-snowflake:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-snowflake:2022.47.1' Description: "Enables Amazon Athena to communicate with Snowflake using JDBC" Timeout: 900 MemorySize: 3008 diff --git a/athena-snowflake/athena-snowflake.yaml b/athena-snowflake/athena-snowflake.yaml index 651883f9a5..67bac6e7aa 100644 --- a/athena-snowflake/athena-snowflake.yaml +++ b/athena-snowflake/athena-snowflake.yaml @@ -70,7 +70,7 @@ Resources: default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-snowflake:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-snowflake:2022.47.1' Description: "Enables Amazon Athena to communicate with Snowflake using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-sqlserver/athena-sqlserver-connection.yaml b/athena-sqlserver/athena-sqlserver-connection.yaml index 39c49754eb..72693531f7 100644 ---
a/athena-sqlserver/athena-sqlserver-connection.yaml +++ b/athena-sqlserver/athena-sqlserver-connection.yaml @@ -59,7 +59,7 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-sqlserver:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-sqlserver:2022.47.1' Description: "Enables Amazon Athena to communicate with SQLSERVER using JDBC" Timeout: 900 MemorySize: 3008 diff --git a/athena-sqlserver/athena-sqlserver.yaml b/athena-sqlserver/athena-sqlserver.yaml index f66ab91cce..bec2cbcf9c 100644 --- a/athena-sqlserver/athena-sqlserver.yaml +++ b/athena-sqlserver/athena-sqlserver.yaml @@ -77,7 +77,7 @@ Resources: default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-sqlserver:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-sqlserver:2022.47.1' Description: "Enables Amazon Athena to communicate with SQLSERVER using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-synapse/athena-synapse-connection.yaml b/athena-synapse/athena-synapse-connection.yaml index e2779b2fb0..8e0e0951e2 100644 --- a/athena-synapse/athena-synapse-connection.yaml +++ b/athena-synapse/athena-synapse-connection.yaml @@ -61,7 +61,7 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-synapse:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-synapse:2022.47.1' Description: "Enables Amazon Athena to communicate with SYNAPSE using JDBC" Timeout:
900 MemorySize: 3008 diff --git a/athena-synapse/athena-synapse.yaml b/athena-synapse/athena-synapse.yaml index 106c0d94b4..05ab974e75 100644 --- a/athena-synapse/athena-synapse.yaml +++ b/athena-synapse/athena-synapse.yaml @@ -79,7 +79,7 @@ Resources: default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-synapse:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-synapse:2022.47.1' Description: "Enables Amazon Athena to communicate with SYNAPSE using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-teradata/athena-teradata-connection.yaml b/athena-teradata/athena-teradata-connection.yaml index 576edd79bd..4b3271cfd9 100644 --- a/athena-teradata/athena-teradata-connection.yaml +++ b/athena-teradata/athena-teradata-connection.yaml @@ -57,7 +57,7 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-teradata:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-teradata:2022.47.1' Description: "Enables Amazon Athena to communicate with Teradata using JDBC" Timeout: 900 MemorySize: 3008 diff --git a/athena-teradata/athena-teradata.yaml b/athena-teradata/athena-teradata.yaml index 8b4760a912..3226e6081d 100644 --- a/athena-teradata/athena-teradata.yaml +++ b/athena-teradata/athena-teradata.yaml @@ -80,7 +80,7 @@ Resources: Layers: - !Ref LambdaJDBCLayername PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-teradata:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-teradata:2022.47.1' Description:
"Enables Amazon Athena to communicate with Teradata using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-timestream/athena-timestream-connection.yaml b/athena-timestream/athena-timestream-connection.yaml index f2f3c7cd84..5d73156b3c 100644 --- a/athena-timestream/athena-timestream-connection.yaml +++ b/athena-timestream/athena-timestream-connection.yaml @@ -44,7 +44,7 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-timestream:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-timestream:2022.47.1' Description: "Enables Amazon Athena to communicate with Amazon Timestream, making your time series data accessible from Athena." Timeout: 900 MemorySize: 3008 diff --git a/athena-timestream/athena-timestream.yaml b/athena-timestream/athena-timestream.yaml index 908bc01d6e..0f48b9393b 100644 --- a/athena-timestream/athena-timestream.yaml +++ b/athena-timestream/athena-timestream.yaml @@ -53,7 +53,7 @@ Resources: spill_prefix: !Ref SpillPrefix FunctionName: !Ref AthenaCatalogName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-timestream:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-timestream:2022.47.1' Description: "Enables Amazon Athena to communicate with Amazon Timestream, making your time series data accessible from Athena." 
Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-tpcds/athena-tpcds-connection.yaml b/athena-tpcds/athena-tpcds-connection.yaml index a91cbda763..db51aa5664 100644 --- a/athena-tpcds/athena-tpcds-connection.yaml +++ b/athena-tpcds/athena-tpcds-connection.yaml @@ -46,7 +46,7 @@ Resources: glue_connection: Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-tpcds:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-tpcds:2022.47.1' Description: "This connector enables Amazon Athena to communicate with a randomly generated TPC-DS data source." Timeout: 900 MemorySize: 3008 diff --git a/athena-tpcds/athena-tpcds.yaml b/athena-tpcds/athena-tpcds.yaml index 40b76f7194..4086565f7e 100644 --- a/athena-tpcds/athena-tpcds.yaml +++ b/athena-tpcds/athena-tpcds.yaml @@ -53,7 +53,7 @@ Resources: spill_prefix: !Ref SpillPrefix FunctionName: !Ref AthenaCatalogName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-tpcds:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-tpcds:2022.47.1' Description: "This connector enables Amazon Athena to communicate with a randomly generated TPC-DS data source." 
Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-udfs/athena-udfs.yaml b/athena-udfs/athena-udfs.yaml index 2bad60b3e0..ff428e4c0d 100644 --- a/athena-udfs/athena-udfs.yaml +++ b/athena-udfs/athena-udfs.yaml @@ -40,7 +40,7 @@ Resources: Properties: FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-udfs:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-udfs:2022.47.1' Description: "This connector enables Amazon Athena to leverage common UDFs made available via Lambda." Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-vertica/athena-vertica-connection.yaml b/athena-vertica/athena-vertica-connection.yaml index 71cadb8bec..778cc7511c 100644 --- a/athena-vertica/athena-vertica-connection.yaml +++ b/athena-vertica/athena-vertica-connection.yaml @@ -62,7 +62,7 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-vertica:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-vertica:2022.47.1' Description: "Amazon Athena Vertica Connector" Timeout: 900 MemorySize: 3008 diff --git a/athena-vertica/athena-vertica.yaml b/athena-vertica/athena-vertica.yaml index 96a07cb42a..d02c913b3a 100644 --- a/athena-vertica/athena-vertica.yaml +++ b/athena-vertica/athena-vertica.yaml @@ -83,7 +83,7 @@ Resources: FunctionName: !Sub "${AthenaCatalogName}" PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-vertica:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-vertica:2022.47.1' Description: "Amazon Athena Vertica Connector" 
Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory From 3e7ce1eb3c8e51ed54691c6b4d9c7774150d5c68 Mon Sep 17 00:00:00 2001 From: Aimery Methena Date: Fri, 20 Sep 2024 11:32:40 -0400 Subject: [PATCH 31/87] missed a Runtime property --- athena-postgresql/athena-postgresql-connection.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/athena-postgresql/athena-postgresql-connection.yaml b/athena-postgresql/athena-postgresql-connection.yaml index 59905e0528..26e1c096bb 100644 --- a/athena-postgresql/athena-postgresql-connection.yaml +++ b/athena-postgresql/athena-postgresql-connection.yaml @@ -66,7 +66,6 @@ Resources: PackageType: "Image" ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-postgresql:2022.47.1' Description: "Enables Amazon Athena to communicate with PostgreSQL using JDBC" - Runtime: java11 Timeout: 900 MemorySize: 3008 Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn] From e03a1089a9890149229dda7cdd7a68b7023ae5af Mon Sep 17 00:00:00 2001 From: Aimery Methena Date: Mon, 23 Sep 2024 10:51:08 -0400 Subject: [PATCH 32/87] update all ImageURI to use correct repo name (no prod) --- athena-aws-cmdb/athena-aws-cmdb.yaml | 2 +- athena-clickhouse/athena-clickhouse.yaml | 2 +- athena-cloudera-hive/athena-cloudera-hive.yaml | 2 +- athena-cloudera-impala/athena-cloudera-impala.yaml | 2 +- athena-cloudwatch-metrics/athena-cloudwatch-metrics.yaml | 2 +- athena-cloudwatch/athena-cloudwatch.yaml | 2 +- athena-datalakegen2/athena-datalakegen2.yaml | 2 +- athena-db2-as400/athena-db2-as400.yaml | 2 +- athena-db2/athena-db2.yaml | 2 +- athena-docdb/athena-docdb.yaml | 2 +- athena-dynamodb/athena-dynamodb.yaml | 2 +- athena-elasticsearch/athena-elasticsearch.yaml | 2 +- athena-gcs/athena-gcs.yaml | 2 +- athena-google-bigquery/athena-google-bigquery.yaml | 2 +- athena-hbase/athena-hbase.yaml | 2 +- athena-hortonworks-hive/athena-hortonworks-hive.yaml | 2 +- athena-kafka/athena-kafka.yaml | 2 +- 
athena-msk/athena-msk.yaml | 2 +- athena-mysql/athena-mysql.yaml | 2 +- athena-neptune/athena-neptune.yaml | 2 +- athena-oracle/athena-oracle.yaml | 2 +- athena-postgresql/athena-postgresql.yaml | 2 +- athena-redis/athena-redis.yaml | 2 +- athena-redshift/athena-redshift.yaml | 2 +- athena-saphana/athena-saphana.yaml | 2 +- athena-snowflake/athena-snowflake.yaml | 2 +- athena-sqlserver/athena-sqlserver.yaml | 2 +- athena-synapse/athena-synapse.yaml | 2 +- athena-teradata/athena-teradata.yaml | 2 +- athena-timestream/athena-timestream.yaml | 2 +- athena-tpcds/athena-tpcds.yaml | 2 +- athena-udfs/athena-udfs.yaml | 2 +- athena-vertica/athena-vertica.yaml | 2 +- 33 files changed, 33 insertions(+), 33 deletions(-) diff --git a/athena-aws-cmdb/athena-aws-cmdb.yaml b/athena-aws-cmdb/athena-aws-cmdb.yaml index 28640a646d..4365e6781d 100644 --- a/athena-aws-cmdb/athena-aws-cmdb.yaml +++ b/athena-aws-cmdb/athena-aws-cmdb.yaml @@ -53,7 +53,7 @@ Resources: spill_prefix: !Ref SpillPrefix FunctionName: !Ref AthenaCatalogName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-aws-cmdb:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-aws-cmdb:2022.47.1' Description: "Enables Amazon Athena to communicate with various AWS Services, making your resource inventories accessible via SQL." 
Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-clickhouse/athena-clickhouse.yaml b/athena-clickhouse/athena-clickhouse.yaml index 39b4164381..259aae7198 100644 --- a/athena-clickhouse/athena-clickhouse.yaml +++ b/athena-clickhouse/athena-clickhouse.yaml @@ -71,7 +71,7 @@ Resources: default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-clickhouse:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-clickhouse:2022.47.1' Description: "Enables Amazon Athena to communicate with ClickHouse using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-cloudera-hive/athena-cloudera-hive.yaml b/athena-cloudera-hive/athena-cloudera-hive.yaml index 23f5201623..70f2775b1b 100644 --- a/athena-cloudera-hive/athena-cloudera-hive.yaml +++ b/athena-cloudera-hive/athena-cloudera-hive.yaml @@ -66,7 +66,7 @@ Resources: default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-cloudera-hive:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-cloudera-hive:2022.47.1' Description: "Enables Amazon Athena to communicate with Coludera Hive using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-cloudera-impala/athena-cloudera-impala.yaml b/athena-cloudera-impala/athena-cloudera-impala.yaml index 399cad4769..60dc37ed9e 100644 --- a/athena-cloudera-impala/athena-cloudera-impala.yaml +++ b/athena-cloudera-impala/athena-cloudera-impala.yaml @@ -71,7 +71,7 @@ Resources: default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub 
'292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-cloudera-impala:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-cloudera-impala:2022.47.1' Description: "Enables Amazon Athena to communicate with Cloudera Impala using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-cloudwatch-metrics/athena-cloudwatch-metrics.yaml b/athena-cloudwatch-metrics/athena-cloudwatch-metrics.yaml index fd66c1081a..974b979e37 100644 --- a/athena-cloudwatch-metrics/athena-cloudwatch-metrics.yaml +++ b/athena-cloudwatch-metrics/athena-cloudwatch-metrics.yaml @@ -53,7 +53,7 @@ Resources: spill_prefix: !Ref SpillPrefix FunctionName: !Ref AthenaCatalogName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-cloudwatch-metrics:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-cloudwatch-metrics:2022.47.1' Description: "Enables Amazon Athena to communicate with Cloudwatch Metrics, making your metrics data accessible via SQL" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-cloudwatch/athena-cloudwatch.yaml b/athena-cloudwatch/athena-cloudwatch.yaml index 82a58b4e1b..2e301dc882 100644 --- a/athena-cloudwatch/athena-cloudwatch.yaml +++ b/athena-cloudwatch/athena-cloudwatch.yaml @@ -67,7 +67,7 @@ Resources: kms_key_id: !If [HasKMSKeyId, !Ref KMSKeyId, !Ref "AWS::NoValue"] FunctionName: !Ref AthenaCatalogName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-cloudwatch:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-cloudwatch:2022.47.1' Description: "Enables Amazon Athena to communicate with Cloudwatch, making your log accessible via SQL" Timeout: !Ref LambdaTimeout MemorySize: !Ref 
LambdaMemory diff --git a/athena-datalakegen2/athena-datalakegen2.yaml b/athena-datalakegen2/athena-datalakegen2.yaml index 0f2f19a92a..5890402513 100644 --- a/athena-datalakegen2/athena-datalakegen2.yaml +++ b/athena-datalakegen2/athena-datalakegen2.yaml @@ -72,7 +72,7 @@ Resources: default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-datalakegen2:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-datalakegen2:2022.47.1' Description: "Enables Amazon Athena to communicate with DataLake Gen2 using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-db2-as400/athena-db2-as400.yaml b/athena-db2-as400/athena-db2-as400.yaml index 1d7643dae8..ea0a331051 100644 --- a/athena-db2-as400/athena-db2-as400.yaml +++ b/athena-db2-as400/athena-db2-as400.yaml @@ -73,7 +73,7 @@ Resources: default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-db2-as400:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-db2-as400:2022.47.1' Description: "Enables Amazon Athena to communicate with DB2 on iSeries (AS400) using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-db2/athena-db2.yaml b/athena-db2/athena-db2.yaml index 4a930929e8..7508f16712 100644 --- a/athena-db2/athena-db2.yaml +++ b/athena-db2/athena-db2.yaml @@ -73,7 +73,7 @@ Resources: default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-db2:2022.47.1' + ImageUri: !Sub 
'292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-db2:2022.47.1' Description: "Enables Amazon Athena to communicate with DB2 using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-docdb/athena-docdb.yaml b/athena-docdb/athena-docdb.yaml index b713852b26..588b05f52e 100644 --- a/athena-docdb/athena-docdb.yaml +++ b/athena-docdb/athena-docdb.yaml @@ -67,7 +67,7 @@ Resources: default_docdb: !Ref DocDBConnectionString FunctionName: !Ref AthenaCatalogName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-docdb:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-docdb:2022.47.1' Description: "Enables Amazon Athena to communicate with DocumentDB, making your DocumentDB data accessible via SQL." Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-dynamodb/athena-dynamodb.yaml b/athena-dynamodb/athena-dynamodb.yaml index d460c41e14..f8e7fc7be5 100644 --- a/athena-dynamodb/athena-dynamodb.yaml +++ b/athena-dynamodb/athena-dynamodb.yaml @@ -67,7 +67,7 @@ Resources: kms_key_id: !If [HasKMSKeyId, !Ref KMSKeyId, !Ref "AWS::NoValue"] FunctionName: !Ref AthenaCatalogName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-dynamodb:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-dynamodb:2022.47.1' Description: "Enables Amazon Athena to communicate with DynamoDB, making your tables accessible via SQL" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-elasticsearch/athena-elasticsearch.yaml b/athena-elasticsearch/athena-elasticsearch.yaml index c4106b1de8..c2e6603bf9 100644 --- a/athena-elasticsearch/athena-elasticsearch.yaml +++ b/athena-elasticsearch/athena-elasticsearch.yaml @@ -103,7 +103,7 @@ Resources: 
query_scroll_timeout: !Ref QueryScrollTimeout FunctionName: !Sub "${AthenaCatalogName}" PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-elasticsearch:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-elasticsearch:2022.47.1' Description: "The Elasticsearch Lambda Connector provides Athena users the ability to query data stored on Elasticsearch clusters." Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-gcs/athena-gcs.yaml b/athena-gcs/athena-gcs.yaml index 46079ad5f7..fa97bc7f86 100644 --- a/athena-gcs/athena-gcs.yaml +++ b/athena-gcs/athena-gcs.yaml @@ -60,7 +60,7 @@ Resources: secret_manager_gcp_creds_name: !Ref GCSSecretName FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-gcs:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-gcs:2022.47.1' Description: "Amazon Athena GCS Connector" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-google-bigquery/athena-google-bigquery.yaml b/athena-google-bigquery/athena-google-bigquery.yaml index 294b417504..6cdf0cb299 100644 --- a/athena-google-bigquery/athena-google-bigquery.yaml +++ b/athena-google-bigquery/athena-google-bigquery.yaml @@ -80,7 +80,7 @@ Resources: GOOGLE_APPLICATION_CREDENTIALS: '/tmp/service-account.json' FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-google-bigquery:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-google-bigquery:2022.47.1' Description: "Enables Amazon Athena to communicate with BigQuery using Google SDK" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory 
diff --git a/athena-hbase/athena-hbase.yaml b/athena-hbase/athena-hbase.yaml index cb059e5088..c9d70a24e0 100644 --- a/athena-hbase/athena-hbase.yaml +++ b/athena-hbase/athena-hbase.yaml @@ -86,7 +86,7 @@ Resources: hbase_rpc_protection: !Ref HbaseRpcProtection FunctionName: !Ref AthenaCatalogName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-hbase:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-hbase:2022.47.1' Description: "Enables Amazon Athena to communicate with HBase, making your HBase data accessible via SQL" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-hortonworks-hive/athena-hortonworks-hive.yaml b/athena-hortonworks-hive/athena-hortonworks-hive.yaml index 9b32050951..8f941be498 100644 --- a/athena-hortonworks-hive/athena-hortonworks-hive.yaml +++ b/athena-hortonworks-hive/athena-hortonworks-hive.yaml @@ -70,7 +70,7 @@ Resources: default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-hortonworks-hive:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-hortonworks-hive:2022.47.1' Description: "Enables Amazon Athena to communicate with Hortonworks Hive using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-kafka/athena-kafka.yaml b/athena-kafka/athena-kafka.yaml index 0c95d94491..27fd31b9c2 100644 --- a/athena-kafka/athena-kafka.yaml +++ b/athena-kafka/athena-kafka.yaml @@ -102,7 +102,7 @@ Resources: auth_type: !Ref AuthType FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-kafka:2022.47.1' + ImageUri: !Sub 
'292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-kafka:2022.47.1' Description: "Enables Amazon Athena to communicate with Kafka clusters" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-msk/athena-msk.yaml b/athena-msk/athena-msk.yaml index 00b63aa85f..4921ad4d07 100644 --- a/athena-msk/athena-msk.yaml +++ b/athena-msk/athena-msk.yaml @@ -97,7 +97,7 @@ Resources: auth_type: !Ref AuthType FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-msk:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-msk:2022.47.1' Description: "Enables Amazon Athena to communicate with MSK clusters" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-mysql/athena-mysql.yaml b/athena-mysql/athena-mysql.yaml index 49ec08fbd2..145745b880 100644 --- a/athena-mysql/athena-mysql.yaml +++ b/athena-mysql/athena-mysql.yaml @@ -71,7 +71,7 @@ Resources: default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-mysql:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-mysql:2022.47.1' Description: "Enables Amazon Athena to communicate with MySQL using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-neptune/athena-neptune.yaml b/athena-neptune/athena-neptune.yaml index 804d8e880c..114314291a 100644 --- a/athena-neptune/athena-neptune.yaml +++ b/athena-neptune/athena-neptune.yaml @@ -97,7 +97,7 @@ Resources: enable_caseinsensitivematch: !Ref EnableCaseInsensitiveMatch FunctionName: !Ref AthenaCatalogName PackageType: "Image" - ImageUri: !Sub 
'292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-neptune:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-neptune:2022.47.1' Description: "Enables Amazon Athena to communicate with Neptune, making your Neptune graph data accessible via SQL." Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-oracle/athena-oracle.yaml b/athena-oracle/athena-oracle.yaml index 7e9a5fe0dd..e086cf82cb 100644 --- a/athena-oracle/athena-oracle.yaml +++ b/athena-oracle/athena-oracle.yaml @@ -83,7 +83,7 @@ Resources: is_FIPS_Enabled: !Ref IsFIPSEnabled FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-oracle:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-oracle:2022.47.1' Description: "Enables Amazon Athena to communicate with ORACLE using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-postgresql/athena-postgresql.yaml b/athena-postgresql/athena-postgresql.yaml index beda73c0fd..30553623ec 100644 --- a/athena-postgresql/athena-postgresql.yaml +++ b/athena-postgresql/athena-postgresql.yaml @@ -82,7 +82,7 @@ Resources: default_scale: !Ref DefaultScale FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-postgresql:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-postgresql:2022.47.1' ImageConfig: Command: [ !Sub "com.amazonaws.athena.connectors.postgresql.${CompositeHandler}" ] Description: "Enables Amazon Athena to communicate with PostgreSQL using JDBC" diff --git a/athena-redis/athena-redis.yaml b/athena-redis/athena-redis.yaml index 1fabf14d1d..c3bc541752 100644 --- a/athena-redis/athena-redis.yaml 
+++ b/athena-redis/athena-redis.yaml @@ -82,7 +82,7 @@ Resources: qpt_db_number: !Ref QPTConnectionDBNumber FunctionName: !Ref AthenaCatalogName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-redis:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-redis:2022.47.1' Description: "Enables Amazon Athena to communicate with Redis, making your Redis data accessible via SQL" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-redshift/athena-redshift.yaml b/athena-redshift/athena-redshift.yaml index fd5f90fe5c..47cd238f89 100644 --- a/athena-redshift/athena-redshift.yaml +++ b/athena-redshift/athena-redshift.yaml @@ -80,7 +80,7 @@ Resources: kms_key_id: !If [HasKMSKeyId, !Ref KMSKeyId, !Ref "AWS::NoValue"] FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-redshift:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-redshift:2022.47.1' Description: "Enables Amazon Athena to communicate with Redshift using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-saphana/athena-saphana.yaml b/athena-saphana/athena-saphana.yaml index 66d1ee30f1..5a1d895933 100644 --- a/athena-saphana/athena-saphana.yaml +++ b/athena-saphana/athena-saphana.yaml @@ -70,7 +70,7 @@ Resources: default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-saphana:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-saphana:2022.47.1' Description: "Enables Amazon Athena to communicate with Teradata using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref 
LambdaMemory diff --git a/athena-snowflake/athena-snowflake.yaml b/athena-snowflake/athena-snowflake.yaml index 651883f9a5..67bac6e7aa 100644 --- a/athena-snowflake/athena-snowflake.yaml +++ b/athena-snowflake/athena-snowflake.yaml @@ -70,7 +70,7 @@ Resources: default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-snowflake:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-snowflake:2022.47.1' Description: "Enables Amazon Athena to communicate with Snowflake using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-sqlserver/athena-sqlserver.yaml b/athena-sqlserver/athena-sqlserver.yaml index 59c369eedf..9f09edfc88 100644 --- a/athena-sqlserver/athena-sqlserver.yaml +++ b/athena-sqlserver/athena-sqlserver.yaml @@ -77,7 +77,7 @@ Resources: default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-sqlserver:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-sqlserver:2022.47.1' Description: "Enables Amazon Athena to communicate with SQLSERVER using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-synapse/athena-synapse.yaml b/athena-synapse/athena-synapse.yaml index 2e3e80653e..8e578899cb 100644 --- a/athena-synapse/athena-synapse.yaml +++ b/athena-synapse/athena-synapse.yaml @@ -79,7 +79,7 @@ Resources: default: !Ref DefaultConnectionString FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-synapse:2022.47.1' + ImageUri: !Sub 
'292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-synapse:2022.47.1' Description: "Enables Amazon Athena to communicate with SYNPASE using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-teradata/athena-teradata.yaml b/athena-teradata/athena-teradata.yaml index 8b4760a912..3226e6081d 100644 --- a/athena-teradata/athena-teradata.yaml +++ b/athena-teradata/athena-teradata.yaml @@ -80,7 +80,7 @@ Resources: Layers: - !Ref LambdaJDBCLayername PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-teradata:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-teradata:2022.47.1' Description: "Enables Amazon Athena to communicate with Teradata using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-timestream/athena-timestream.yaml b/athena-timestream/athena-timestream.yaml index d036577be1..1850ffbecc 100644 --- a/athena-timestream/athena-timestream.yaml +++ b/athena-timestream/athena-timestream.yaml @@ -53,7 +53,7 @@ Resources: spill_prefix: !Ref SpillPrefix FunctionName: !Ref AthenaCatalogName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-timestream:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-timestream:2022.47.1' Description: "Enables Amazon Athena to communicate with Amazon Timestream, making your time series data accessible from Athena." 
Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-tpcds/athena-tpcds.yaml b/athena-tpcds/athena-tpcds.yaml index 40b76f7194..4086565f7e 100644 --- a/athena-tpcds/athena-tpcds.yaml +++ b/athena-tpcds/athena-tpcds.yaml @@ -53,7 +53,7 @@ Resources: spill_prefix: !Ref SpillPrefix FunctionName: !Ref AthenaCatalogName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-tpcds:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-tpcds:2022.47.1' Description: "This connector enables Amazon Athena to communicate with a randomly generated TPC-DS data source." Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-udfs/athena-udfs.yaml b/athena-udfs/athena-udfs.yaml index 968582181d..a6f04ad182 100644 --- a/athena-udfs/athena-udfs.yaml +++ b/athena-udfs/athena-udfs.yaml @@ -40,7 +40,7 @@ Resources: Properties: FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-udfs:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-udfs:2022.47.1' Description: "This connector enables Amazon Athena to leverage common UDFs made available via Lambda." 
Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-vertica/athena-vertica.yaml b/athena-vertica/athena-vertica.yaml index c70c97cc2b..a0fed75e01 100644 --- a/athena-vertica/athena-vertica.yaml +++ b/athena-vertica/athena-vertica.yaml @@ -83,7 +83,7 @@ Resources: FunctionName: !Sub "${AthenaCatalogName}" PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/prod-athena-federation-repository-vertica:2022.47.1' + ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-vertica:2022.47.1' Description: "Amazon Athena Vertica Connector" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory From 86437e276a2217be6db45507929135ec44f2accc Mon Sep 17 00:00:00 2001 From: Aimery Methena Date: Mon, 23 Sep 2024 17:54:23 -0400 Subject: [PATCH 33/87] update connectors with mux handlers to use normal in connections yaml --- athena-cloudera-hive/Dockerfile | 4 ++-- athena-cloudera-hive/athena-cloudera-hive-connection.yaml | 2 ++ athena-cloudera-hive/athena-cloudera-hive.yaml | 2 ++ athena-cloudera-impala/Dockerfile | 4 ++-- athena-cloudera-impala/athena-cloudera-impala-connection.yaml | 2 ++ athena-cloudera-impala/athena-cloudera-impala.yaml | 2 ++ athena-datalakegen2/Dockerfile | 4 ++-- athena-datalakegen2/athena-datalakegen2-connection.yaml | 2 ++ athena-datalakegen2/athena-datalakegen2.yaml | 2 ++ athena-db2-as400/Dockerfile | 4 ++-- athena-db2-as400/athena-db2-as400-connection.yaml | 2 ++ athena-db2-as400/athena-db2-as400.yaml | 2 ++ athena-db2/Dockerfile | 4 ++-- athena-db2/athena-db2-connection.yaml | 2 ++ athena-db2/athena-db2.yaml | 2 ++ athena-hortonworks-hive/Dockerfile | 4 ++-- .../athena-hortonworks-hive-connection.yaml | 2 ++ athena-hortonworks-hive/athena-hortonworks-hive.yaml | 2 ++ athena-mysql/Dockerfile | 4 ++-- athena-mysql/athena-mysql-connection.yaml | 2 ++ athena-mysql/athena-mysql.yaml | 2 ++ athena-oracle/Dockerfile | 4 ++-- 
athena-oracle/athena-oracle-connection.yaml | 2 ++ athena-oracle/athena-oracle.yaml | 2 ++ athena-redshift/Dockerfile | 4 ++-- athena-redshift/athena-redshift-connection.yaml | 2 ++ athena-redshift/athena-redshift.yaml | 2 ++ athena-saphana/Dockerfile | 4 ++-- athena-saphana/athena-saphana-connection.yaml | 2 ++ athena-saphana/athena-saphana.yaml | 2 ++ athena-snowflake/Dockerfile | 4 ++-- athena-snowflake/athena-snowflake-connection.yaml | 2 ++ athena-snowflake/athena-snowflake.yaml | 2 ++ athena-sqlserver/Dockerfile | 4 ++-- athena-sqlserver/athena-sqlserver-connection.yaml | 2 ++ athena-sqlserver/athena-sqlserver.yaml | 2 ++ athena-synapse/Dockerfile | 4 ++-- athena-synapse/athena-synapse-connection.yaml | 2 ++ athena-synapse/athena-synapse.yaml | 2 ++ athena-teradata/Dockerfile | 4 ++-- athena-teradata/athena-teradata-connection.yaml | 2 ++ athena-teradata/athena-teradata.yaml | 2 ++ 42 files changed, 84 insertions(+), 28 deletions(-) diff --git a/athena-cloudera-hive/Dockerfile b/athena-cloudera-hive/Dockerfile index a56019f693..b386d6db35 100644 --- a/athena-cloudera-hive/Dockerfile +++ b/athena-cloudera-hive/Dockerfile @@ -5,5 +5,5 @@ COPY target/athena-cloudera-hive-2022.47.1.jar ${LAMBDA_TASK_ROOT} # Unpack the jar RUN jar xf athena-cloudera-hive-2022.47.1.jar -# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) -CMD [ "com.amazonaws.athena.connectors.cloudera.HiveMuxCompositeHandler" ] \ No newline at end of file +# Command can be overwritten by providing a different command in the template directly. 
+# No need to specify here (already defined in .yaml file because legacy and connections use different) \ No newline at end of file diff --git a/athena-cloudera-hive/athena-cloudera-hive-connection.yaml b/athena-cloudera-hive/athena-cloudera-hive-connection.yaml index b37e5ef1b1..f958e4718c 100644 --- a/athena-cloudera-hive/athena-cloudera-hive-connection.yaml +++ b/athena-cloudera-hive/athena-cloudera-hive-connection.yaml @@ -54,6 +54,8 @@ Resources: FunctionName: !Ref LambdaFunctionName PackageType: "Image" ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-cloudera-hive:2022.47.1' + ImageConfig: + Command: [ "com.amazonaws.athena.connectors.cloudera.HiveCompositeHandler" ] Description: "Enables Amazon Athena to communicate with Cloudera Hive using JDBC" Timeout: 900 MemorySize: 3008 diff --git a/athena-cloudera-hive/athena-cloudera-hive.yaml b/athena-cloudera-hive/athena-cloudera-hive.yaml index 3095ec1407..46dea219ae 100644 --- a/athena-cloudera-hive/athena-cloudera-hive.yaml +++ b/athena-cloudera-hive/athena-cloudera-hive.yaml @@ -67,6 +67,8 @@ Resources: FunctionName: !Ref LambdaFunctionName PackageType: "Image" ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-cloudera-hive:2022.47.1' + ImageConfig: + Command: [ "com.amazonaws.athena.connectors.cloudera.HiveMuxCompositeHandler" ] Description: "Enables Amazon Athena to communicate with Cloudera Hive using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-cloudera-impala/Dockerfile b/athena-cloudera-impala/Dockerfile index 2ed43aeaa9..0f0736a9af 100644 --- a/athena-cloudera-impala/Dockerfile +++ b/athena-cloudera-impala/Dockerfile @@ -5,5 +5,5 @@ COPY target/athena-cloudera-impala-2022.47.1.jar ${LAMBDA_TASK_ROOT} # Unpack the jar RUN jar xf athena-cloudera-impala-2022.47.1.jar -# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) -CMD [
"com.amazonaws.athena.connectors.cloudera.ImpalaMuxCompositeHandler" ] \ No newline at end of file +# Command can be overwritten by providing a different command in the template directly. +# No need to specify here (already defined in .yaml file because legacy and connections use different) \ No newline at end of file diff --git a/athena-cloudera-impala/athena-cloudera-impala-connection.yaml b/athena-cloudera-impala/athena-cloudera-impala-connection.yaml index 717dc5fc5f..d3d5c568df 100644 --- a/athena-cloudera-impala/athena-cloudera-impala-connection.yaml +++ b/athena-cloudera-impala/athena-cloudera-impala-connection.yaml @@ -54,6 +54,8 @@ Resources: FunctionName: !Ref LambdaFunctionName PackageType: "Image" ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-cloudera-impala:2022.47.1' + ImageConfig: + Command: [ "com.amazonaws.athena.connectors.cloudera.ImpalaCompositeHandler" ] Description: "Enables Amazon Athena to communicate with Cloudera Impala using JDBC" Timeout: 900 MemorySize: 3008 diff --git a/athena-cloudera-impala/athena-cloudera-impala.yaml b/athena-cloudera-impala/athena-cloudera-impala.yaml index 812045c764..5d9f9d2aab 100644 --- a/athena-cloudera-impala/athena-cloudera-impala.yaml +++ b/athena-cloudera-impala/athena-cloudera-impala.yaml @@ -72,6 +72,8 @@ Resources: FunctionName: !Ref LambdaFunctionName PackageType: "Image" ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-cloudera-impala:2022.47.1' + ImageConfig: + Command: [ "com.amazonaws.athena.connectors.cloudera.ImpalaMuxCompositeHandler" ] Description: "Enables Amazon Athena to communicate with Cloudera Impala using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-datalakegen2/Dockerfile b/athena-datalakegen2/Dockerfile index 4e1929f607..3bc46f1ded 100644 --- a/athena-datalakegen2/Dockerfile +++ b/athena-datalakegen2/Dockerfile @@ -5,5 +5,5 @@ COPY 
target/athena-datalakegen2-2022.47.1.jar ${LAMBDA_TASK_ROOT} # Unpack the jar RUN jar xf athena-datalakegen2-2022.47.1.jar -# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) -CMD [ "com.amazonaws.athena.connectors.datalakegen2.DataLakeGen2MuxCompositeHandler" ] \ No newline at end of file +# Command can be overwritten by providing a different command in the template directly. +# No need to specify here (already defined in .yaml file because legacy and connections use different) \ No newline at end of file diff --git a/athena-datalakegen2/athena-datalakegen2-connection.yaml b/athena-datalakegen2/athena-datalakegen2-connection.yaml index 6978dbc462..1ec55b5af0 100644 --- a/athena-datalakegen2/athena-datalakegen2-connection.yaml +++ b/athena-datalakegen2/athena-datalakegen2-connection.yaml @@ -62,6 +62,8 @@ Resources: FunctionName: !Ref LambdaFunctionName PackageType: "Image" ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-datalakegen2:2022.47.1' + ImageConfig: + Command: [ "com.amazonaws.athena.connectors.datalakegen2.DataLakeGen2CompositeHandler" ] Description: "Enables Amazon Athena to communicate with DataLake Gen2 using JDBC" Timeout: 900 MemorySize: 3008 diff --git a/athena-datalakegen2/athena-datalakegen2.yaml b/athena-datalakegen2/athena-datalakegen2.yaml index 5890402513..4fe96f0e65 100644 --- a/athena-datalakegen2/athena-datalakegen2.yaml +++ b/athena-datalakegen2/athena-datalakegen2.yaml @@ -73,6 +73,8 @@ Resources: FunctionName: !Ref LambdaFunctionName PackageType: "Image" ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-datalakegen2:2022.47.1' + ImageConfig: + Command: [ "com.amazonaws.athena.connectors.datalakegen2.DataLakeGen2MuxCompositeHandler" ] Description: "Enables Amazon Athena to communicate with DataLake Gen2 using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git
a/athena-db2-as400/Dockerfile b/athena-db2-as400/Dockerfile index affd37e7bb..7c2e7b4367 100644 --- a/athena-db2-as400/Dockerfile +++ b/athena-db2-as400/Dockerfile @@ -5,5 +5,5 @@ COPY target/athena-db2-as400-2022.47.1.jar ${LAMBDA_TASK_ROOT} # Unpack the jar RUN jar xf athena-db2-as400-2022.47.1.jar -# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) -CMD [ "com.amazonaws.athena.connectors.db2as400.Db2As400MuxCompositeHandler" ] \ No newline at end of file +# Command can be overwritten by providing a different command in the template directly. +# No need to specify here (already defined in .yaml file because legacy and connections use different) \ No newline at end of file diff --git a/athena-db2-as400/athena-db2-as400-connection.yaml b/athena-db2-as400/athena-db2-as400-connection.yaml index 17ed8ee54e..a55586143c 100644 --- a/athena-db2-as400/athena-db2-as400-connection.yaml +++ b/athena-db2-as400/athena-db2-as400-connection.yaml @@ -63,6 +63,8 @@ Resources: FunctionName: !Ref LambdaFunctionName PackageType: "Image" ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-db2-as400:2022.47.1' + ImageConfig: + Command: [ "com.amazonaws.athena.connectors.db2as400.Db2As400CompositeHandler" ] Description: "Enables Amazon Athena to communicate with DB2 on iSeries (AS400) using JDBC" Timeout: 900 MemorySize: 3008 diff --git a/athena-db2-as400/athena-db2-as400.yaml b/athena-db2-as400/athena-db2-as400.yaml index ea0a331051..8faa8eda20 100644 --- a/athena-db2-as400/athena-db2-as400.yaml +++ b/athena-db2-as400/athena-db2-as400.yaml @@ -74,6 +74,8 @@ Resources: FunctionName: !Ref LambdaFunctionName PackageType: "Image" ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-db2-as400:2022.47.1' + ImageConfig: + Command: [ "com.amazonaws.athena.connectors.db2as400.Db2As400MuxCompositeHandler" ] Description: "Enables Amazon Athena to communicate with 
DB2 on iSeries (AS400) using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-db2/Dockerfile b/athena-db2/Dockerfile index 0d8231fa29..c3172649e7 100644 --- a/athena-db2/Dockerfile +++ b/athena-db2/Dockerfile @@ -5,5 +5,5 @@ COPY target/athena-db2-2022.47.1.jar ${LAMBDA_TASK_ROOT} # Unpack the jar RUN jar xf athena-db2-2022.47.1.jar -# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) -CMD [ "com.amazonaws.athena.connectors.db2.Db2MuxCompositeHandler" ] \ No newline at end of file +# Command can be overwritten by providing a different command in the template directly. +# No need to specify here (already defined in .yaml file because legacy and connections use different) \ No newline at end of file diff --git a/athena-db2/athena-db2-connection.yaml b/athena-db2/athena-db2-connection.yaml index 27222f077c..5d4fa52892 100644 --- a/athena-db2/athena-db2-connection.yaml +++ b/athena-db2/athena-db2-connection.yaml @@ -63,6 +63,8 @@ Resources: FunctionName: !Ref LambdaFunctionName PackageType: "Image" ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-db2:2022.47.1' + ImageConfig: + Command: [ "com.amazonaws.athena.connectors.db2.Db2CompositeHandler" ] Description: "Enables Amazon Athena to communicate with DB2 using JDBC" Timeout: 900 MemorySize: 3008 diff --git a/athena-db2/athena-db2.yaml b/athena-db2/athena-db2.yaml index 7508f16712..4a7567d589 100644 --- a/athena-db2/athena-db2.yaml +++ b/athena-db2/athena-db2.yaml @@ -74,6 +74,8 @@ Resources: FunctionName: !Ref LambdaFunctionName PackageType: "Image" ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-db2:2022.47.1' + ImageConfig: + Command: [ "com.amazonaws.athena.connectors.db2.Db2MuxCompositeHandler" ] Description: "Enables Amazon Athena to communicate with DB2 using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git 
a/athena-hortonworks-hive/Dockerfile b/athena-hortonworks-hive/Dockerfile index 3a68e6d997..20b44e9ca1 100644 --- a/athena-hortonworks-hive/Dockerfile +++ b/athena-hortonworks-hive/Dockerfile @@ -5,5 +5,5 @@ COPY target/athena-hortonworks-hive-2022.47.1.jar ${LAMBDA_TASK_ROOT} # Unpack the jar RUN jar xf athena-hortonworks-hive-2022.47.1.jar -# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) -CMD [ "com.amazonaws.athena.connectors.hortonworks.HiveMuxCompositeHandler" ] \ No newline at end of file +# Command can be overwritten by providing a different command in the template directly. +# No need to specify here (already defined in .yaml file because legacy and connections use different) \ No newline at end of file diff --git a/athena-hortonworks-hive/athena-hortonworks-hive-connection.yaml b/athena-hortonworks-hive/athena-hortonworks-hive-connection.yaml index 397a08c04b..d75c03443b 100644 --- a/athena-hortonworks-hive/athena-hortonworks-hive-connection.yaml +++ b/athena-hortonworks-hive/athena-hortonworks-hive-connection.yaml @@ -60,6 +60,8 @@ Resources: FunctionName: !Ref LambdaFunctionName PackageType: "Image" ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-hortonworks-hive:2022.47.1' + ImageConfig: + Command: [ "com.amazonaws.athena.connectors.hortonworks.HiveCompositeHandler" ] Description: "Enables Amazon Athena to communicate with Hortonworks Hive using JDBC" Timeout: 900 MemorySize: 3008 diff --git a/athena-hortonworks-hive/athena-hortonworks-hive.yaml b/athena-hortonworks-hive/athena-hortonworks-hive.yaml index 4180c48d15..8613bf4e83 100644 --- a/athena-hortonworks-hive/athena-hortonworks-hive.yaml +++ b/athena-hortonworks-hive/athena-hortonworks-hive.yaml @@ -71,6 +71,8 @@ Resources: FunctionName: !Ref LambdaFunctionName PackageType: "Image" ImageUri: !Sub 
'292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-hortonworks-hive:2022.47.1' + ImageConfig: + Command: [ "com.amazonaws.athena.connectors.hortonworks.HiveMuxCompositeHandler" ] Description: "Enables Amazon Athena to communicate with Hortonworks Hive using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-mysql/Dockerfile b/athena-mysql/Dockerfile index 08f27b704d..ca7a43e642 100644 --- a/athena-mysql/Dockerfile +++ b/athena-mysql/Dockerfile @@ -5,5 +5,5 @@ COPY target/athena-mysql-2022.47.1.jar ${LAMBDA_TASK_ROOT} # Unpack the jar RUN jar xf athena-mysql-2022.47.1.jar -# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) -CMD [ "com.amazonaws.athena.connectors.mysql.MySqlMuxCompositeHandler" ] \ No newline at end of file +# Command can be overwritten by providing a different command in the template directly. +# No need to specify here (already defined in .yaml file because legacy and connections use different) \ No newline at end of file diff --git a/athena-mysql/athena-mysql-connection.yaml b/athena-mysql/athena-mysql-connection.yaml index 0134e1af00..657e55a4a7 100644 --- a/athena-mysql/athena-mysql-connection.yaml +++ b/athena-mysql/athena-mysql-connection.yaml @@ -56,6 +56,8 @@ Resources: FunctionName: !Ref LambdaFunctionName PackageType: "Image" ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-mysql:2022.47.1' + ImageConfig: + Command: [ "com.amazonaws.athena.connectors.mysql.MySqlMuxCompositeHandler" ] Description: "Enables Amazon Athena to communicate with MySQL using JDBC" Timeout: 900 MemorySize: 3008 diff --git a/athena-mysql/athena-mysql.yaml b/athena-mysql/athena-mysql.yaml index 31c9e11f69..0901916aee 100644 --- a/athena-mysql/athena-mysql.yaml +++ b/athena-mysql/athena-mysql.yaml @@ -72,6 +72,8 @@ Resources: FunctionName: !Ref LambdaFunctionName PackageType: "Image" ImageUri: !Sub 
'292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-mysql:2022.47.1' + ImageConfig: + Command: [ "com.amazonaws.athena.connectors.mysql.MySqlMuxCompositeHandler" ] Description: "Enables Amazon Athena to communicate with MySQL using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-oracle/Dockerfile b/athena-oracle/Dockerfile index e85f8c566e..3cfbda8854 100644 --- a/athena-oracle/Dockerfile +++ b/athena-oracle/Dockerfile @@ -5,5 +5,5 @@ COPY target/athena-oracle-2022.47.1.jar ${LAMBDA_TASK_ROOT} # Unpack the jar RUN jar xf athena-oracle-2022.47.1.jar -# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) -CMD [ "com.amazonaws.athena.connectors.oracle.OracleMuxCompositeHandler" ] \ No newline at end of file +# Command can be overwritten by providing a different command in the template directly. +# No need to specify here (already defined in .yaml file because legacy and connections use different) \ No newline at end of file diff --git a/athena-oracle/athena-oracle-connection.yaml b/athena-oracle/athena-oracle-connection.yaml index e6f47fce93..abd8648fce 100644 --- a/athena-oracle/athena-oracle-connection.yaml +++ b/athena-oracle/athena-oracle-connection.yaml @@ -57,6 +57,8 @@ Resources: FunctionName: !Ref LambdaFunctionName PackageType: "Image" ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-oracle:2022.47.1' + ImageConfig: + Command: [ "com.amazonaws.athena.connectors.oracle.OracleCompositeHandler" ] Description: "Enables Amazon Athena to communicate with ORACLE using JDBC" Timeout: 900 MemorySize: 3008 diff --git a/athena-oracle/athena-oracle.yaml b/athena-oracle/athena-oracle.yaml index e086cf82cb..dc723ea6d2 100644 --- a/athena-oracle/athena-oracle.yaml +++ b/athena-oracle/athena-oracle.yaml @@ -84,6 +84,8 @@ Resources: FunctionName: !Ref LambdaFunctionName PackageType: "Image" ImageUri: !Sub 
'292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-oracle:2022.47.1' + ImageConfig: + Command: [ "com.amazonaws.athena.connectors.oracle.OracleMuxCompositeHandler" ] Description: "Enables Amazon Athena to communicate with ORACLE using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-redshift/Dockerfile b/athena-redshift/Dockerfile index 0e7d808823..36d20144ab 100644 --- a/athena-redshift/Dockerfile +++ b/athena-redshift/Dockerfile @@ -5,5 +5,5 @@ COPY target/athena-redshift-2022.47.1.jar ${LAMBDA_TASK_ROOT} # Unpack the jar RUN jar xf athena-redshift-2022.47.1.jar -# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) -CMD [ "com.amazonaws.athena.connectors.redshift.RedshiftMuxCompositeHandler" ] \ No newline at end of file +# Command can be overwritten by providing a different command in the template directly. +# No need to specify here (already defined in .yaml file because legacy and connections use different) \ No newline at end of file diff --git a/athena-redshift/athena-redshift-connection.yaml b/athena-redshift/athena-redshift-connection.yaml index 806954ed0b..05a4728372 100644 --- a/athena-redshift/athena-redshift-connection.yaml +++ b/athena-redshift/athena-redshift-connection.yaml @@ -54,6 +54,8 @@ Resources: FunctionName: !Ref LambdaFunctionName PackageType: "Image" ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-redshift:2022.47.1' + ImageConfig: + Command: [ "com.amazonaws.athena.connectors.redshift.RedshiftCompositeHandler" ] Description: "Enables Amazon Athena to communicate with Redshift using JDBC" Timeout: 900 MemorySize: 3008 diff --git a/athena-redshift/athena-redshift.yaml b/athena-redshift/athena-redshift.yaml index 4a4df3c79b..1b1ed8671c 100644 --- a/athena-redshift/athena-redshift.yaml +++ b/athena-redshift/athena-redshift.yaml @@ -81,6 +81,8 @@ Resources: FunctionName: !Ref 
LambdaFunctionName PackageType: "Image" ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-redshift:2022.47.1' + ImageConfig: + Command: [ "com.amazonaws.athena.connectors.redshift.RedshiftMuxCompositeHandler" ] Description: "Enables Amazon Athena to communicate with Redshift using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-saphana/Dockerfile b/athena-saphana/Dockerfile index 5e55d28a12..f87466c2b3 100644 --- a/athena-saphana/Dockerfile +++ b/athena-saphana/Dockerfile @@ -5,5 +5,5 @@ COPY target/athena-saphana.zip ${LAMBDA_TASK_ROOT} # Unpack the jar RUN jar xf athena-saphana.zip -# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) -CMD [ "com.amazonaws.athena.connectors.saphana.SaphanaMuxCompositeHandler" ] \ No newline at end of file +# Command can be overwritten by providing a different command in the template directly. +# No need to specify here (already defined in .yaml file because legacy and connections use different) \ No newline at end of file diff --git a/athena-saphana/athena-saphana-connection.yaml b/athena-saphana/athena-saphana-connection.yaml index 9d6440d57f..656049b169 100644 --- a/athena-saphana/athena-saphana-connection.yaml +++ b/athena-saphana/athena-saphana-connection.yaml @@ -60,6 +60,8 @@ Resources: FunctionName: !Ref LambdaFunctionName PackageType: "Image" ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-saphana:2022.47.1' + ImageConfig: + Command: [ "com.amazonaws.athena.connectors.saphana.SaphanaCompositeHandler" ] Description: "Enables Amazon Athena to communicate with SAP HANA using JDBC" Timeout: 900 MemorySize: 3008 diff --git a/athena-saphana/athena-saphana.yaml b/athena-saphana/athena-saphana.yaml index 7918bb1b32..cbfbf7fd92 100644 --- a/athena-saphana/athena-saphana.yaml +++ b/athena-saphana/athena-saphana.yaml @@ -71,6 +71,8 @@ Resources: FunctionName:
!Ref LambdaFunctionName PackageType: "Image" ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-saphana:2022.47.1' + ImageConfig: + Command: [ "com.amazonaws.athena.connectors.saphana.SaphanaMuxCompositeHandler" ] Description: "Enables Amazon Athena to communicate with SAP HANA using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-snowflake/Dockerfile b/athena-snowflake/Dockerfile index 8d4d9081a6..006be5ce85 100644 --- a/athena-snowflake/Dockerfile +++ b/athena-snowflake/Dockerfile @@ -5,5 +5,5 @@ COPY target/athena-snowflake.zip ${LAMBDA_TASK_ROOT} # Unpack the jar RUN jar xf athena-snowflake.zip -# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) -CMD [ "com.amazonaws.athena.connectors.snowflake.SnowflakeMuxCompositeHandler" ] \ No newline at end of file +# Command can be overwritten by providing a different command in the template directly. +# No need to specify here (already defined in .yaml file because legacy and connections use different) \ No newline at end of file diff --git a/athena-snowflake/athena-snowflake-connection.yaml b/athena-snowflake/athena-snowflake-connection.yaml index c3273eddac..b1cd847087 100644 --- a/athena-snowflake/athena-snowflake-connection.yaml +++ b/athena-snowflake/athena-snowflake-connection.yaml @@ -60,6 +60,8 @@ Resources: FunctionName: !Ref LambdaFunctionName PackageType: "Image" ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-snowflake:2022.47.1' + ImageConfig: + Command: [ "com.amazonaws.athena.connectors.snowflake.SnowflakeCompositeHandler" ] Description: "Enables Amazon Athena to communicate with Snowflake using JDBC" Timeout: 900 MemorySize: 3008 diff --git a/athena-snowflake/athena-snowflake.yaml b/athena-snowflake/athena-snowflake.yaml index 67bac6e7aa..409d6788e8 100644 ---
b/athena-snowflake/athena-snowflake.yaml @@ -71,6 +71,8 @@ Resources: FunctionName: !Ref LambdaFunctionName PackageType: "Image" ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-snowflake:2022.47.1' + ImageConfig: + Command: [ "com.amazonaws.athena.connectors.snowflake.SnowflakeMuxCompositeHandler" ] Description: "Enables Amazon Athena to communicate with Snowflake using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-sqlserver/Dockerfile b/athena-sqlserver/Dockerfile index e602b9fc50..da1abd4f51 100644 --- a/athena-sqlserver/Dockerfile +++ b/athena-sqlserver/Dockerfile @@ -5,5 +5,5 @@ COPY target/athena-sqlserver-2022.47.1.jar ${LAMBDA_TASK_ROOT} # Unpack the jar RUN jar xf athena-sqlserver-2022.47.1.jar -# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) -CMD [ "com.amazonaws.athena.connectors.sqlserver.SqlServerMuxCompositeHandler" ] \ No newline at end of file +# Command can be overwritten by providing a different command in the template directly. 
+# No need to specify here (already defined in .yaml file because legacy and connections use different) \ No newline at end of file diff --git a/athena-sqlserver/athena-sqlserver-connection.yaml b/athena-sqlserver/athena-sqlserver-connection.yaml index 72693531f7..59d952b43a 100644 --- a/athena-sqlserver/athena-sqlserver-connection.yaml +++ b/athena-sqlserver/athena-sqlserver-connection.yaml @@ -60,6 +60,8 @@ Resources: FunctionName: !Ref LambdaFunctionName PackageType: "Image" ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-sqlserver:2022.47.1' + ImageConfig: + Command: [ "com.amazonaws.athena.connectors.sqlserver.SqlServerCompositeHandler" ] Description: "Enables Amazon Athena to communicate with SQLSERVER using JDBC" Timeout: 900 MemorySize: 3008 diff --git a/athena-sqlserver/athena-sqlserver.yaml b/athena-sqlserver/athena-sqlserver.yaml index bec2cbcf9c..4829e10d6f 100644 --- a/athena-sqlserver/athena-sqlserver.yaml +++ b/athena-sqlserver/athena-sqlserver.yaml @@ -78,6 +78,8 @@ Resources: FunctionName: !Ref LambdaFunctionName PackageType: "Image" ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-sqlserver:2022.47.1' + ImageConfig: + Command: [ "com.amazonaws.athena.connectors.sqlserver.SqlServerMuxCompositeHandler" ] Description: "Enables Amazon Athena to communicate with SQLSERVER using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-synapse/Dockerfile b/athena-synapse/Dockerfile index 2a7a05ec98..d9bdb76676 100644 --- a/athena-synapse/Dockerfile +++ b/athena-synapse/Dockerfile @@ -5,5 +5,5 @@ COPY target/athena-synapse-2022.47.1.jar ${LAMBDA_TASK_ROOT} # Unpack the jar RUN jar xf athena-synapse-2022.47.1.jar -# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) -CMD [ "com.amazonaws.athena.connectors.synapse.SynapseMuxCompositeHandler" ] \ No newline at end of file +# Command can be 
overwritten by providing a different command in the template directly. +# No need to specify here (already defined in .yaml file because legacy and connections use different) \ No newline at end of file diff --git a/athena-synapse/athena-synapse-connection.yaml b/athena-synapse/athena-synapse-connection.yaml index 8e0e0951e2..bf9d3fd9fe 100644 --- a/athena-synapse/athena-synapse-connection.yaml +++ b/athena-synapse/athena-synapse-connection.yaml @@ -62,6 +62,8 @@ Resources: FunctionName: !Ref LambdaFunctionName PackageType: "Image" ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-synapse:2022.47.1' + ImageConfig: + Command: [ "com.amazonaws.athena.connectors.synapse.SynapseCompositeHandler" ] Description: "Enables Amazon Athena to communicate with SYNAPSE using JDBC" Timeout: 900 MemorySize: 3008 diff --git a/athena-synapse/athena-synapse.yaml b/athena-synapse/athena-synapse.yaml index 05ab974e75..14e52f2d45 100644 --- a/athena-synapse/athena-synapse.yaml +++ b/athena-synapse/athena-synapse.yaml @@ -80,6 +80,8 @@ Resources: FunctionName: !Ref LambdaFunctionName PackageType: "Image" ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-synapse:2022.47.1' + ImageConfig: + Command: [ "com.amazonaws.athena.connectors.synapse.SynapseMuxCompositeHandler" ] Description: "Enables Amazon Athena to communicate with SYNAPSE using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory diff --git a/athena-teradata/Dockerfile b/athena-teradata/Dockerfile index 8f58411065..6dea76d17d 100644 --- a/athena-teradata/Dockerfile +++ b/athena-teradata/Dockerfile @@ -5,5 +5,5 @@ COPY target/athena-teradata-2022.47.1.jar ${LAMBDA_TASK_ROOT} # Unpack the jar RUN jar xf athena-teradata-2022.47.1.jar -# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile) -CMD [ "com.amazonaws.athena.connectors.teradata.TeradataMuxCompositeHandler" ] \ No newline at
end of file +# Command can be overwritten by providing a different command in the template directly. +# No need to specify here (already defined in .yaml file because legacy and connections use different) \ No newline at end of file diff --git a/athena-teradata/athena-teradata-connection.yaml b/athena-teradata/athena-teradata-connection.yaml index 4b3271cfd9..51f1074674 100644 --- a/athena-teradata/athena-teradata-connection.yaml +++ b/athena-teradata/athena-teradata-connection.yaml @@ -58,6 +58,8 @@ Resources: FunctionName: !Ref LambdaFunctionName PackageType: "Image" ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-teradata:2022.47.1' + ImageConfig: + Command: [ "com.amazonaws.athena.connectors.teradata.TeradataCompositeHandler" ] Description: "Enables Amazon Athena to communicate with Teradata using JDBC" Timeout: 900 MemorySize: 3008 diff --git a/athena-teradata/athena-teradata.yaml b/athena-teradata/athena-teradata.yaml index 3226e6081d..55c5b29ffb 100644 --- a/athena-teradata/athena-teradata.yaml +++ b/athena-teradata/athena-teradata.yaml @@ -81,6 +81,8 @@ Resources: - !Ref LambdaJDBCLayername PackageType: "Image" ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-teradata:2022.47.1' + ImageConfig: + Command: [ "com.amazonaws.athena.connectors.teradata.TeradataMuxCompositeHandler" ] Description: "Enables Amazon Athena to communicate with Teradata using JDBC" Timeout: !Ref LambdaTimeout MemorySize: !Ref LambdaMemory From 080c7c15e9fecb8036a5600980a8d636f0b5965b Mon Sep 17 00:00:00 2001 From: Aimery Methena Date: Mon, 23 Sep 2024 18:04:34 -0400 Subject: [PATCH 34/87] fix mysql connection yaml --- athena-mysql/athena-mysql-connection.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/athena-mysql/athena-mysql-connection.yaml b/athena-mysql/athena-mysql-connection.yaml index 657e55a4a7..b5165260d8 100644 --- a/athena-mysql/athena-mysql-connection.yaml +++ 
b/athena-mysql/athena-mysql-connection.yaml @@ -57,7 +57,7 @@ Resources: PackageType: "Image" ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-mysql:2022.47.1' ImageConfig: - Command: [ "com.amazonaws.athena.connectors.mysql.MySqlMuxCompositeHandler" ] + Command: [ "com.amazonaws.athena.connectors.mysql.MySqlCompositeHandler" ] Description: "Enables Amazon Athena to communicate with MySQL using JDBC" Timeout: 900 MemorySize: 3008 From ee7804d1bce7035f6561c7e908f5ec411fa66f19 Mon Sep 17 00:00:00 2001 From: Trianz-Akshay <108925344+Trianz-Akshay@users.noreply.github.com> Date: Tue, 24 Sep 2024 23:04:36 +0530 Subject: [PATCH 35/87] v2 sdk changes emr (#2288) --- athena-aws-cmdb/pom.xml | 6 +- .../aws/cmdb/TableProviderFactory.java | 7 +- .../cmdb/tables/EmrClusterTableProvider.java | 76 ++++++++--------- .../aws/cmdb/TableProviderFactoryTest.java | 4 +- .../tables/EmrClusterTableProviderTest.java | 81 +++++++++---------- .../aws/cmdb/tables/RdsTableProviderTest.java | 10 --- athena-hbase/pom.xml | 8 +- .../hbase/integ/HbaseIntegTest.java | 49 ++++++----- 8 files changed, 111 insertions(+), 130 deletions(-) diff --git a/athena-aws-cmdb/pom.xml b/athena-aws-cmdb/pom.xml index 90c1ad7b59..7579f17301 100644 --- a/athena-aws-cmdb/pom.xml +++ b/athena-aws-cmdb/pom.xml @@ -28,9 +28,9 @@ - com.amazonaws - aws-java-sdk-emr - ${aws-sdk.version} + software.amazon.awssdk + emr + ${aws-sdk-v2.version} com.amazonaws diff --git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactory.java b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactory.java index 7a5099e0a7..8c553c4797 100644 --- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactory.java +++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactory.java @@ -34,11 +34,10 @@ import 
com.amazonaws.athena.connectors.aws.cmdb.tables.s3.S3ObjectsTableProvider; import com.amazonaws.services.ec2.AmazonEC2; import com.amazonaws.services.ec2.AmazonEC2ClientBuilder; -import com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduce; -import com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduceClientBuilder; import com.amazonaws.services.rds.AmazonRDS; import com.amazonaws.services.rds.AmazonRDSClientBuilder; import org.apache.arrow.util.VisibleForTesting; +import software.amazon.awssdk.services.emr.EmrClient; import software.amazon.awssdk.services.s3.S3Client; import java.util.ArrayList; @@ -59,14 +58,14 @@ public TableProviderFactory(java.util.Map configOptions) { this( AmazonEC2ClientBuilder.standard().build(), - AmazonElasticMapReduceClientBuilder.standard().build(), + EmrClient.create(), AmazonRDSClientBuilder.standard().build(), S3Client.create(), configOptions); } @VisibleForTesting - protected TableProviderFactory(AmazonEC2 ec2, AmazonElasticMapReduce emr, AmazonRDS rds, S3Client amazonS3, java.util.Map configOptions) + protected TableProviderFactory(AmazonEC2 ec2, EmrClient emr, AmazonRDS rds, S3Client amazonS3, java.util.Map configOptions) { addProvider(new Ec2TableProvider(ec2)); addProvider(new EbsTableProvider(ec2)); diff --git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/EmrClusterTableProvider.java b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/EmrClusterTableProvider.java index ee3b15da91..c3d10c7233 100644 --- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/EmrClusterTableProvider.java +++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/EmrClusterTableProvider.java @@ -29,15 +29,15 @@ import com.amazonaws.athena.connector.lambda.metadata.GetTableRequest; import com.amazonaws.athena.connector.lambda.metadata.GetTableResponse; import 
com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest; -import com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduce; -import com.amazonaws.services.elasticmapreduce.model.Cluster; -import com.amazonaws.services.elasticmapreduce.model.ClusterSummary; -import com.amazonaws.services.elasticmapreduce.model.DescribeClusterRequest; -import com.amazonaws.services.elasticmapreduce.model.DescribeClusterResult; -import com.amazonaws.services.elasticmapreduce.model.ListClustersRequest; -import com.amazonaws.services.elasticmapreduce.model.ListClustersResult; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Schema; +import software.amazon.awssdk.services.emr.EmrClient; +import software.amazon.awssdk.services.emr.model.Cluster; +import software.amazon.awssdk.services.emr.model.ClusterSummary; +import software.amazon.awssdk.services.emr.model.DescribeClusterRequest; +import software.amazon.awssdk.services.emr.model.DescribeClusterResponse; +import software.amazon.awssdk.services.emr.model.ListClustersRequest; +import software.amazon.awssdk.services.emr.model.ListClustersResponse; import java.util.List; import java.util.stream.Collectors; @@ -49,9 +49,9 @@ public class EmrClusterTableProvider implements TableProvider { private static final Schema SCHEMA; - private AmazonElasticMapReduce emr; + private EmrClient emr; - public EmrClusterTableProvider(AmazonElasticMapReduce emr) + public EmrClusterTableProvider(EmrClient emr) { this.emr = emr; } @@ -93,23 +93,23 @@ public GetTableResponse getTable(BlockAllocator blockAllocator, GetTableRequest public void readWithConstraint(BlockSpiller spiller, ReadRecordsRequest recordsRequest, QueryStatusChecker queryStatusChecker) { boolean done = false; - ListClustersRequest request = new ListClustersRequest(); + ListClustersRequest request = ListClustersRequest.builder().build(); while (!done) { - ListClustersResult response = emr.listClusters(request); + ListClustersResponse response = 
emr.listClusters(request); - for (ClusterSummary next : response.getClusters()) { + for (ClusterSummary next : response.clusters()) { Cluster cluster = null; - if (!next.getStatus().getState().toLowerCase().contains("terminated")) { - DescribeClusterResult clusterResponse = emr.describeCluster(new DescribeClusterRequest().withClusterId(next.getId())); - cluster = clusterResponse.getCluster(); + if (!next.status().stateAsString().toLowerCase().contains("terminated")) { + DescribeClusterResponse clusterResponse = emr.describeCluster(DescribeClusterRequest.builder().clusterId(next.id()).build()); + cluster = clusterResponse.cluster(); } clusterToRow(next, cluster, spiller); } - request.setMarker(response.getMarker()); + request = request.toBuilder().marker(response.marker()).build(); - if (response.getMarker() == null || !queryStatusChecker.isQueryRunning()) { + if (response.marker() == null || !queryStatusChecker.isQueryRunning()) { done = true; } } @@ -131,31 +131,31 @@ private void clusterToRow(ClusterSummary clusterSummary, spiller.writeRows((Block block, int row) -> { boolean matched = true; - matched &= block.offerValue("id", row, clusterSummary.getId()); - matched &= block.offerValue("name", row, clusterSummary.getName()); - matched &= block.offerValue("instance_hours", row, clusterSummary.getNormalizedInstanceHours()); - matched &= block.offerValue("state", row, clusterSummary.getStatus().getState()); - matched &= block.offerValue("state_code", row, clusterSummary.getStatus().getStateChangeReason().getCode()); - matched &= block.offerValue("state_msg", row, clusterSummary.getStatus().getStateChangeReason().getMessage()); + matched &= block.offerValue("id", row, clusterSummary.id()); + matched &= block.offerValue("name", row, clusterSummary.name()); + matched &= block.offerValue("instance_hours", row, clusterSummary.normalizedInstanceHours()); + matched &= block.offerValue("state", row, clusterSummary.status().stateAsString()); + matched &= 
block.offerValue("state_code", row, clusterSummary.status().stateChangeReason().codeAsString()); + matched &= block.offerValue("state_msg", row, clusterSummary.status().stateChangeReason().message()); if (cluster != null) { - matched &= block.offerValue("autoscaling_role", row, cluster.getAutoScalingRole()); - matched &= block.offerValue("custom_ami", row, cluster.getCustomAmiId()); - matched &= block.offerValue("instance_collection_type", row, cluster.getInstanceCollectionType()); - matched &= block.offerValue("log_uri", row, cluster.getLogUri()); - matched &= block.offerValue("master_public_dns", row, cluster.getMasterPublicDnsName()); - matched &= block.offerValue("release_label", row, cluster.getReleaseLabel()); - matched &= block.offerValue("running_ami", row, cluster.getRunningAmiVersion()); - matched &= block.offerValue("scale_down_behavior", row, cluster.getScaleDownBehavior()); - matched &= block.offerValue("service_role", row, cluster.getServiceRole()); - matched &= block.offerValue("service_role", row, cluster.getServiceRole()); - - List applications = cluster.getApplications().stream() - .map(next -> next.getName() + ":" + next.getVersion()).collect(Collectors.toList()); + matched &= block.offerValue("autoscaling_role", row, cluster.autoScalingRole()); + matched &= block.offerValue("custom_ami", row, cluster.customAmiId()); + matched &= block.offerValue("instance_collection_type", row, cluster.instanceCollectionTypeAsString()); + matched &= block.offerValue("log_uri", row, cluster.logUri()); + matched &= block.offerValue("master_public_dns", row, cluster.masterPublicDnsName()); + matched &= block.offerValue("release_label", row, cluster.releaseLabel()); + matched &= block.offerValue("running_ami", row, cluster.runningAmiVersion()); + matched &= block.offerValue("scale_down_behavior", row, cluster.scaleDownBehaviorAsString()); + matched &= block.offerValue("service_role", row, cluster.serviceRole()); + matched &= block.offerValue("service_role", row, 
cluster.serviceRole()); + + List applications = cluster.applications().stream() + .map(next -> next.name() + ":" + next.version()).collect(Collectors.toList()); matched &= block.offerComplexValue("applications", row, FieldResolver.DEFAULT, applications); - List tags = cluster.getTags().stream() - .map(next -> next.getKey() + ":" + next.getValue()).collect(Collectors.toList()); + List tags = cluster.tags().stream() + .map(next -> next.key() + ":" + next.value()).collect(Collectors.toList()); matched &= block.offerComplexValue("tags", row, FieldResolver.DEFAULT, tags); } diff --git a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactoryTest.java b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactoryTest.java index c196e379d6..090b4e991d 100644 --- a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactoryTest.java +++ b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactoryTest.java @@ -22,12 +22,12 @@ import com.amazonaws.athena.connector.lambda.domain.TableName; import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider; import com.amazonaws.services.ec2.AmazonEC2; -import com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduce; import com.amazonaws.services.rds.AmazonRDS; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.services.emr.EmrClient; import software.amazon.awssdk.services.s3.S3Client; import java.util.List; @@ -45,7 +45,7 @@ public class TableProviderFactoryTest private AmazonEC2 mockEc2; @Mock - private AmazonElasticMapReduce mockEmr; + private EmrClient mockEmr; @Mock private AmazonRDS mockRds; diff --git a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/EmrClusterTableProviderTest.java 
b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/EmrClusterTableProviderTest.java index c88fc6943b..b593b275a2 100644 --- a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/EmrClusterTableProviderTest.java +++ b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/EmrClusterTableProviderTest.java @@ -21,17 +21,6 @@ import com.amazonaws.athena.connector.lambda.data.Block; import com.amazonaws.athena.connector.lambda.data.BlockUtils; -import com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduce; -import com.amazonaws.services.elasticmapreduce.model.Application; -import com.amazonaws.services.elasticmapreduce.model.Cluster; -import com.amazonaws.services.elasticmapreduce.model.ClusterStateChangeReason; -import com.amazonaws.services.elasticmapreduce.model.ClusterStatus; -import com.amazonaws.services.elasticmapreduce.model.ClusterSummary; -import com.amazonaws.services.elasticmapreduce.model.DescribeClusterRequest; -import com.amazonaws.services.elasticmapreduce.model.DescribeClusterResult; -import com.amazonaws.services.elasticmapreduce.model.ListClustersRequest; -import com.amazonaws.services.elasticmapreduce.model.ListClustersResult; -import com.amazonaws.services.elasticmapreduce.model.Tag; import org.apache.arrow.vector.complex.reader.FieldReader; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Field; @@ -41,6 +30,17 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.emr.EmrClient; +import software.amazon.awssdk.services.emr.model.Application; +import software.amazon.awssdk.services.emr.model.Cluster; +import software.amazon.awssdk.services.emr.model.ClusterStateChangeReason; +import software.amazon.awssdk.services.emr.model.ClusterStatus; +import software.amazon.awssdk.services.emr.model.ClusterSummary; +import 
software.amazon.awssdk.services.emr.model.DescribeClusterRequest; +import software.amazon.awssdk.services.emr.model.DescribeClusterResponse; +import software.amazon.awssdk.services.emr.model.ListClustersRequest; +import software.amazon.awssdk.services.emr.model.ListClustersResponse; +import software.amazon.awssdk.services.emr.model.Tag; import java.util.ArrayList; import java.util.List; @@ -49,7 +49,6 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.mockito.ArgumentMatchers.nullable; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @RunWith(MockitoJUnitRunner.class) @@ -59,7 +58,7 @@ public class EmrClusterTableProviderTest private static final Logger logger = LoggerFactory.getLogger(EmrClusterTableProviderTest.class); @Mock - private AmazonElasticMapReduce mockEmr; + private EmrClient mockEmr; protected String getIdField() { @@ -96,24 +95,18 @@ protected void setUpRead() { when(mockEmr.listClusters(nullable(ListClustersRequest.class))) .thenAnswer((InvocationOnMock invocation) -> { - ListClustersResult mockResult = mock(ListClustersResult.class); List values = new ArrayList<>(); values.add(makeClusterSummary(getIdValue())); values.add(makeClusterSummary(getIdValue())); values.add(makeClusterSummary("fake-id")); - when(mockResult.getClusters()).thenReturn(values); + ListClustersResponse mockResult = ListClustersResponse.builder().clusters(values).build(); return mockResult; }); when(mockEmr.describeCluster(nullable(DescribeClusterRequest.class))) .thenAnswer((InvocationOnMock invocation) -> { DescribeClusterRequest request = (DescribeClusterRequest) invocation.getArguments()[0]; - DescribeClusterResult mockResult = mock(DescribeClusterResult.class); - List values = new ArrayList<>(); - values.add(makeClusterSummary(getIdValue())); - values.add(makeClusterSummary(getIdValue())); - values.add(makeClusterSummary("fake-id")); - 
when(mockResult.getCluster()).thenReturn(makeCluster(request.getClusterId())); + DescribeClusterResponse mockResult = DescribeClusterResponse.builder().cluster(makeCluster(request.clusterId())).build(); return mockResult; }); } @@ -170,32 +163,32 @@ private void validate(FieldReader fieldReader) private ClusterSummary makeClusterSummary(String id) { - return new ClusterSummary() - .withName("name") - .withId(id) - .withStatus(new ClusterStatus() - .withState("state") - .withStateChangeReason(new ClusterStateChangeReason() - .withCode("state_code") - .withMessage("state_msg"))) - .withNormalizedInstanceHours(100); + return ClusterSummary.builder() + .name("name") + .id(id) + .status(ClusterStatus.builder().state("state") + .stateChangeReason(ClusterStateChangeReason.builder() + .code("state_code") + .message("state_msg").build()).build()) + .normalizedInstanceHours(100).build(); } private Cluster makeCluster(String id) { - return new Cluster() - .withId(id) - .withName("name") - .withAutoScalingRole("autoscaling_role") - .withCustomAmiId("custom_ami") - .withInstanceCollectionType("instance_collection_type") - .withLogUri("log_uri") - .withMasterPublicDnsName("master_public_dns") - .withReleaseLabel("release_label") - .withRunningAmiVersion("running_ami") - .withScaleDownBehavior("scale_down_behavior") - .withServiceRole("service_role") - .withApplications(new Application().withName("name").withVersion("version")) - .withTags(new Tag("key", "value")); + return Cluster.builder() + .id(id) + .name("name") + .autoScalingRole("autoscaling_role") + .customAmiId("custom_ami") + .instanceCollectionType("instance_collection_type") + .logUri("log_uri") + .masterPublicDnsName("master_public_dns") + .releaseLabel("release_label") + .runningAmiVersion("running_ami") + .scaleDownBehavior("scale_down_behavior") + .serviceRole("service_role") + .applications(Application.builder().name("name").version("version").build()) + .tags(Tag.builder().key("key").value("value").build()) + 
.build(); } } diff --git a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/RdsTableProviderTest.java b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/RdsTableProviderTest.java index 76b8c858ef..99b8356bf2 100644 --- a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/RdsTableProviderTest.java +++ b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/RdsTableProviderTest.java @@ -21,16 +21,6 @@ import com.amazonaws.athena.connector.lambda.data.Block; import com.amazonaws.athena.connector.lambda.data.BlockUtils; -import com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduce; -import com.amazonaws.services.elasticmapreduce.model.Application; -import com.amazonaws.services.elasticmapreduce.model.Cluster; -import com.amazonaws.services.elasticmapreduce.model.ClusterStateChangeReason; -import com.amazonaws.services.elasticmapreduce.model.ClusterStatus; -import com.amazonaws.services.elasticmapreduce.model.ClusterSummary; -import com.amazonaws.services.elasticmapreduce.model.DescribeClusterRequest; -import com.amazonaws.services.elasticmapreduce.model.DescribeClusterResult; -import com.amazonaws.services.elasticmapreduce.model.ListClustersRequest; -import com.amazonaws.services.elasticmapreduce.model.ListClustersResult; import com.amazonaws.services.rds.AmazonRDS; import com.amazonaws.services.rds.model.DBInstance; import com.amazonaws.services.rds.model.DBInstanceStatusInfo; diff --git a/athena-hbase/pom.xml b/athena-hbase/pom.xml index 6b6d2d04d3..c736ffa678 100644 --- a/athena-hbase/pom.xml +++ b/athena-hbase/pom.xml @@ -40,11 +40,11 @@ ${aws-cdk.version} test - + - com.amazonaws - aws-java-sdk-emr - ${aws-sdk.version} + software.amazon.awssdk + emr + ${aws-sdk-v2.version} test diff --git a/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/integ/HbaseIntegTest.java 
b/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/integ/HbaseIntegTest.java index ac1b59237e..1435e09b70 100644 --- a/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/integ/HbaseIntegTest.java +++ b/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/integ/HbaseIntegTest.java @@ -26,14 +26,6 @@ import com.amazonaws.athena.connector.integ.data.ConnectorStackAttributes; import com.amazonaws.athena.connector.integ.data.ConnectorVpcAttributes; import com.amazonaws.athena.connector.integ.providers.ConnectorPackagingAttributesProvider; -import com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduce; -import com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduceClientBuilder; -import com.amazonaws.services.elasticmapreduce.model.Application; -import com.amazonaws.services.elasticmapreduce.model.ClusterSummary; -import com.amazonaws.services.elasticmapreduce.model.DescribeClusterRequest; -import com.amazonaws.services.elasticmapreduce.model.DescribeClusterResult; -import com.amazonaws.services.elasticmapreduce.model.ListClustersRequest; -import com.amazonaws.services.elasticmapreduce.model.ListClustersResult; import com.google.common.collect.ImmutableList; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -46,6 +38,13 @@ import software.amazon.awscdk.services.emr.CfnCluster; import software.amazon.awscdk.services.iam.PolicyDocument; import software.amazon.awssdk.services.athena.model.Row; +import software.amazon.awssdk.services.emr.EmrClient; +import software.amazon.awssdk.services.emr.model.Application; +import software.amazon.awssdk.services.emr.model.ClusterSummary; +import software.amazon.awssdk.services.emr.model.DescribeClusterRequest; +import software.amazon.awssdk.services.emr.model.DescribeClusterResponse; +import software.amazon.awssdk.services.emr.model.ListClustersRequest; +import software.amazon.awssdk.services.emr.model.ListClustersResponse; import 
software.amazon.awssdk.services.lambda.LambdaClient; import software.amazon.awssdk.services.lambda.model.InvocationType; import software.amazon.awssdk.services.lambda.model.InvokeRequest; @@ -145,10 +144,10 @@ private Pair getHbaseStack() { .name(dbClusterName) .visibleToAllUsers(Boolean.TRUE) .applications(ImmutableList.of( - new Application().withName("HBase"), - new Application().withName("Hive"), - new Application().withName("Hue"), - new Application().withName("Phoenix"))) + Application.builder().name("HBase").build(), + Application.builder().name("Hive").build(), + Application.builder().name("Hue").build(), + Application.builder().name("Phoenix").build())) .instances(CfnCluster.JobFlowInstancesConfigProperty.builder() .emrManagedMasterSecurityGroup(vpcAttributes.getSecurityGroupId()) .emrManagedSlaveSecurityGroup(vpcAttributes.getSecurityGroupId()) @@ -179,27 +178,27 @@ private Pair getHbaseStack() { */ private String getClusterData() { - AmazonElasticMapReduce emrClient = AmazonElasticMapReduceClientBuilder.defaultClient(); + EmrClient emrClient = EmrClient.create(); try { - ListClustersResult listClustersResult; + ListClustersResponse listClustersResult; String marker = null; Optional dbClusterId; do { // While cluster Id has not yet been found and there are more paginated results. // Get paginated list of EMR clusters. - listClustersResult = emrClient.listClusters(new ListClustersRequest().withMarker(marker)); + listClustersResult = emrClient.listClusters(ListClustersRequest.builder().marker(marker).build()); // Get the cluster id. dbClusterId = getClusterId(listClustersResult); // Get the marker for the next paginated request. - marker = listClustersResult.getMarker(); + marker = listClustersResult.marker(); } while (!dbClusterId.isPresent() && marker != null); // Get the cluster description using the cluster id. 
- DescribeClusterResult clusterResult = emrClient.describeCluster(new DescribeClusterRequest() - .withClusterId(dbClusterId.orElseThrow(() -> - new RuntimeException("Unable to get cluster description for: " + dbClusterName)))); - return clusterResult.getCluster().getMasterPublicDnsName(); + DescribeClusterResponse clusterResult = emrClient.describeCluster(DescribeClusterRequest.builder() + .clusterId(dbClusterId.orElseThrow(() -> + new RuntimeException("Unable to get cluster description for: " + dbClusterName))).build()); + return clusterResult.cluster().masterPublicDnsName(); } finally { - emrClient.shutdown(); + emrClient.close(); } } @@ -209,12 +208,12 @@ private String getClusterData() * @return Optional String containing the cluster Id that matches the cluster name, or Optional.empty() if match * was not found. */ - private Optional getClusterId(ListClustersResult listClustersResult) + private Optional getClusterId(ListClustersResponse listClustersResult) { - for (ClusterSummary clusterSummary : listClustersResult.getClusters()) { - if (clusterSummary.getName().equals(dbClusterName)) { + for (ClusterSummary clusterSummary : listClustersResult.clusters()) { + if (clusterSummary.name().equals(dbClusterName)) { // Found match for cluster name - return cluster id. 
- String clusterId = clusterSummary.getId(); + String clusterId = clusterSummary.id(); logger.info("Found Cluster Id for {}: {}", dbClusterName, clusterId); return Optional.of(clusterId); } From 1e7cd1fdb9dd2444879b43bbad8a1130f38543a6 Mon Sep 17 00:00:00 2001 From: Trianz-Akshay <108925344+Trianz-Akshay@users.noreply.github.com> Date: Tue, 24 Sep 2024 23:27:59 +0530 Subject: [PATCH 36/87] v2 sdk changes redshift (#2289) --- athena-jdbc/pom.xml | 14 ------------ athena-redshift/pom.xml | 14 ++++++------ .../redshift/integ/RedshiftIntegTest.java | 22 +++++++++---------- 3 files changed, 17 insertions(+), 33 deletions(-) diff --git a/athena-jdbc/pom.xml b/athena-jdbc/pom.xml index 53cb6df19b..4256870c10 100644 --- a/athena-jdbc/pom.xml +++ b/athena-jdbc/pom.xml @@ -99,20 +99,6 @@ ${log4j2Version} runtime - - - com.amazonaws - aws-java-sdk-redshift - ${aws-sdk.version} - test - - - - software.amazon.awscdk - redshift - ${aws-cdk.version} - test - com.amazonaws diff --git a/athena-redshift/pom.xml b/athena-redshift/pom.xml index 7119660c3e..a60ac38828 100644 --- a/athena-redshift/pom.xml +++ b/athena-redshift/pom.xml @@ -38,16 +38,16 @@ test-jar test - + - com.amazonaws - aws-java-sdk-redshift - ${aws-sdk.version} + software.amazon.awssdk + redshift + ${aws-sdk-v2.version} - com.amazonaws - aws-java-sdk-redshiftserverless - ${aws-sdk.version} + software.amazon.awssdk + redshiftserverless + ${aws-sdk-v2.version} diff --git a/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/integ/RedshiftIntegTest.java b/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/integ/RedshiftIntegTest.java index d32334a0e2..d103901df4 100644 --- a/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/integ/RedshiftIntegTest.java +++ b/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/integ/RedshiftIntegTest.java @@ -26,14 +26,8 @@ import com.amazonaws.athena.connector.lambda.domain.TableName; import 
com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionInfo; import com.amazonaws.athena.connectors.jdbc.integ.JdbcTableUtils; -import com.amazonaws.services.redshift.AmazonRedshift; -import com.amazonaws.services.redshift.AmazonRedshiftClientBuilder; -import com.amazonaws.services.redshift.model.DescribeClustersRequest; -import com.amazonaws.services.redshift.model.DescribeClustersResult; -import com.amazonaws.services.redshift.model.Endpoint; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.testng.AssertJUnit; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; @@ -50,6 +44,10 @@ import software.amazon.awscdk.services.redshift.Login; import software.amazon.awscdk.services.redshift.NodeType; import software.amazon.awssdk.services.athena.model.Row; +import software.amazon.awssdk.services.redshift.RedshiftClient; +import software.amazon.awssdk.services.redshift.model.DescribeClustersRequest; +import software.amazon.awssdk.services.redshift.model.DescribeClustersResponse; +import software.amazon.awssdk.services.redshift.model.Endpoint; import java.util.ArrayList; import java.util.Collections; @@ -189,14 +187,14 @@ private Stack getRedshiftStack() */ private Endpoint getClusterData() { - AmazonRedshift redshiftClient = AmazonRedshiftClientBuilder.defaultClient(); + RedshiftClient redshiftClient = RedshiftClient.create(); try { - DescribeClustersResult clustersResult = redshiftClient.describeClusters(new DescribeClustersRequest() - .withClusterIdentifier(clusterName)); - return clustersResult.getClusters().get(0).getEndpoint(); + DescribeClustersResponse clustersResult = redshiftClient.describeClusters(DescribeClustersRequest.builder() + .clusterIdentifier(clusterName).build()); + return clustersResult.clusters().get(0).endpoint(); } finally { - redshiftClient.shutdown(); + redshiftClient.close(); } } @@ -207,7 +205,7 @@ private Endpoint getClusterData() private void 
setEnvironmentVars(Endpoint endpoint) { String connectionString = String.format("redshift://jdbc:redshift://%s:%s/public?user=%s&password=%s", - endpoint.getAddress(), endpoint.getPort(), username, password); + endpoint.address(), endpoint.port(), username, password); String connectionStringTag = lambdaFunctionName + "_connection_string"; environmentVars.put("default", connectionString); environmentVars.put(connectionStringTag, connectionString); From 2a0bb4983f3b98220cc423619d9efb661b77448e Mon Sep 17 00:00:00 2001 From: ejeffrli <144148373+ejeffrli@users.noreply.github.com> Date: Wed, 25 Sep 2024 10:25:41 -0400 Subject: [PATCH 37/87] Add RedshiftCompositeHandler class to populate environment from glue connections (#18) --- .../redshift/RedshiftCompositeHandler.java | 30 +++++++++++++++++ .../RedshiftEnvironmentProperties.java | 33 +++++++++++++++++++ 2 files changed, 63 insertions(+) create mode 100644 athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftCompositeHandler.java create mode 100644 athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftEnvironmentProperties.java diff --git a/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftCompositeHandler.java b/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftCompositeHandler.java new file mode 100644 index 0000000000..765919c6c7 --- /dev/null +++ b/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftCompositeHandler.java @@ -0,0 +1,30 @@ +/*- + * #%L + * athena-redshift + * %% + * Copyright (C) 2019 - 2024 Amazon Web Services + * %% + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * #L% + */ +package com.amazonaws.athena.connectors.redshift; + +import com.amazonaws.athena.connector.lambda.handlers.CompositeHandler; + +public class RedshiftCompositeHandler extends CompositeHandler +{ + public RedshiftCompositeHandler() + { + super(new RedshiftMetadataHandler(new RedshiftEnvironmentProperties().createEnvironment()), new RedshiftRecordHandler(new RedshiftEnvironmentProperties().createEnvironment())); + } +} diff --git a/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftEnvironmentProperties.java b/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftEnvironmentProperties.java new file mode 100644 index 0000000000..c9bc6c29a3 --- /dev/null +++ b/athena-redshift/src/main/java/com/amazonaws/athena/connectors/redshift/RedshiftEnvironmentProperties.java @@ -0,0 +1,33 @@ +/*- + * #%L + * athena-redshift + * %% + * Copyright (C) 2019 - 2024 Amazon Web Services + * %% + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * #L% + */ +package com.amazonaws.athena.connectors.redshift; + +import com.amazonaws.athena.connectors.jdbc.JdbcEnvironmentProperties; + +import java.util.Map; + +public class RedshiftEnvironmentProperties extends JdbcEnvironmentProperties +{ + @Override + protected String getConnectionStringPrefix(Map connectionProperties) + { + return "redshift://jdbc:redshift://"; + } +} From 1e685f4093da3165dc3a6d49e8a08b2db7a955b8 Mon Sep 17 00:00:00 2001 From: Aimery Methena <159072740+aimethed@users.noreply.github.com> Date: Wed, 25 Sep 2024 12:04:27 -0400 Subject: [PATCH 38/87] migrate awslogs to cloudwatchlogs (v1 to v2) (#2272) --- athena-cloudwatch/pom.xml | 26 +++- .../cloudwatch/CloudwatchExceptionFilter.java | 6 +- .../cloudwatch/CloudwatchMetadataHandler.java | 95 ++++++++------- .../cloudwatch/CloudwatchRecordHandler.java | 57 ++++----- .../cloudwatch/CloudwatchTableResolver.java | 68 +++++------ .../cloudwatch/CloudwatchUtils.java | 37 +++--- .../CloudwatchMetadataHandlerTest.java | 114 +++++++++--------- .../CloudwatchRecordHandlerTest.java | 39 +++--- .../cloudwatch/integ/CloudwatchIntegTest.java | 29 +++-- 9 files changed, 248 insertions(+), 223 deletions(-) diff --git a/athena-cloudwatch/pom.xml b/athena-cloudwatch/pom.xml index bd2dad00d8..95a34d1d37 100644 --- a/athena-cloudwatch/pom.xml +++ b/athena-cloudwatch/pom.xml @@ -29,15 +29,35 @@ test - com.amazonaws - aws-java-sdk-logs - ${aws-sdk.version} + software.amazon.awssdk + cloudwatchlogs + 2.28.2 + + + + commons-logging + commons-logging + + + software.amazon.awssdk + netty-nio-client + + + + + software.amazon.awssdk + cloudwatch + ${aws-sdk-v2.version} commons-logging commons-logging + + software.amazon.awssdk + netty-nio-client + diff --git a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchExceptionFilter.java b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchExceptionFilter.java index c71db552cf..093aeedd7e 100644 --- 
a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchExceptionFilter.java +++ b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchExceptionFilter.java @@ -20,8 +20,8 @@ package com.amazonaws.athena.connectors.cloudwatch; import com.amazonaws.athena.connector.lambda.ThrottlingInvoker; -import com.amazonaws.services.logs.model.AWSLogsException; -import com.amazonaws.services.logs.model.LimitExceededException; +import software.amazon.awssdk.services.cloudwatch.model.LimitExceededException; +import software.amazon.awssdk.services.cloudwatchlogs.model.CloudWatchLogsException; /** * Used to identify Exceptions that are related to Cloudwatch Logs throttling events. @@ -36,7 +36,7 @@ private CloudwatchExceptionFilter() {} @Override public boolean isMatch(Exception ex) { - if (ex instanceof AWSLogsException && ex.getMessage().startsWith("Rate exceeded")) { + if (ex instanceof CloudWatchLogsException && ex.getMessage().startsWith("Rate exceeded")) { return true; } diff --git a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchMetadataHandler.java b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchMetadataHandler.java index e07c6f5422..e62ca50477 100644 --- a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchMetadataHandler.java +++ b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchMetadataHandler.java @@ -43,15 +43,6 @@ import com.amazonaws.athena.connector.lambda.metadata.optimizations.OptimizationSubType; import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory; import com.amazonaws.athena.connectors.cloudwatch.qpt.CloudwatchQueryPassthrough; -import com.amazonaws.services.logs.AWSLogs; -import com.amazonaws.services.logs.AWSLogsClientBuilder; -import com.amazonaws.services.logs.model.DescribeLogGroupsRequest; -import 
com.amazonaws.services.logs.model.DescribeLogGroupsResult; -import com.amazonaws.services.logs.model.DescribeLogStreamsRequest; -import com.amazonaws.services.logs.model.DescribeLogStreamsResult; -import com.amazonaws.services.logs.model.GetQueryResultsResult; -import com.amazonaws.services.logs.model.LogStream; -import com.amazonaws.services.logs.model.ResultField; import com.google.common.collect.ImmutableMap; import org.apache.arrow.util.VisibleForTesting; import org.apache.arrow.vector.complex.reader.FieldReader; @@ -61,6 +52,14 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.cloudwatchlogs.CloudWatchLogsClient; +import software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogGroupsRequest; +import software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogGroupsResponse; +import software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogStreamsRequest; +import software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogStreamsResponse; +import software.amazon.awssdk.services.cloudwatchlogs.model.GetQueryResultsResponse; +import software.amazon.awssdk.services.cloudwatchlogs.model.LogStream; +import software.amazon.awssdk.services.cloudwatchlogs.model.ResultField; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.ArrayList; @@ -123,7 +122,7 @@ public class CloudwatchMetadataHandler .build(); } - private final AWSLogs awsLogs; + private final CloudWatchLogsClient awsLogs; private final ThrottlingInvoker invoker; private final CloudwatchTableResolver tableResolver; private final CloudwatchQueryPassthrough queryPassthrough = new CloudwatchQueryPassthrough(); @@ -131,14 +130,14 @@ public class CloudwatchMetadataHandler public CloudwatchMetadataHandler(java.util.Map configOptions) { super(SOURCE_TYPE, configOptions); - this.awsLogs = AWSLogsClientBuilder.standard().build(); + this.awsLogs = 
CloudWatchLogsClient.create(); this.invoker = ThrottlingInvoker.newDefaultBuilder(EXCEPTION_FILTER, configOptions).build(); - this.tableResolver = new CloudwatchTableResolver(this.invoker, awsLogs, MAX_RESULTS, MAX_RESULTS); + this.tableResolver = new CloudwatchTableResolver(this.invoker, awsLogs, MAX_RESULTS, MAX_RESULTS); } @VisibleForTesting protected CloudwatchMetadataHandler( - AWSLogs awsLogs, + CloudWatchLogsClient awsLogs, EncryptionKeyFactory keyFactory, SecretsManagerClient secretsManager, AthenaClient athena, @@ -161,19 +160,19 @@ protected CloudwatchMetadataHandler( public ListSchemasResponse doListSchemaNames(BlockAllocator blockAllocator, ListSchemasRequest listSchemasRequest) throws TimeoutException { - DescribeLogGroupsRequest request = new DescribeLogGroupsRequest(); - DescribeLogGroupsResult result; + DescribeLogGroupsRequest.Builder requestBuilder = DescribeLogGroupsRequest.builder(); + DescribeLogGroupsResponse response; List schemas = new ArrayList<>(); do { if (schemas.size() > MAX_RESULTS) { throw new RuntimeException("Too many log groups, exceeded max metadata results for schema count."); } - result = invoker.invoke(() -> awsLogs.describeLogGroups(request)); - result.getLogGroups().forEach(next -> schemas.add(next.getLogGroupName())); - request.setNextToken(result.getNextToken()); - logger.info("doListSchemaNames: Listing log groups {} {}", result.getNextToken(), schemas.size()); + response = invoker.invoke(() -> awsLogs.describeLogGroups(requestBuilder.build())); + response.logGroups().forEach(next -> schemas.add(next.logGroupName())); + requestBuilder.nextToken(response.nextToken()); + logger.info("doListSchemaNames: Listing log groups {} {}", response.nextToken(), schemas.size()); } - while (result.getNextToken() != null); + while (response.nextToken() != null); return new ListSchemasResponse(listSchemasRequest.getCatalogName(), schemas); } @@ -189,28 +188,28 @@ public ListTablesResponse doListTables(BlockAllocator blockAllocator, 
ListTables { String nextToken = null; String logGroupName = tableResolver.validateSchema(listTablesRequest.getSchemaName()); - DescribeLogStreamsRequest request = new DescribeLogStreamsRequest(logGroupName); - DescribeLogStreamsResult result; + DescribeLogStreamsRequest.Builder requestBuilder = DescribeLogStreamsRequest.builder().logGroupName(logGroupName); + DescribeLogStreamsResponse response; List tables = new ArrayList<>(); if (listTablesRequest.getPageSize() == UNLIMITED_PAGE_SIZE_VALUE) { do { if (tables.size() > MAX_RESULTS) { throw new RuntimeException("Too many log streams, exceeded max metadata results for table count."); } - result = invoker.invoke(() -> awsLogs.describeLogStreams(request)); - result.getLogStreams().forEach(next -> tables.add(toTableName(listTablesRequest, next))); - request.setNextToken(result.getNextToken()); - logger.info("doListTables: Listing log streams with token {} and size {}", result.getNextToken(), tables.size()); + response = invoker.invoke(() -> awsLogs.describeLogStreams(requestBuilder.build())); + response.logStreams().forEach(next -> tables.add(toTableName(listTablesRequest, next))); + requestBuilder.nextToken(response.nextToken()); + logger.info("doListTables: Listing log streams with token {} and size {}", response.nextToken(), tables.size()); } - while (result.getNextToken() != null); + while (response.nextToken() != null); } else { - request.setNextToken(listTablesRequest.getNextToken()); - request.setLimit(listTablesRequest.getPageSize()); - result = invoker.invoke(() -> awsLogs.describeLogStreams(request)); - result.getLogStreams().forEach(next -> tables.add(toTableName(listTablesRequest, next))); - nextToken = result.getNextToken(); - logger.info("doListTables: Listing log streams with token {} and size {}", result.getNextToken(), tables.size()); + requestBuilder.nextToken(listTablesRequest.getNextToken()); + requestBuilder.limit(listTablesRequest.getPageSize()); + response = invoker.invoke(() -> 
awsLogs.describeLogStreams(requestBuilder.build())); + response.logStreams().forEach(next -> tables.add(toTableName(listTablesRequest, next))); + nextToken = response.nextToken(); + logger.info("doListTables: Listing log streams with token {} and size {}", response.nextToken(), tables.size()); } // Don't add the ALL_LOG_STREAMS_TABLE unless we're at the end of listing out all the tables. @@ -276,26 +275,26 @@ public void getPartitions(BlockWriter blockWriter, GetTableLayoutRequest request CloudwatchTableName cwTableName = tableResolver.validateTable(request.getTableName()); - DescribeLogStreamsRequest cwRequest = new DescribeLogStreamsRequest(cwTableName.getLogGroupName()); + DescribeLogStreamsRequest.Builder cwRequestBuilder = DescribeLogStreamsRequest.builder().logGroupName(cwTableName.getLogGroupName()); if (!ALL_LOG_STREAMS_TABLE.equals(cwTableName.getLogStreamName())) { - cwRequest.setLogStreamNamePrefix(cwTableName.getLogStreamName()); + cwRequestBuilder.logStreamNamePrefix(cwTableName.getLogStreamName()); } - DescribeLogStreamsResult result; + DescribeLogStreamsResponse response; do { - result = invoker.invoke(() -> awsLogs.describeLogStreams(cwRequest)); - for (LogStream next : result.getLogStreams()) { + response = invoker.invoke(() -> awsLogs.describeLogStreams(cwRequestBuilder.build())); + for (LogStream next : response.logStreams()) { //Each log stream that matches any possible partition pruning should be added to the partition list. 
blockWriter.writeRows((Block block, int rowNum) -> { - boolean matched = block.setValue(LOG_GROUP_FIELD, rowNum, cwRequest.getLogGroupName()); - matched &= block.setValue(LOG_STREAM_FIELD, rowNum, next.getLogStreamName()); - matched &= block.setValue(LOG_STREAM_SIZE_FIELD, rowNum, next.getStoredBytes()); + boolean matched = block.setValue(LOG_GROUP_FIELD, rowNum, cwRequestBuilder.build().logGroupName()); + matched &= block.setValue(LOG_STREAM_FIELD, rowNum, next.logStreamName()); + matched &= block.setValue(LOG_STREAM_SIZE_FIELD, rowNum, next.storedBytes()); return matched ? 1 : 0; }); } - cwRequest.setNextToken(result.getNextToken()); + cwRequestBuilder.nextToken(response.nextToken()); } - while (result.getNextToken() != null && queryStatusChecker.isQueryRunning()); + while (response.nextToken() != null && queryStatusChecker.isQueryRunning()); } /** @@ -367,11 +366,11 @@ public GetTableResponse doGetQueryPassthroughSchema(BlockAllocator allocator, Ge throw new IllegalArgumentException("No Query passed through [{}]" + request); } // to get column names with limit 1 - GetQueryResultsResult getQueryResultsResult = getResult(invoker, awsLogs, request.getQueryPassthroughArguments(), 1); + GetQueryResultsResponse getQueryResultsResponse = getResult(invoker, awsLogs, request.getQueryPassthroughArguments(), 1); SchemaBuilder schemaBuilder = SchemaBuilder.newBuilder(); - if (!getQueryResultsResult.getResults().isEmpty()) { - for (ResultField field : getQueryResultsResult.getResults().get(0)) { - schemaBuilder.addField(field.getField(), Types.MinorType.VARCHAR.getType()); + if (!getQueryResultsResponse.results().isEmpty()) { + for (ResultField field : getQueryResultsResponse.results().get(0)) { + schemaBuilder.addField(field.field(), Types.MinorType.VARCHAR.getType()); } } @@ -415,6 +414,6 @@ private String encodeContinuationToken(int partition) */ private TableName toTableName(ListTablesRequest request, LogStream logStream) { - return new TableName(request.getSchemaName(), 
logStream.getLogStreamName()); + return new TableName(request.getSchemaName(), logStream.logStreamName()); } } diff --git a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandler.java b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandler.java index 7b4aa47596..912b94d218 100644 --- a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandler.java +++ b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandler.java @@ -32,17 +32,16 @@ import com.amazonaws.athena.connector.lambda.handlers.RecordHandler; import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest; import com.amazonaws.athena.connectors.cloudwatch.qpt.CloudwatchQueryPassthrough; -import com.amazonaws.services.logs.AWSLogs; -import com.amazonaws.services.logs.AWSLogsClientBuilder; -import com.amazonaws.services.logs.model.GetLogEventsRequest; -import com.amazonaws.services.logs.model.GetLogEventsResult; -import com.amazonaws.services.logs.model.GetQueryResultsResult; -import com.amazonaws.services.logs.model.OutputLogEvent; -import com.amazonaws.services.logs.model.ResultField; import org.apache.arrow.util.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.cloudwatchlogs.CloudWatchLogsClient; +import software.amazon.awssdk.services.cloudwatchlogs.model.GetLogEventsRequest; +import software.amazon.awssdk.services.cloudwatchlogs.model.GetLogEventsResponse; +import software.amazon.awssdk.services.cloudwatchlogs.model.GetQueryResultsResponse; +import software.amazon.awssdk.services.cloudwatchlogs.model.OutputLogEvent; +import software.amazon.awssdk.services.cloudwatchlogs.model.ResultField; import software.amazon.awssdk.services.s3.S3Client; import 
software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; @@ -75,7 +74,7 @@ public class CloudwatchRecordHandler //Used to handle Throttling events and apply AIMD congestion control private final ThrottlingInvoker invoker; private final AtomicLong count = new AtomicLong(0); - private final AWSLogs awsLogs; + private final CloudWatchLogsClient awsLogs; private final CloudwatchQueryPassthrough queryPassthrough = new CloudwatchQueryPassthrough(); public CloudwatchRecordHandler(java.util.Map configOptions) @@ -84,12 +83,12 @@ public CloudwatchRecordHandler(java.util.Map configOptions) S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(), - AWSLogsClientBuilder.defaultClient(), + CloudWatchLogsClient.create(), configOptions); } @VisibleForTesting - protected CloudwatchRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, AWSLogs awsLogs, java.util.Map configOptions) + protected CloudwatchRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, CloudWatchLogsClient awsLogs, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, SOURCE_TYPE, configOptions); this.awsLogs = awsLogs; @@ -115,37 +114,38 @@ protected void readWithConstraint(BlockSpiller spiller, ReadRecordsRequest recor invoker.setBlockSpiller(spiller); do { final String actualContinuationToken = continuationToken; - GetLogEventsResult logEventsResult = invoker.invoke(() -> awsLogs.getLogEvents( + GetLogEventsResponse logEventsResponse = invoker.invoke(() -> awsLogs.getLogEvents( pushDownConstraints(recordsRequest.getConstraints(), - new GetLogEventsRequest() - .withLogGroupName(split.getProperty(LOG_GROUP_FIELD)) + GetLogEventsRequest.builder() + .logGroupName(split.getProperty(LOG_GROUP_FIELD)) //We use the property instead of the table name because of the special all_streams table - .withLogStreamName(split.getProperty(LOG_STREAM_FIELD)) - .withNextToken(actualContinuationToken) + 
.logStreamName(split.getProperty(LOG_STREAM_FIELD)) + .nextToken(actualContinuationToken) // must be set to use nextToken correctly - .withStartFromHead(true) + .startFromHead(true) + .build() ))); - if (continuationToken == null || !continuationToken.equals(logEventsResult.getNextForwardToken())) { - continuationToken = logEventsResult.getNextForwardToken(); + if (continuationToken == null || !continuationToken.equals(logEventsResponse.nextForwardToken())) { + continuationToken = logEventsResponse.nextForwardToken(); } else { continuationToken = null; } - for (OutputLogEvent ole : logEventsResult.getEvents()) { + for (OutputLogEvent ole : logEventsResponse.events()) { spiller.writeRows((Block block, int rowNum) -> { boolean matched = true; matched &= block.offerValue(LOG_STREAM_FIELD, rowNum, split.getProperty(LOG_STREAM_FIELD)); - matched &= block.offerValue(LOG_TIME_FIELD, rowNum, ole.getTimestamp()); - matched &= block.offerValue(LOG_MSG_FIELD, rowNum, ole.getMessage()); + matched &= block.offerValue(LOG_TIME_FIELD, rowNum, ole.timestamp()); + matched &= block.offerValue(LOG_MSG_FIELD, rowNum, ole.message()); return matched ? 
1 : 0; }); } logger.info("readWithConstraint: LogGroup[{}] LogStream[{}] Continuation[{}] rows[{}]", tableName.getSchemaName(), tableName.getTableName(), continuationToken, - logEventsResult.getEvents().size()); + logEventsResponse.events().size()); } while (continuationToken != null && queryStatusChecker.isQueryRunning()); } @@ -155,13 +155,13 @@ private void getQueryPassthreoughResults(BlockSpiller spiller, ReadRecordsReques { Map qptArguments = recordsRequest.getConstraints().getQueryPassthroughArguments(); queryPassthrough.verify(qptArguments); - GetQueryResultsResult getQueryResultsResult = getResult(invoker, awsLogs, qptArguments, Integer.parseInt(qptArguments.get(CloudwatchQueryPassthrough.LIMIT))); + GetQueryResultsResponse getQueryResultsResponse = getResult(invoker, awsLogs, qptArguments, Integer.parseInt(qptArguments.get(CloudwatchQueryPassthrough.LIMIT))); - for (List resultList : getQueryResultsResult.getResults()) { + for (List resultList : getQueryResultsResponse.results()) { spiller.writeRows((Block block, int rowNum) -> { for (ResultField resultField : resultList) { boolean matched = true; - matched &= block.offerValue(resultField.getField(), rowNum, resultField.getValue()); + matched &= block.offerValue(resultField.field(), rowNum, resultField.value()); if (!matched) { return 0; } @@ -181,6 +181,7 @@ private void getQueryPassthreoughResults(BlockSpiller spiller, ReadRecordsReques */ private GetLogEventsRequest pushDownConstraints(Constraints constraints, GetLogEventsRequest request) { + GetLogEventsRequest.Builder requestBuilder = request.toBuilder(); ValueSet timeConstraint = constraints.getSummary().get(LOG_TIME_FIELD); if (timeConstraint instanceof SortedRangeSet && !timeConstraint.isNullAllowed()) { //SortedRangeSet is how >, <, between is represented which are easiest and most common when @@ -192,15 +193,15 @@ private GetLogEventsRequest pushDownConstraints(Constraints constraints, GetLogE if (!basicPredicate.getLow().isNullValue()) { Long 
lowerBound = (Long) basicPredicate.getLow().getValue(); - request.setStartTime(lowerBound); + requestBuilder.startTime(lowerBound); } if (!basicPredicate.getHigh().isNullValue()) { Long upperBound = (Long) basicPredicate.getHigh().getValue(); - request.setEndTime(upperBound); + requestBuilder.endTime(upperBound); } } - return request; + return requestBuilder.build(); } } diff --git a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchTableResolver.java b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchTableResolver.java index 4c7f25ec7e..d4059b0438 100644 --- a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchTableResolver.java +++ b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchTableResolver.java @@ -21,18 +21,18 @@ import com.amazonaws.athena.connector.lambda.ThrottlingInvoker; import com.amazonaws.athena.connector.lambda.domain.TableName; -import com.amazonaws.services.logs.AWSLogs; -import com.amazonaws.services.logs.model.DescribeLogGroupsRequest; -import com.amazonaws.services.logs.model.DescribeLogGroupsResult; -import com.amazonaws.services.logs.model.DescribeLogStreamsRequest; -import com.amazonaws.services.logs.model.DescribeLogStreamsResult; -import com.amazonaws.services.logs.model.LogGroup; -import com.amazonaws.services.logs.model.LogStream; import com.google.common.cache.CacheBuilder; import com.google.common.cache.CacheLoader; import com.google.common.cache.LoadingCache; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.cloudwatchlogs.CloudWatchLogsClient; +import software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogGroupsRequest; +import software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogGroupsResponse; +import software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogStreamsRequest; +import 
software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogStreamsResponse; +import software.amazon.awssdk.services.cloudwatchlogs.model.LogGroup; +import software.amazon.awssdk.services.cloudwatchlogs.model.LogStream; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeoutException; @@ -51,7 +51,7 @@ public class CloudwatchTableResolver { private static final Logger logger = LoggerFactory.getLogger(CloudwatchTableResolver.class); - private AWSLogs awsLogs; + private CloudWatchLogsClient logsClient; //Used to handle Throttling events using an AIMD strategy for congestion control. private ThrottlingInvoker invoker; //The LogStream pattern that is capitalized by LAMBDA @@ -67,14 +67,14 @@ public class CloudwatchTableResolver * Constructs an instance of the table resolver. * * @param invoker The ThrottlingInvoker to use to handle throttling events. - * @param awsLogs The AWSLogs client to use for cache misses. + * @param logsClient The AWSLogs client to use for cache misses. * @param maxSchemaCacheSize The max number of schemas to cache. * @param maxTableCacheSize The max tables to cache. 
*/ - public CloudwatchTableResolver(ThrottlingInvoker invoker, AWSLogs awsLogs, long maxSchemaCacheSize, long maxTableCacheSize) + public CloudwatchTableResolver(ThrottlingInvoker invoker, CloudWatchLogsClient logsClient, long maxSchemaCacheSize, long maxTableCacheSize) { this.invoker = invoker; - this.awsLogs = awsLogs; + this.logsClient = logsClient; this.tableCache = CacheBuilder.newBuilder() .maximumSize(maxTableCacheSize) .build( @@ -119,12 +119,12 @@ private CloudwatchTableName loadLogStreams(String logGroup, String logStream) logger.info("loadLogStreams: Did not find a match for the table, falling back to LogGroup scan for {}:{}", logGroup, logStream); - DescribeLogStreamsRequest validateTableRequest = new DescribeLogStreamsRequest(logGroup); - DescribeLogStreamsResult validateTableResult; + DescribeLogStreamsRequest.Builder validateTableRequestBuilder = DescribeLogStreamsRequest.builder().logGroupName(logGroup); + DescribeLogStreamsResponse validateTableResponse; do { - validateTableResult = invoker.invoke(() -> awsLogs.describeLogStreams(validateTableRequest)); - for (LogStream nextStream : validateTableResult.getLogStreams()) { - String logStreamName = nextStream.getLogStreamName(); + validateTableResponse = invoker.invoke(() -> logsClient.describeLogStreams(validateTableRequestBuilder.build())); + for (LogStream nextStream : validateTableResponse.logStreams()) { + String logStreamName = nextStream.logStreamName(); CloudwatchTableName nextCloudwatch = new CloudwatchTableName(logGroup, logStreamName); tableCache.put(nextCloudwatch.toTableName(), nextCloudwatch); if (nextCloudwatch.getLogStreamName().equalsIgnoreCase(logStream)) { @@ -134,9 +134,9 @@ private CloudwatchTableName loadLogStreams(String logGroup, String logStream) return nextCloudwatch; } } - validateTableRequest.setNextToken(validateTableResult.getNextToken()); + validateTableRequestBuilder.nextToken(validateTableResponse.nextToken()); } - while (validateTableResult.getNextToken() != null); + 
while (validateTableResponse.nextToken() != null); //We could not find a match throw new IllegalArgumentException("No such table " + logGroup + " " + logStream); @@ -163,11 +163,11 @@ private CloudwatchTableName loadLogStream(String logGroup, String logStream) LAMBDA_PATTERN, effectiveTableName); effectiveTableName = effectiveTableName.replace(LAMBDA_PATTERN, LAMBDA_ACTUAL_PATTERN); } - DescribeLogStreamsRequest request = new DescribeLogStreamsRequest(logGroup) - .withLogStreamNamePrefix(effectiveTableName); - DescribeLogStreamsResult result = invoker.invoke(() -> awsLogs.describeLogStreams(request)); - for (LogStream nextStream : result.getLogStreams()) { - String logStreamName = nextStream.getLogStreamName(); + DescribeLogStreamsRequest request = DescribeLogStreamsRequest.builder().logGroupName(logGroup) + .logStreamNamePrefix(effectiveTableName).build(); + DescribeLogStreamsResponse response = invoker.invoke(() -> logsClient.describeLogStreams(request)); + for (LogStream nextStream : response.logStreams()) { + String logStreamName = nextStream.logStreamName(); CloudwatchTableName nextCloudwatch = new CloudwatchTableName(logGroup, logStreamName); if (nextCloudwatch.getLogStreamName().equalsIgnoreCase(logStream)) { logger.info("loadLogStream: Matched {} for {}:{}", nextCloudwatch, logGroup, logStream); @@ -195,21 +195,21 @@ private String loadLogGroups(String schemaName) } logger.info("loadLogGroups: Did not find a match for the schema, falling back to LogGroup scan for {}", schemaName); - DescribeLogGroupsRequest validateSchemaRequest = new DescribeLogGroupsRequest(); - DescribeLogGroupsResult validateSchemaResult; + DescribeLogGroupsRequest.Builder validateSchemaRequestBuilder = DescribeLogGroupsRequest.builder(); + DescribeLogGroupsResponse validateSchemaResponse; do { - validateSchemaResult = invoker.invoke(() -> awsLogs.describeLogGroups(validateSchemaRequest)); - for (LogGroup next : validateSchemaResult.getLogGroups()) { - String nextLogGroupName = 
next.getLogGroupName(); + validateSchemaResponse = invoker.invoke(() -> logsClient.describeLogGroups(validateSchemaRequestBuilder.build())); + for (LogGroup next : validateSchemaResponse.logGroups()) { + String nextLogGroupName = next.logGroupName(); schemaCache.put(schemaName, nextLogGroupName); if (nextLogGroupName.equalsIgnoreCase(schemaName)) { logger.info("loadLogGroups: Matched {} for {}", nextLogGroupName, schemaName); return nextLogGroupName; } } - validateSchemaRequest.setNextToken(validateSchemaResult.getNextToken()); + validateSchemaRequestBuilder.nextToken(validateSchemaResponse.nextToken()); } - while (validateSchemaResult.getNextToken() != null); + while (validateSchemaResponse.nextToken() != null); //We could not find a match throw new IllegalArgumentException("No such schema " + schemaName); @@ -224,10 +224,10 @@ private String loadLogGroups(String schemaName) private String loadLogGroup(String schemaName) throws TimeoutException { - DescribeLogGroupsRequest request = new DescribeLogGroupsRequest().withLogGroupNamePrefix(schemaName); - DescribeLogGroupsResult result = invoker.invoke(() -> awsLogs.describeLogGroups(request)); - for (LogGroup next : result.getLogGroups()) { - String nextLogGroupName = next.getLogGroupName(); + DescribeLogGroupsRequest request = DescribeLogGroupsRequest.builder().logGroupNamePrefix(schemaName).build(); + DescribeLogGroupsResponse response = invoker.invoke(() -> logsClient.describeLogGroups(request)); + for (LogGroup next : response.logGroups()) { + String nextLogGroupName = next.logGroupName(); if (nextLogGroupName.equalsIgnoreCase(schemaName)) { logger.info("loadLogGroup: Matched {} for {}", nextLogGroupName, schemaName); return nextLogGroupName; diff --git a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchUtils.java b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchUtils.java index 5c19ec17ee..bb8a209d47 100644 --- 
a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchUtils.java +++ b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchUtils.java @@ -21,13 +21,14 @@ import com.amazonaws.athena.connector.lambda.ThrottlingInvoker; import com.amazonaws.athena.connectors.cloudwatch.qpt.CloudwatchQueryPassthrough; -import com.amazonaws.services.logs.AWSLogs; -import com.amazonaws.services.logs.model.GetQueryResultsRequest; -import com.amazonaws.services.logs.model.GetQueryResultsResult; -import com.amazonaws.services.logs.model.StartQueryRequest; -import com.amazonaws.services.logs.model.StartQueryResult; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.cloudwatchlogs.CloudWatchLogsClient; +import software.amazon.awssdk.services.cloudwatchlogs.model.GetQueryResultsRequest; +import software.amazon.awssdk.services.cloudwatchlogs.model.GetQueryResultsResponse; +import software.amazon.awssdk.services.cloudwatchlogs.model.QueryStatus; +import software.amazon.awssdk.services.cloudwatchlogs.model.StartQueryRequest; +import software.amazon.awssdk.services.cloudwatchlogs.model.StartQueryResponse; import java.time.Instant; import java.time.temporal.ChronoUnit; @@ -41,8 +42,8 @@ public final class CloudwatchUtils private CloudwatchUtils() {} public static StartQueryRequest startQueryRequest(Map qptArguments) { - return new StartQueryRequest().withEndTime(Long.valueOf(qptArguments.get(CloudwatchQueryPassthrough.ENDTIME))).withStartTime(Long.valueOf(qptArguments.get(CloudwatchQueryPassthrough.STARTTIME))) - .withQueryString(qptArguments.get(CloudwatchQueryPassthrough.QUERYSTRING)).withLogGroupNames(getLogGroupNames(qptArguments)); + return StartQueryRequest.builder().endTime(Long.valueOf(qptArguments.get(CloudwatchQueryPassthrough.ENDTIME))).startTime(Long.valueOf(qptArguments.get(CloudwatchQueryPassthrough.STARTTIME))) + 
.queryString(qptArguments.get(CloudwatchQueryPassthrough.QUERYSTRING)).logGroupNames(getLogGroupNames(qptArguments)).build(); } private static String[] getLogGroupNames(Map qptArguments) @@ -55,25 +56,25 @@ private static String[] getLogGroupNames(Map qptArguments) return logGroupNames; } - public static StartQueryResult getQueryResult(AWSLogs awsLogs, StartQueryRequest startQueryRequest) + public static StartQueryResponse getQueryResult(CloudWatchLogsClient awsLogs, StartQueryRequest startQueryRequest) { return awsLogs.startQuery(startQueryRequest); } - public static GetQueryResultsResult getQueryResults(AWSLogs awsLogs, StartQueryResult startQueryResult) + public static GetQueryResultsResponse getQueryResults(CloudWatchLogsClient awsLogs, StartQueryResponse startQueryResponse) { - return awsLogs.getQueryResults(new GetQueryResultsRequest().withQueryId(startQueryResult.getQueryId())); + return awsLogs.getQueryResults(GetQueryResultsRequest.builder().queryId(startQueryResponse.queryId()).build()); } - public static GetQueryResultsResult getResult(ThrottlingInvoker invoker, AWSLogs awsLogs, Map qptArguments, int limit) throws TimeoutException, InterruptedException + public static GetQueryResultsResponse getResult(ThrottlingInvoker invoker, CloudWatchLogsClient awsLogs, Map qptArguments, int limit) throws TimeoutException, InterruptedException { - StartQueryResult startQueryResult = invoker.invoke(() -> getQueryResult(awsLogs, startQueryRequest(qptArguments).withLimit(limit))); - String status = null; - GetQueryResultsResult getQueryResultsResult; + StartQueryResponse startQueryResponse = invoker.invoke(() -> getQueryResult(awsLogs, startQueryRequest(qptArguments).toBuilder().limit(limit).build())); + QueryStatus status = null; + GetQueryResultsResponse getQueryResultsResponse; Instant startTime = Instant.now(); // Record the start time do { - getQueryResultsResult = invoker.invoke(() -> getQueryResults(awsLogs, startQueryResult)); - status = 
getQueryResultsResult.getStatus(); + getQueryResultsResponse = invoker.invoke(() -> getQueryResults(awsLogs, startQueryResponse)); + status = getQueryResultsResponse.status(); Thread.sleep(1000); // Check if 10 minutes have passed @@ -82,8 +83,8 @@ public static GetQueryResultsResult getResult(ThrottlingInvoker invoker, AWSLogs if (elapsedMinutes >= RESULT_TIMEOUT) { throw new RuntimeException("Query execution timeout exceeded."); } - } while (!status.equalsIgnoreCase("Complete")); + } while (!status.equals(QueryStatus.COMPLETE)); - return getQueryResultsResult; + return getQueryResultsResponse; } } diff --git a/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchMetadataHandlerTest.java b/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchMetadataHandlerTest.java index cc2ce27fb8..f615b3c7b1 100644 --- a/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchMetadataHandlerTest.java +++ b/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchMetadataHandlerTest.java @@ -43,13 +43,6 @@ import com.amazonaws.athena.connector.lambda.metadata.MetadataResponse; import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; -import com.amazonaws.services.logs.AWSLogs; -import com.amazonaws.services.logs.model.DescribeLogGroupsRequest; -import com.amazonaws.services.logs.model.DescribeLogGroupsResult; -import com.amazonaws.services.logs.model.DescribeLogStreamsRequest; -import com.amazonaws.services.logs.model.DescribeLogStreamsResult; -import com.amazonaws.services.logs.model.LogGroup; -import com.amazonaws.services.logs.model.LogStream; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.ArrowType; import org.apache.arrow.vector.types.pojo.Schema; @@ -64,6 +57,13 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import 
software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.cloudwatchlogs.CloudWatchLogsClient; +import software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogGroupsRequest; +import software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogGroupsResponse; +import software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogStreamsRequest; +import software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogStreamsResponse; +import software.amazon.awssdk.services.cloudwatchlogs.model.LogGroup; +import software.amazon.awssdk.services.cloudwatchlogs.model.LogStream; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.ArrayList; @@ -92,7 +92,7 @@ public class CloudwatchMetadataHandlerTest private BlockAllocator allocator; @Mock - private AWSLogs mockAwsLogs; + private CloudWatchLogsClient mockAwsLogs; @Mock private SecretsManagerClient mockSecretsManager; @@ -105,13 +105,19 @@ public void setUp() throws Exception { Mockito.lenient().when(mockAwsLogs.describeLogStreams(nullable(DescribeLogStreamsRequest.class))).thenAnswer((InvocationOnMock invocationOnMock) -> { - return new DescribeLogStreamsResult().withLogStreams(new LogStream().withLogStreamName("table-9"), - new LogStream().withLogStreamName("table-10")); + return DescribeLogStreamsResponse.builder() + .logStreams( + LogStream.builder().logStreamName("table-9").build(), + LogStream.builder().logStreamName("table-10").build()) + .build(); }); when(mockAwsLogs.describeLogGroups(nullable(DescribeLogGroupsRequest.class))).thenAnswer((InvocationOnMock invocationOnMock) -> { - return new DescribeLogGroupsResult().withLogGroups(new LogGroup().withLogGroupName("schema-1"), - new LogGroup().withLogGroupName("schema-20")); + return DescribeLogGroupsResponse.builder() + .logGroups( + LogGroup.builder().logGroupName("schema-1").build(), + LogGroup.builder().logGroupName("schema-20").build()) + .build(); }); handler = new 
CloudwatchMetadataHandler(mockAwsLogs, new LocalKeyFactory(), mockSecretsManager, mockAthena, "spillBucket", "spillPrefix", com.google.common.collect.ImmutableMap.of()); allocator = new BlockAllocatorImpl(); @@ -133,34 +139,33 @@ public void doListSchemaNames() when(mockAwsLogs.describeLogGroups(nullable(DescribeLogGroupsRequest.class))).thenAnswer((InvocationOnMock invocationOnMock) -> { DescribeLogGroupsRequest request = (DescribeLogGroupsRequest) invocationOnMock.getArguments()[0]; - DescribeLogGroupsResult result = new DescribeLogGroupsResult(); + DescribeLogGroupsResponse.Builder responseBuilder = DescribeLogGroupsResponse.builder(); Integer nextToken; - if (request.getNextToken() == null) { + if (request.nextToken() == null) { nextToken = 1; } - else if (Integer.valueOf(request.getNextToken()) < 3) { - nextToken = Integer.valueOf(request.getNextToken()) + 1; + else if (Integer.valueOf(request.nextToken()) < 3) { + nextToken = Integer.valueOf(request.nextToken()) + 1; } else { nextToken = null; } List logGroups = new ArrayList<>(); - if (request.getNextToken() == null || Integer.valueOf(request.getNextToken()) < 3) { + if (request.nextToken() == null || Integer.valueOf(request.nextToken()) < 3) { for (int i = 0; i < 10; i++) { - LogGroup nextLogGroup = new LogGroup(); - nextLogGroup.setLogGroupName("schema-" + String.valueOf(i)); + LogGroup nextLogGroup = LogGroup.builder().logGroupName("schema-" + String.valueOf(i)).build(); logGroups.add(nextLogGroup); } } - result.withLogGroups(logGroups); + responseBuilder.logGroups(logGroups); if (nextToken != null) { - result.setNextToken(String.valueOf(nextToken)); + responseBuilder.nextToken(String.valueOf(nextToken)); } - return result; + return responseBuilder.build(); }); ListSchemasRequest req = new ListSchemasRequest(identity, "queryId", "default"); @@ -183,34 +188,33 @@ public void doListTables() when(mockAwsLogs.describeLogStreams(nullable(DescribeLogStreamsRequest.class))).thenAnswer((InvocationOnMock 
invocationOnMock) -> { DescribeLogStreamsRequest request = (DescribeLogStreamsRequest) invocationOnMock.getArguments()[0]; - DescribeLogStreamsResult result = new DescribeLogStreamsResult(); + DescribeLogStreamsResponse.Builder responseBuilder = DescribeLogStreamsResponse.builder(); Integer nextToken; - if (request.getNextToken() == null) { + if (request.nextToken() == null) { nextToken = 1; } - else if (Integer.valueOf(request.getNextToken()) < 3) { - nextToken = Integer.valueOf(request.getNextToken()) + 1; + else if (Integer.valueOf(request.nextToken()) < 3) { + nextToken = Integer.valueOf(request.nextToken()) + 1; } else { nextToken = null; } List logStreams = new ArrayList<>(); - if (request.getNextToken() == null || Integer.valueOf(request.getNextToken()) < 3) { + if (request.nextToken() == null || Integer.valueOf(request.nextToken()) < 3) { for (int i = 0; i < 10; i++) { - LogStream nextLogStream = new LogStream(); - nextLogStream.setLogStreamName("table-" + String.valueOf(i)); + LogStream nextLogStream = LogStream.builder().logStreamName("table-" + String.valueOf(i)).build(); logStreams.add(nextLogStream); } } - result.withLogStreams(logStreams); + responseBuilder.logStreams(logStreams); if (nextToken != null) { - result.setNextToken(String.valueOf(nextToken)); + responseBuilder.nextToken(String.valueOf(nextToken)); } - return result; + return responseBuilder.build(); }); ListTablesRequest req = new ListTablesRequest(identity, "queryId", "default", @@ -238,35 +242,34 @@ public void doGetTable() when(mockAwsLogs.describeLogStreams(nullable(DescribeLogStreamsRequest.class))).thenAnswer((InvocationOnMock invocationOnMock) -> { DescribeLogStreamsRequest request = (DescribeLogStreamsRequest) invocationOnMock.getArguments()[0]; - assertTrue(request.getLogGroupName().equals(expectedSchema)); - DescribeLogStreamsResult result = new DescribeLogStreamsResult(); + assertTrue(request.logGroupName().equals(expectedSchema)); + DescribeLogStreamsResponse.Builder 
responseBuilder = DescribeLogStreamsResponse.builder(); Integer nextToken; - if (request.getNextToken() == null) { + if (request.nextToken() == null) { nextToken = 1; } - else if (Integer.valueOf(request.getNextToken()) < 3) { - nextToken = Integer.valueOf(request.getNextToken()) + 1; + else if (Integer.valueOf(request.nextToken()) < 3) { + nextToken = Integer.valueOf(request.nextToken()) + 1; } else { nextToken = null; } List logStreams = new ArrayList<>(); - if (request.getNextToken() == null || Integer.valueOf(request.getNextToken()) < 3) { + if (request.nextToken() == null || Integer.valueOf(request.nextToken()) < 3) { for (int i = 0; i < 10; i++) { - LogStream nextLogStream = new LogStream(); - nextLogStream.setLogStreamName("table-" + String.valueOf(i)); + LogStream nextLogStream = LogStream.builder().logStreamName("table-" + String.valueOf(i)).build(); logStreams.add(nextLogStream); } } - result.withLogStreams(logStreams); + responseBuilder.logStreams(logStreams); if (nextToken != null) { - result.setNextToken(String.valueOf(nextToken)); + responseBuilder.nextToken(String.valueOf(nextToken)); } - return result; + return responseBuilder.build(); }); GetTableRequest req = new GetTableRequest(identity, "queryId", "default", new TableName(expectedSchema, "table-9"), Collections.emptyMap()); @@ -290,36 +293,37 @@ public void doGetTableLayout() when(mockAwsLogs.describeLogStreams(nullable(DescribeLogStreamsRequest.class))).thenAnswer((InvocationOnMock invocationOnMock) -> { DescribeLogStreamsRequest request = (DescribeLogStreamsRequest) invocationOnMock.getArguments()[0]; - DescribeLogStreamsResult result = new DescribeLogStreamsResult(); + DescribeLogStreamsResponse.Builder responseBuilder = DescribeLogStreamsResponse.builder(); Integer nextToken; - if (request.getNextToken() == null) { + if (request.nextToken() == null) { nextToken = 1; } - else if (Integer.valueOf(request.getNextToken()) < 3) { - nextToken = Integer.valueOf(request.getNextToken()) + 1; + else 
if (Integer.valueOf(request.nextToken()) < 3) { + nextToken = Integer.valueOf(request.nextToken()) + 1; } else { nextToken = null; } List logStreams = new ArrayList<>(); - if (request.getNextToken() == null || Integer.valueOf(request.getNextToken()) < 3) { - int continuation = request.getNextToken() == null ? 0 : Integer.valueOf(request.getNextToken()); + if (request.nextToken() == null || Integer.valueOf(request.nextToken()) < 3) { + int continuation = request.nextToken() == null ? 0 : Integer.valueOf(request.nextToken()); for (int i = 0 + continuation * 100; i < 300; i++) { - LogStream nextLogStream = new LogStream(); - nextLogStream.setLogStreamName("table-" + String.valueOf(i)); - nextLogStream.setStoredBytes(i * 1000L); + LogStream nextLogStream = LogStream.builder() + .logStreamName("table-" + String.valueOf(i)) + .storedBytes(i * 1000L) + .build(); logStreams.add(nextLogStream); } } - result.withLogStreams(logStreams); + responseBuilder.logStreams(logStreams); if (nextToken != null) { - result.setNextToken(String.valueOf(nextToken)); + responseBuilder.nextToken(String.valueOf(nextToken)); } - return result; + return responseBuilder.build(); }); Map constraintsMap = new HashMap<>(); diff --git a/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandlerTest.java b/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandlerTest.java index 758deacb50..f8b95fdafc 100644 --- a/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandlerTest.java +++ b/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandlerTest.java @@ -39,10 +39,6 @@ import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory; import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; -import com.amazonaws.services.logs.AWSLogs; -import 
com.amazonaws.services.logs.model.GetLogEventsRequest; -import com.amazonaws.services.logs.model.GetLogEventsResult; -import com.amazonaws.services.logs.model.OutputLogEvent; import com.google.common.collect.ImmutableList; import com.google.common.io.ByteStreams; import org.apache.arrow.vector.types.Types; @@ -59,6 +55,10 @@ import software.amazon.awssdk.core.ResponseInputStream; import software.amazon.awssdk.core.sync.RequestBody; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.cloudwatchlogs.CloudWatchLogsClient; +import software.amazon.awssdk.services.cloudwatchlogs.model.GetLogEventsRequest; +import software.amazon.awssdk.services.cloudwatchlogs.model.GetLogEventsResponse; +import software.amazon.awssdk.services.cloudwatchlogs.model.OutputLogEvent; import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.model.GetObjectRequest; import software.amazon.awssdk.services.s3.model.GetObjectResponse; @@ -95,7 +95,7 @@ public class CloudwatchRecordHandlerTest private EncryptionKeyFactory keyFactory = new LocalKeyFactory(); @Mock - private AWSLogs mockAwsLogs; + private CloudWatchLogsClient mockAwsLogs; @Mock private S3Client mockS3; @@ -144,39 +144,40 @@ public void setUp() GetLogEventsRequest request = (GetLogEventsRequest) invocationOnMock.getArguments()[0]; //Check that predicate pushdown was propagated to cloudwatch - assertNotNull(request.getStartTime()); - assertNotNull(request.getEndTime()); + assertNotNull(request.startTime()); + assertNotNull(request.endTime()); - GetLogEventsResult result = new GetLogEventsResult(); + GetLogEventsResponse.Builder responseBuilder = GetLogEventsResponse.builder(); Integer nextToken; - if (request.getNextToken() == null) { + if (request.nextToken() == null) { nextToken = 1; } - else if (Integer.valueOf(request.getNextToken()) < 3) { - nextToken = Integer.valueOf(request.getNextToken()) + 1; + else if (Integer.valueOf(request.nextToken()) < 
3) { + nextToken = Integer.valueOf(request.nextToken()) + 1; } else { nextToken = null; } List logEvents = new ArrayList<>(); - if (request.getNextToken() == null || Integer.valueOf(request.getNextToken()) < 3) { - long continuation = request.getNextToken() == null ? 0 : Integer.valueOf(request.getNextToken()); + if (request.nextToken() == null || Integer.valueOf(request.nextToken()) < 3) { + long continuation = request.nextToken() == null ? 0 : Integer.valueOf(request.nextToken()); for (int i = 0; i < 100_000; i++) { - OutputLogEvent outputLogEvent = new OutputLogEvent(); - outputLogEvent.setMessage("message-" + (continuation * i)); - outputLogEvent.setTimestamp(i * 100L); + OutputLogEvent outputLogEvent = OutputLogEvent.builder() + .message("message-" + (continuation * i)) + .timestamp(i * 100L) + .build(); logEvents.add(outputLogEvent); } } - result.withEvents(logEvents); + responseBuilder.events(logEvents); if (nextToken != null) { - result.setNextForwardToken(String.valueOf(nextToken)); + responseBuilder.nextForwardToken(String.valueOf(nextToken)); } - return result; + return responseBuilder.build(); }); } diff --git a/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/integ/CloudwatchIntegTest.java b/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/integ/CloudwatchIntegTest.java index 9c2c9cd839..c9d1dd9f73 100644 --- a/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/integ/CloudwatchIntegTest.java +++ b/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/integ/CloudwatchIntegTest.java @@ -20,11 +20,6 @@ package com.amazonaws.athena.connectors.cloudwatch.integ; import com.amazonaws.athena.connector.integ.IntegrationTestBase; -import com.amazonaws.services.logs.AWSLogs; -import com.amazonaws.services.logs.AWSLogsClientBuilder; -import com.amazonaws.services.logs.model.DeleteLogGroupRequest; -import com.amazonaws.services.logs.model.InputLogEvent; -import 
com.amazonaws.services.logs.model.PutLogEventsRequest; import com.google.common.collect.ImmutableList; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -38,6 +33,9 @@ import software.amazon.awscdk.services.logs.LogGroup; import software.amazon.awscdk.services.logs.LogStream; import software.amazon.awssdk.services.athena.model.Row; +import software.amazon.awssdk.services.cloudwatchlogs.CloudWatchLogsClient; +import software.amazon.awssdk.services.cloudwatchlogs.model.InputLogEvent; +import software.amazon.awssdk.services.cloudwatchlogs.model.PutLogEventsRequest; import java.util.ArrayList; import java.util.List; @@ -134,20 +132,21 @@ protected void setUpTableData() logger.info("Setting up Log Group: {}, Log Stream: {}", logGroupName, logStreamName); logger.info("----------------------------------------------------"); - AWSLogs logsClient = AWSLogsClientBuilder.defaultClient(); + CloudWatchLogsClient logsClient = CloudWatchLogsClient.create(); try { - logsClient.putLogEvents(new PutLogEventsRequest() - .withLogGroupName(logGroupName) - .withLogStreamName(logStreamName) - .withLogEvents( - new InputLogEvent().withTimestamp(currentTimeMillis).withMessage("Space, the final frontier."), - new InputLogEvent().withTimestamp(fromTimeMillis).withMessage(logMessage), - new InputLogEvent().withTimestamp(toTimeMillis + 5000) - .withMessage("To boldly go where no man has gone before!"))); + logsClient.putLogEvents(PutLogEventsRequest.builder() + .logGroupName(logGroupName) + .logStreamName(logStreamName) + .logEvents( + InputLogEvent.builder().timestamp(currentTimeMillis).message("Space, the final frontier.").build(), + InputLogEvent.builder().timestamp(fromTimeMillis).message(logMessage).build(), + InputLogEvent.builder().timestamp(toTimeMillis + 5000) + .message("To boldly go where no man has gone before!").build()) + .build()); } finally { - logsClient.shutdown(); + logsClient.close(); } } From ff290f06b684b3a0a3acb21056abf574ecd2bca9 Mon Sep 17 00:00:00 2001 From: 
Aimery Methena <159072740+aimethed@users.noreply.github.com> Date: Wed, 25 Sep 2024 12:06:09 -0400 Subject: [PATCH 39/87] v2 Cloudformation (#2281) --- athena-federation-integ-test/pom.xml | 35 ++-------- .../integ/clients/CloudFormationClient.java | 69 +++++++++---------- 2 files changed, 38 insertions(+), 66 deletions(-) diff --git a/athena-federation-integ-test/pom.xml b/athena-federation-integ-test/pom.xml index 59df6b8e0f..bee6d6be9d 100644 --- a/athena-federation-integ-test/pom.xml +++ b/athena-federation-integ-test/pom.xml @@ -11,33 +11,6 @@ jar Amazon Athena Query Federation Integ Test - - com.amazonaws - aws-java-sdk-core - ${aws-sdk.version} - - - com.fasterxml.jackson.datatype - jackson-datatype-jsr310 - - - com.fasterxml.jackson.dataformat - jackson-dataformat-cbor - - - com.fasterxml.jackson.core - jackson-core - - - com.fasterxml.jackson.core - jackson-databind - - - com.fasterxml.jackson.core - jackson-annotations - - - commons-cli commons-cli @@ -99,11 +72,11 @@ athena ${aws-sdk-v2.version} - + - com.amazonaws - aws-java-sdk-cloudformation - ${aws-sdk.version} + software.amazon.awssdk + cloudformation + ${aws-sdk-v2.version} diff --git a/athena-federation-integ-test/src/main/java/com/amazonaws/athena/connector/integ/clients/CloudFormationClient.java b/athena-federation-integ-test/src/main/java/com/amazonaws/athena/connector/integ/clients/CloudFormationClient.java index af5a92f9b7..37b290f0ad 100644 --- a/athena-federation-integ-test/src/main/java/com/amazonaws/athena/connector/integ/clients/CloudFormationClient.java +++ b/athena-federation-integ-test/src/main/java/com/amazonaws/athena/connector/integ/clients/CloudFormationClient.java @@ -19,15 +19,6 @@ */ package com.amazonaws.athena.connector.integ.clients; -import com.amazonaws.services.cloudformation.AmazonCloudFormation; -import com.amazonaws.services.cloudformation.AmazonCloudFormationClientBuilder; -import com.amazonaws.services.cloudformation.model.Capability; -import 
com.amazonaws.services.cloudformation.model.CreateStackRequest; -import com.amazonaws.services.cloudformation.model.CreateStackResult; -import com.amazonaws.services.cloudformation.model.DeleteStackRequest; -import com.amazonaws.services.cloudformation.model.DescribeStackEventsRequest; -import com.amazonaws.services.cloudformation.model.DescribeStackEventsResult; -import com.amazonaws.services.cloudformation.model.StackEvent; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.SerializationFeature; import org.slf4j.Logger; @@ -35,6 +26,14 @@ import org.testng.internal.collections.Pair; import software.amazon.awscdk.core.App; import software.amazon.awscdk.core.Stack; +import software.amazon.awssdk.services.cloudformation.model.Capability; +import software.amazon.awssdk.services.cloudformation.model.CreateStackRequest; +import software.amazon.awssdk.services.cloudformation.model.CreateStackResponse; +import software.amazon.awssdk.services.cloudformation.model.DeleteStackRequest; +import software.amazon.awssdk.services.cloudformation.model.DescribeStackEventsRequest; +import software.amazon.awssdk.services.cloudformation.model.DescribeStackEventsResponse; +import software.amazon.awssdk.services.cloudformation.model.ResourceStatus; +import software.amazon.awssdk.services.cloudformation.model.StackEvent; import java.util.List; @@ -46,13 +45,11 @@ public class CloudFormationClient { private static final Logger logger = LoggerFactory.getLogger(CloudFormationClient.class); - private static final String CF_CREATE_RESOURCE_IN_PROGRESS_STATUS = "CREATE_IN_PROGRESS"; - private static final String CF_CREATE_RESOURCE_FAILED_STATUS = "CREATE_FAILED"; private static final long sleepTimeMillis = 5000L; private final String stackName; private final String stackTemplate; - private final AmazonCloudFormation cloudFormationClient; + private final software.amazon.awssdk.services.cloudformation.CloudFormationClient cloudFormationClient; public 
CloudFormationClient(Pair stackPair) { @@ -66,7 +63,7 @@ public CloudFormationClient(App theApp, Stack theStack) stackTemplate = objectMapper .valueToTree(theApp.synth().getStackArtifact(theStack.getArtifactId()).getTemplate()) .toPrettyString(); - this.cloudFormationClient = AmazonCloudFormationClientBuilder.defaultClient(); + this.cloudFormationClient = software.amazon.awssdk.services.cloudformation.CloudFormationClient.create(); } /** @@ -81,11 +78,12 @@ public void createStack() logger.info("------------------------------------------------------"); // logger.info(stackTemplate); - CreateStackRequest createStackRequest = new CreateStackRequest() - .withStackName(stackName) - .withTemplateBody(stackTemplate) - .withDisableRollback(true) - .withCapabilities(Capability.CAPABILITY_NAMED_IAM); + CreateStackRequest createStackRequest = CreateStackRequest.builder() + .stackName(stackName) + .templateBody(stackTemplate) + .disableRollback(true) + .capabilities(Capability.CAPABILITY_NAMED_IAM) + .build(); processCreateStackRequest(createStackRequest); } @@ -98,22 +96,23 @@ private void processCreateStackRequest(CreateStackRequest createStackRequest) throws RuntimeException { // Create CloudFormation stack. 
- CreateStackResult result = cloudFormationClient.createStack(createStackRequest); - logger.info("Stack ID: {}", result.getStackId()); + CreateStackResponse response = cloudFormationClient.createStack(createStackRequest); + logger.info("Stack ID: {}", response.stackId()); - DescribeStackEventsRequest describeStackEventsRequest = new DescribeStackEventsRequest() - .withStackName(createStackRequest.getStackName()); - DescribeStackEventsResult describeStackEventsResult; + DescribeStackEventsRequest describeStackEventsRequest = DescribeStackEventsRequest.builder() + .stackName(createStackRequest.stackName()) + .build(); + DescribeStackEventsResponse describeStackEventsResponse; // Poll status of stack until stack has been created or creation has failed while (true) { - describeStackEventsResult = cloudFormationClient.describeStackEvents(describeStackEventsRequest); - StackEvent event = describeStackEventsResult.getStackEvents().get(0); - String resourceId = event.getLogicalResourceId(); - String resourceStatus = event.getResourceStatus(); + describeStackEventsResponse = cloudFormationClient.describeStackEvents(describeStackEventsRequest); + StackEvent event = describeStackEventsResponse.stackEvents().get(0); + String resourceId = event.logicalResourceId(); + ResourceStatus resourceStatus = event.resourceStatus(); logger.info("Resource Id: {}, Resource status: {}", resourceId, resourceStatus); - if (!resourceId.equals(event.getStackName()) || - resourceStatus.equals(CF_CREATE_RESOURCE_IN_PROGRESS_STATUS)) { + if (!resourceId.equals(event.stackName()) || + resourceStatus.equals(ResourceStatus.CREATE_IN_PROGRESS)) { try { Thread.sleep(sleepTimeMillis); continue; @@ -122,8 +121,8 @@ private void processCreateStackRequest(CreateStackRequest createStackRequest) throw new RuntimeException("Thread.sleep interrupted: " + e.getMessage(), e); } } - else if (resourceStatus.equals(CF_CREATE_RESOURCE_FAILED_STATUS)) { - throw new 
RuntimeException(getCloudFormationErrorReasons(describeStackEventsResult.getStackEvents())); + else if (resourceStatus.equals(ResourceStatus.CREATE_FAILED)) { + throw new RuntimeException(getCloudFormationErrorReasons(describeStackEventsResponse.stackEvents())); } break; } @@ -140,9 +139,9 @@ private String getCloudFormationErrorReasons(List stackEvents) new StringBuilder("CloudFormation stack creation failed due to the following reason(s):\n"); stackEvents.forEach(stackEvent -> { - if (stackEvent.getResourceStatus().equals(CF_CREATE_RESOURCE_FAILED_STATUS)) { + if (stackEvent.resourceStatus().equals(ResourceStatus.CREATE_FAILED)) { String errorMessage = String.format("Resource: %s, Reason: %s\n", - stackEvent.getLogicalResourceId(), stackEvent.getResourceStatusReason()); + stackEvent.logicalResourceId(), stackEvent.resourceStatusReason()); errorMessageBuilder.append(errorMessage); } }); @@ -160,14 +159,14 @@ public void deleteStack() logger.info("------------------------------------------------------"); try { - DeleteStackRequest request = new DeleteStackRequest().withStackName(stackName); + DeleteStackRequest request = DeleteStackRequest.builder().stackName(stackName).build(); cloudFormationClient.deleteStack(request); } catch (Exception e) { logger.error("Something went wrong... 
Manual resource cleanup may be needed!!!", e); } finally { - cloudFormationClient.shutdown(); + cloudFormationClient.close(); } } } From 945ce6a063c4d6713dee4b0bbc06f7e273e17a33 Mon Sep 17 00:00:00 2001 From: Aimery Methena <159072740+aimethed@users.noreply.github.com> Date: Wed, 25 Sep 2024 12:06:39 -0400 Subject: [PATCH 40/87] v2 DocDB (#2282) --- athena-docdb/pom.xml | 8 +++---- .../docdb/integ/DocDbIntegTest.java | 22 +++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/athena-docdb/pom.xml b/athena-docdb/pom.xml index 5dd645c20a..8982ee0159 100644 --- a/athena-docdb/pom.xml +++ b/athena-docdb/pom.xml @@ -28,11 +28,11 @@ 2022.47.1 test - + - com.amazonaws - aws-java-sdk-docdb - ${aws-sdk.version} + software.amazon.awssdk + docdb + ${aws-sdk-v2.version} test diff --git a/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/integ/DocDbIntegTest.java b/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/integ/DocDbIntegTest.java index 3cca51f94e..bf0a314e8a 100644 --- a/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/integ/DocDbIntegTest.java +++ b/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/integ/DocDbIntegTest.java @@ -27,11 +27,6 @@ import com.amazonaws.athena.connector.integ.data.ConnectorVpcAttributes; import com.amazonaws.athena.connector.integ.data.SecretsManagerCredentials; import com.amazonaws.athena.connector.integ.providers.ConnectorPackagingAttributesProvider; -import com.amazonaws.services.docdb.AmazonDocDB; -import com.amazonaws.services.docdb.AmazonDocDBClientBuilder; -import com.amazonaws.services.docdb.model.DBCluster; -import com.amazonaws.services.docdb.model.DescribeDBClustersRequest; -import com.amazonaws.services.docdb.model.DescribeDBClustersResult; import com.google.common.collect.ImmutableMap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -53,6 +48,10 @@ import software.amazon.awscdk.services.ec2.VpcAttributes; import 
software.amazon.awscdk.services.iam.PolicyDocument; import software.amazon.awssdk.services.athena.model.Row; +import software.amazon.awssdk.services.docdb.DocDbClient; +import software.amazon.awssdk.services.docdb.model.DBCluster; +import software.amazon.awssdk.services.docdb.model.DescribeDbClustersRequest; +import software.amazon.awssdk.services.docdb.model.DescribeDbClustersResponse; import software.amazon.awssdk.services.lambda.LambdaClient; import software.amazon.awssdk.services.lambda.model.InvocationType; import software.amazon.awssdk.services.lambda.model.InvokeRequest; @@ -191,15 +190,16 @@ private Stack getDocDbStack() { * Lambda. All exceptions thrown here will be caught in the calling function. */ private Endpoint getClusterData() { - AmazonDocDB docDbClient = AmazonDocDBClientBuilder.defaultClient(); + DocDbClient docDbClient = DocDbClient.create(); try { - DescribeDBClustersResult dbClustersResult = docDbClient.describeDBClusters(new DescribeDBClustersRequest() - .withDBClusterIdentifier(dbClusterName)); - DBCluster cluster = dbClustersResult.getDBClusters().get(0); - return new Endpoint(cluster.getEndpoint(), cluster.getPort()); + DescribeDbClustersResponse dbClustersResponse = docDbClient.describeDBClusters(DescribeDbClustersRequest.builder() + .dbClusterIdentifier(dbClusterName) + .build()); + DBCluster cluster = dbClustersResponse.dbClusters().get(0); + return new Endpoint(cluster.endpoint(), cluster.port()); } finally { - docDbClient.shutdown(); + docDbClient.close(); } } From 057956adbdd14f7724286fd99826c984c241ec2e Mon Sep 17 00:00:00 2001 From: Aimery Methena <159072740+aimethed@users.noreply.github.com> Date: Wed, 25 Sep 2024 12:38:51 -0400 Subject: [PATCH 41/87] V2 rds (#2273) --- athena-aws-cmdb/pom.xml | 12 +- .../aws/cmdb/TableProviderFactory.java | 7 +- .../aws/cmdb/tables/RdsTableProvider.java | 155 +++++++++--------- .../aws/cmdb/TableProviderFactoryTest.java | 4 +- .../aws/cmdb/tables/RdsTableProviderTest.java | 145 ++++++++-------- 
athena-cloudera-hive/pom.xml | 14 +- athena-cloudera-impala/pom.xml | 14 +- athena-datalakegen2/pom.xml | 14 +- athena-db2-as400/pom.xml | 14 +- athena-db2/pom.xml | 14 +- athena-google-bigquery/pom.xml | 10 +- athena-hortonworks-hive/pom.xml | 14 +- athena-jdbc/pom.xml | 24 ++- athena-mysql/pom.xml | 14 +- .../mysql/integ/MySqlIntegTest.java | 21 ++- athena-oracle/pom.xml | 14 +- athena-postgresql/pom.xml | 14 +- .../postgresql/integ/PostGreSqlIntegTest.java | 22 +-- athena-redshift/pom.xml | 14 +- athena-saphana/pom.xml | 14 +- athena-snowflake/pom.xml | 14 +- athena-sqlserver/pom.xml | 14 +- athena-synapse/pom.xml | 14 +- athena-teradata/pom.xml | 14 +- 24 files changed, 365 insertions(+), 245 deletions(-) diff --git a/athena-aws-cmdb/pom.xml b/athena-aws-cmdb/pom.xml index 7579f17301..187d46137a 100644 --- a/athena-aws-cmdb/pom.xml +++ b/athena-aws-cmdb/pom.xml @@ -33,9 +33,15 @@ ${aws-sdk-v2.version} - com.amazonaws - aws-java-sdk-rds - ${aws-sdk.version} + software.amazon.awssdk + rds + ${aws-sdk-v2.version} + + + software.amazon.awssdk + netty-nio-client + + org.slf4j diff --git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactory.java b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactory.java index 8c553c4797..41ee3350c3 100644 --- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactory.java +++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactory.java @@ -34,10 +34,9 @@ import com.amazonaws.athena.connectors.aws.cmdb.tables.s3.S3ObjectsTableProvider; import com.amazonaws.services.ec2.AmazonEC2; import com.amazonaws.services.ec2.AmazonEC2ClientBuilder; -import com.amazonaws.services.rds.AmazonRDS; -import com.amazonaws.services.rds.AmazonRDSClientBuilder; import org.apache.arrow.util.VisibleForTesting; import software.amazon.awssdk.services.emr.EmrClient; +import 
software.amazon.awssdk.services.rds.RdsClient; import software.amazon.awssdk.services.s3.S3Client; import java.util.ArrayList; @@ -59,13 +58,13 @@ public TableProviderFactory(java.util.Map configOptions) this( AmazonEC2ClientBuilder.standard().build(), EmrClient.create(), - AmazonRDSClientBuilder.standard().build(), + RdsClient.create(), S3Client.create(), configOptions); } @VisibleForTesting - protected TableProviderFactory(AmazonEC2 ec2, EmrClient emr, AmazonRDS rds, S3Client amazonS3, java.util.Map configOptions) + protected TableProviderFactory(AmazonEC2 ec2, EmrClient emr, RdsClient rds, S3Client amazonS3, java.util.Map configOptions) { addProvider(new Ec2TableProvider(ec2)); addProvider(new EbsTableProvider(ec2)); diff --git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/RdsTableProvider.java b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/RdsTableProvider.java index f3d9a18a8b..d424476646 100644 --- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/RdsTableProvider.java +++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/RdsTableProvider.java @@ -30,22 +30,22 @@ import com.amazonaws.athena.connector.lambda.metadata.GetTableRequest; import com.amazonaws.athena.connector.lambda.metadata.GetTableResponse; import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest; -import com.amazonaws.services.rds.AmazonRDS; -import com.amazonaws.services.rds.model.DBInstance; -import com.amazonaws.services.rds.model.DBInstanceStatusInfo; -import com.amazonaws.services.rds.model.DBParameterGroupStatus; -import com.amazonaws.services.rds.model.DBSecurityGroupMembership; -import com.amazonaws.services.rds.model.DBSubnetGroup; -import com.amazonaws.services.rds.model.DescribeDBInstancesRequest; -import com.amazonaws.services.rds.model.DescribeDBInstancesResult; -import com.amazonaws.services.rds.model.DomainMembership; -import 
com.amazonaws.services.rds.model.Endpoint; -import com.amazonaws.services.rds.model.Subnet; -import com.amazonaws.services.rds.model.Tag; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.ArrowType; import org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.types.pojo.Schema; +import software.amazon.awssdk.services.rds.RdsClient; +import software.amazon.awssdk.services.rds.model.DBInstance; +import software.amazon.awssdk.services.rds.model.DBInstanceStatusInfo; +import software.amazon.awssdk.services.rds.model.DBParameterGroupStatus; +import software.amazon.awssdk.services.rds.model.DBSecurityGroupMembership; +import software.amazon.awssdk.services.rds.model.DBSubnetGroup; +import software.amazon.awssdk.services.rds.model.DescribeDbInstancesRequest; +import software.amazon.awssdk.services.rds.model.DescribeDbInstancesResponse; +import software.amazon.awssdk.services.rds.model.DomainMembership; +import software.amazon.awssdk.services.rds.model.Endpoint; +import software.amazon.awssdk.services.rds.model.Subnet; +import software.amazon.awssdk.services.rds.model.Tag; import java.util.stream.Collectors; @@ -56,9 +56,9 @@ public class RdsTableProvider implements TableProvider { private static final Schema SCHEMA; - private AmazonRDS rds; + private RdsClient rds; - public RdsTableProvider(AmazonRDS rds) + public RdsTableProvider(RdsClient rds) { this.rds = rds; } @@ -99,27 +99,24 @@ public GetTableResponse getTable(BlockAllocator blockAllocator, GetTableRequest @Override public void readWithConstraint(BlockSpiller spiller, ReadRecordsRequest recordsRequest, QueryStatusChecker queryStatusChecker) { - boolean done = false; - DescribeDBInstancesRequest request = new DescribeDBInstancesRequest(); + DescribeDbInstancesRequest.Builder requestBuilder = DescribeDbInstancesRequest.builder(); ValueSet idConstraint = recordsRequest.getConstraints().getSummary().get("instance_id"); if (idConstraint != null && 
idConstraint.isSingleValue()) { - request.setDBInstanceIdentifier(idConstraint.getSingleValue().toString()); + requestBuilder.dbInstanceIdentifier(idConstraint.getSingleValue().toString()); } - while (!done) { - DescribeDBInstancesResult response = rds.describeDBInstances(request); + DescribeDbInstancesResponse response; + do { + response = rds.describeDBInstances(requestBuilder.build()); - for (DBInstance instance : response.getDBInstances()) { + for (DBInstance instance : response.dbInstances()) { instanceToRow(instance, spiller); } - request.setMarker(response.getMarker()); - - if (response.getMarker() == null || !queryStatusChecker.isQueryRunning()) { - done = true; - } + requestBuilder.marker(response.marker()); } + while (response.marker() != null && queryStatusChecker.isQueryRunning()); } /** @@ -136,145 +133,145 @@ private void instanceToRow(DBInstance instance, spiller.writeRows((Block block, int row) -> { boolean matched = true; - matched &= block.offerValue("instance_id", row, instance.getDBInstanceIdentifier()); - matched &= block.offerValue("primary_az", row, instance.getAvailabilityZone()); - matched &= block.offerValue("storage_gb", row, instance.getAllocatedStorage()); - matched &= block.offerValue("is_encrypted", row, instance.getStorageEncrypted()); - matched &= block.offerValue("storage_type", row, instance.getStorageType()); - matched &= block.offerValue("backup_retention_days", row, instance.getBackupRetentionPeriod()); - matched &= block.offerValue("auto_upgrade", row, instance.getAutoMinorVersionUpgrade()); - matched &= block.offerValue("instance_class", row, instance.getDBInstanceClass()); - matched &= block.offerValue("port", row, instance.getDbInstancePort()); - matched &= block.offerValue("status", row, instance.getDBInstanceStatus()); - matched &= block.offerValue("dbi_resource_id", row, instance.getDbiResourceId()); - matched &= block.offerValue("name", row, instance.getDBName()); - matched &= block.offerValue("engine", row, 
instance.getEngine()); - matched &= block.offerValue("engine_version", row, instance.getEngineVersion()); - matched &= block.offerValue("license_model", row, instance.getLicenseModel()); - matched &= block.offerValue("secondary_az", row, instance.getSecondaryAvailabilityZone()); - matched &= block.offerValue("backup_window", row, instance.getPreferredBackupWindow()); - matched &= block.offerValue("maint_window", row, instance.getPreferredMaintenanceWindow()); - matched &= block.offerValue("read_replica_source_id", row, instance.getReadReplicaSourceDBInstanceIdentifier()); - matched &= block.offerValue("create_time", row, instance.getInstanceCreateTime()); - matched &= block.offerValue("public_access", row, instance.getPubliclyAccessible()); - matched &= block.offerValue("iops", row, instance.getIops()); - matched &= block.offerValue("is_multi_az", row, instance.getMultiAZ()); + matched &= block.offerValue("instance_id", row, instance.dbInstanceIdentifier()); + matched &= block.offerValue("primary_az", row, instance.availabilityZone()); + matched &= block.offerValue("storage_gb", row, instance.allocatedStorage()); + matched &= block.offerValue("is_encrypted", row, instance.storageEncrypted()); + matched &= block.offerValue("storage_type", row, instance.storageType()); + matched &= block.offerValue("backup_retention_days", row, instance.backupRetentionPeriod()); + matched &= block.offerValue("auto_upgrade", row, instance.autoMinorVersionUpgrade()); + matched &= block.offerValue("instance_class", row, instance.dbInstanceClass()); + matched &= block.offerValue("port", row, instance.dbInstancePort()); + matched &= block.offerValue("status", row, instance.dbInstanceStatus()); + matched &= block.offerValue("dbi_resource_id", row, instance.dbiResourceId()); + matched &= block.offerValue("name", row, instance.dbName()); + matched &= block.offerValue("engine", row, instance.engine()); + matched &= block.offerValue("engine_version", row, instance.engineVersion()); + matched 
&= block.offerValue("license_model", row, instance.licenseModel()); + matched &= block.offerValue("secondary_az", row, instance.secondaryAvailabilityZone()); + matched &= block.offerValue("backup_window", row, instance.preferredBackupWindow()); + matched &= block.offerValue("maint_window", row, instance.preferredMaintenanceWindow()); + matched &= block.offerValue("read_replica_source_id", row, instance.readReplicaSourceDBInstanceIdentifier()); + matched &= block.offerValue("create_time", row, instance.instanceCreateTime()); + matched &= block.offerValue("public_access", row, instance.publiclyAccessible()); + matched &= block.offerValue("iops", row, instance.iops()); + matched &= block.offerValue("is_multi_az", row, instance.multiAZ()); matched &= block.offerComplexValue("domains", row, (Field field, Object val) -> { if (field.getName().equals("domain")) { - return ((DomainMembership) val).getDomain(); + return ((DomainMembership) val).domain(); } else if (field.getName().equals("fqdn")) { - return ((DomainMembership) val).getFQDN(); + return ((DomainMembership) val).fqdn(); } else if (field.getName().equals("iam_role")) { - return ((DomainMembership) val).getIAMRoleName(); + return ((DomainMembership) val).iamRoleName(); } else if (field.getName().equals("status")) { - return ((DomainMembership) val).getStatus(); + return ((DomainMembership) val).status(); } throw new RuntimeException("Unexpected field " + field.getName()); }, - instance.getDomainMemberships()); + instance.domainMemberships()); matched &= block.offerComplexValue("param_groups", row, (Field field, Object val) -> { if (field.getName().equals("name")) { - return ((DBParameterGroupStatus) val).getDBParameterGroupName(); + return ((DBParameterGroupStatus) val).dbParameterGroupName(); } else if (field.getName().equals("status")) { - return ((DBParameterGroupStatus) val).getParameterApplyStatus(); + return ((DBParameterGroupStatus) val).parameterApplyStatus(); } throw new RuntimeException("Unexpected 
field " + field.getName()); }, - instance.getDBParameterGroups()); + instance.dbParameterGroups()); matched &= block.offerComplexValue("db_security_groups", row, (Field field, Object val) -> { if (field.getName().equals("name")) { - return ((DBSecurityGroupMembership) val).getDBSecurityGroupName(); + return ((DBSecurityGroupMembership) val).dbSecurityGroupName(); } else if (field.getName().equals("status")) { - return ((DBSecurityGroupMembership) val).getStatus(); + return ((DBSecurityGroupMembership) val).status(); } throw new RuntimeException("Unexpected field " + field.getName()); }, - instance.getDBSecurityGroups()); + instance.dbSecurityGroups()); matched &= block.offerComplexValue("subnet_group", row, (Field field, Object val) -> { if (field.getName().equals("description")) { - return ((DBSubnetGroup) val).getDBSubnetGroupDescription(); + return ((DBSubnetGroup) val).dbSubnetGroupDescription(); } else if (field.getName().equals("name")) { - return ((DBSubnetGroup) val).getDBSubnetGroupName(); + return ((DBSubnetGroup) val).dbSubnetGroupName(); } else if (field.getName().equals("status")) { - return ((DBSubnetGroup) val).getSubnetGroupStatus(); + return ((DBSubnetGroup) val).subnetGroupStatus(); } else if (field.getName().equals("vpc")) { - return ((DBSubnetGroup) val).getVpcId(); + return ((DBSubnetGroup) val).vpcId(); } else if (field.getName().equals("subnets")) { - return ((DBSubnetGroup) val).getSubnets().stream() - .map(next -> next.getSubnetIdentifier()).collect(Collectors.toList()); + return ((DBSubnetGroup) val).subnets().stream() + .map(next -> next.subnetIdentifier()).collect(Collectors.toList()); } else if (val instanceof Subnet) { - return ((Subnet) val).getSubnetIdentifier(); + return ((Subnet) val).subnetIdentifier(); } throw new RuntimeException("Unexpected field " + field.getName()); }, - instance.getDBSubnetGroup()); + instance.dbSubnetGroup()); matched &= block.offerComplexValue("endpoint", row, (Field field, Object val) -> { if 
(field.getName().equals("address")) { - return ((Endpoint) val).getAddress(); + return ((Endpoint) val).address(); } else if (field.getName().equals("port")) { - return ((Endpoint) val).getPort(); + return ((Endpoint) val).port(); } else if (field.getName().equals("zone")) { - return ((Endpoint) val).getHostedZoneId(); + return ((Endpoint) val).hostedZoneId(); } throw new RuntimeException("Unexpected field " + field.getName()); }, - instance.getEndpoint()); + instance.endpoint()); matched &= block.offerComplexValue("status_infos", row, (Field field, Object val) -> { if (field.getName().equals("message")) { - return ((DBInstanceStatusInfo) val).getMessage(); + return ((DBInstanceStatusInfo) val).message(); } else if (field.getName().equals("is_normal")) { - return ((DBInstanceStatusInfo) val).getNormal(); + return ((DBInstanceStatusInfo) val).normal(); } else if (field.getName().equals("status")) { - return ((DBInstanceStatusInfo) val).getStatus(); + return ((DBInstanceStatusInfo) val).status(); } else if (field.getName().equals("type")) { - return ((DBInstanceStatusInfo) val).getStatusType(); + return ((DBInstanceStatusInfo) val).statusType(); } throw new RuntimeException("Unexpected field " + field.getName()); }, - instance.getStatusInfos()); + instance.statusInfos()); matched &= block.offerComplexValue("tags", row, (Field field, Object val) -> { if (field.getName().equals("key")) { - return ((Tag) val).getKey(); + return ((Tag) val).key(); } else if (field.getName().equals("value")) { - return ((Tag) val).getValue(); + return ((Tag) val).value(); } throw new RuntimeException("Unexpected field " + field.getName()); }, - instance.getTagList()); + instance.tagList()); return matched ? 
1 : 0; }); diff --git a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactoryTest.java b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactoryTest.java index 090b4e991d..ce23513916 100644 --- a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactoryTest.java +++ b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactoryTest.java @@ -22,12 +22,12 @@ import com.amazonaws.athena.connector.lambda.domain.TableName; import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider; import com.amazonaws.services.ec2.AmazonEC2; -import com.amazonaws.services.rds.AmazonRDS; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; import software.amazon.awssdk.services.emr.EmrClient; +import software.amazon.awssdk.services.rds.RdsClient; import software.amazon.awssdk.services.s3.S3Client; import java.util.List; @@ -48,7 +48,7 @@ public class TableProviderFactoryTest private EmrClient mockEmr; @Mock - private AmazonRDS mockRds; + private RdsClient mockRds; @Mock private S3Client amazonS3; diff --git a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/RdsTableProviderTest.java b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/RdsTableProviderTest.java index 99b8356bf2..7f3e586387 100644 --- a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/RdsTableProviderTest.java +++ b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/RdsTableProviderTest.java @@ -21,20 +21,6 @@ import com.amazonaws.athena.connector.lambda.data.Block; import com.amazonaws.athena.connector.lambda.data.BlockUtils; -import com.amazonaws.services.rds.AmazonRDS; -import com.amazonaws.services.rds.model.DBInstance; -import com.amazonaws.services.rds.model.DBInstanceStatusInfo; -import 
com.amazonaws.services.rds.model.DBParameterGroup; -import com.amazonaws.services.rds.model.DBParameterGroupStatus; -import com.amazonaws.services.rds.model.DBSecurityGroupMembership; -import com.amazonaws.services.rds.model.DBSubnetGroup; -import com.amazonaws.services.rds.model.DescribeDBInstancesRequest; -import com.amazonaws.services.rds.model.DescribeDBInstancesResult; -import com.amazonaws.services.rds.model.DomainMembership; -import com.amazonaws.services.rds.model.Endpoint; -import com.amazonaws.services.rds.model.Subnet; -import com.amazonaws.services.rds.model.Tag; - import org.apache.arrow.vector.complex.reader.FieldReader; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Field; @@ -44,6 +30,18 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.rds.RdsClient; +import software.amazon.awssdk.services.rds.model.DBInstance; +import software.amazon.awssdk.services.rds.model.DBInstanceStatusInfo; +import software.amazon.awssdk.services.rds.model.DBParameterGroupStatus; +import software.amazon.awssdk.services.rds.model.DBSecurityGroupMembership; +import software.amazon.awssdk.services.rds.model.DBSubnetGroup; +import software.amazon.awssdk.services.rds.model.DescribeDbInstancesRequest; +import software.amazon.awssdk.services.rds.model.DescribeDbInstancesResponse; +import software.amazon.awssdk.services.rds.model.DomainMembership; +import software.amazon.awssdk.services.rds.model.Endpoint; +import software.amazon.awssdk.services.rds.model.Subnet; +import software.amazon.awssdk.services.rds.model.Tag; import java.util.ArrayList; import java.util.Date; @@ -64,7 +62,7 @@ public class RdsTableProviderTest private static final Logger logger = LoggerFactory.getLogger(RdsTableProviderTest.class); @Mock - private AmazonRDS mockRds; + private RdsClient mockRds; protected String getIdField() { @@ -100,19 +98,19 @@ protected TableProvider 
setUpSource() protected void setUpRead() { final AtomicLong requestCount = new AtomicLong(0); - when(mockRds.describeDBInstances(nullable(DescribeDBInstancesRequest.class))) + when(mockRds.describeDBInstances(nullable(DescribeDbInstancesRequest.class))) .thenAnswer((InvocationOnMock invocation) -> { - DescribeDBInstancesResult mockResult = mock(DescribeDBInstancesResult.class); List values = new ArrayList<>(); values.add(makeValue(getIdValue())); values.add(makeValue(getIdValue())); values.add(makeValue("fake-id")); - when(mockResult.getDBInstances()).thenReturn(values); + DescribeDbInstancesResponse.Builder resultBuilder = DescribeDbInstancesResponse.builder(); + resultBuilder.dbInstances(values); if (requestCount.incrementAndGet() < 3) { - when(mockResult.getMarker()).thenReturn(String.valueOf(requestCount.get())); + resultBuilder.marker(String.valueOf(requestCount.get())); } - return mockResult; + return resultBuilder.build(); }); } @@ -174,56 +172,61 @@ private void validate(FieldReader fieldReader) private DBInstance makeValue(String id) { - return new DBInstance() - .withDBInstanceIdentifier(id) - .withAvailabilityZone("primary_az") - .withAllocatedStorage(100) - .withStorageEncrypted(true) - .withBackupRetentionPeriod(100) - .withAutoMinorVersionUpgrade(true) - .withDBInstanceClass("instance_class") - .withDbInstancePort(100) - .withDBInstanceStatus("status") - .withStorageType("storage_type") - .withDbiResourceId("dbi_resource_id") - .withDBName("name") - .withDomainMemberships(new DomainMembership() - .withDomain("domain") - .withFQDN("fqdn") - .withIAMRoleName("iam_role") - .withStatus("status")) - .withEngine("engine") - .withEngineVersion("engine_version") - .withLicenseModel("license_model") - .withSecondaryAvailabilityZone("secondary_az") - .withPreferredBackupWindow("backup_window") - .withPreferredMaintenanceWindow("maint_window") - .withReadReplicaSourceDBInstanceIdentifier("read_replica_source_id") - .withDBParameterGroups(new 
DBParameterGroupStatus() - .withDBParameterGroupName("name") - .withParameterApplyStatus("status")) - .withDBSecurityGroups(new DBSecurityGroupMembership() - .withDBSecurityGroupName("name") - .withStatus("status")) - .withDBSubnetGroup(new DBSubnetGroup() - .withDBSubnetGroupName("name") - .withSubnetGroupStatus("status") - .withVpcId("vpc") - .withSubnets(new Subnet() - .withSubnetIdentifier("subnet"))) - .withStatusInfos(new DBInstanceStatusInfo() - .withStatus("status") - .withMessage("message") - .withNormal(true) - .withStatusType("type")) - .withEndpoint(new Endpoint() - .withAddress("address") - .withPort(100) - .withHostedZoneId("zone")) - .withInstanceCreateTime(new Date(100000)) - .withIops(100) - .withMultiAZ(true) - .withPubliclyAccessible(true) - .withTagList(new Tag().withKey("key").withValue("value")); + return DBInstance.builder() + .dbInstanceIdentifier(id) + .availabilityZone("primary_az") + .allocatedStorage(100) + .storageEncrypted(true) + .backupRetentionPeriod(100) + .autoMinorVersionUpgrade(true) + .dbInstanceClass("instance_class") + .dbInstancePort(100) + .dbInstanceStatus("status") + .storageType("storage_type") + .dbiResourceId("dbi_resource_id") + .dbName("name") + .domainMemberships(DomainMembership.builder() + .domain("domain") + .fqdn("fqdn") + .iamRoleName("iam_role") + .status("status") + .build()) + .engine("engine") + .engineVersion("engine_version") + .licenseModel("license_model") + .secondaryAvailabilityZone("secondary_az") + .preferredBackupWindow("backup_window") + .preferredMaintenanceWindow("maint_window") + .readReplicaSourceDBInstanceIdentifier("read_replica_source_id") + .dbParameterGroups(DBParameterGroupStatus.builder() + .dbParameterGroupName("name") + .parameterApplyStatus("status") + .build()) + .dbSecurityGroups(DBSecurityGroupMembership.builder() + .dbSecurityGroupName("name") + .status("status").build()) + .dbSubnetGroup(DBSubnetGroup.builder() + .dbSubnetGroupName("name") + .subnetGroupStatus("status") + 
.vpcId("vpc") + .subnets(Subnet.builder().subnetIdentifier("subnet").build()) + .build()) + .statusInfos(DBInstanceStatusInfo.builder() + .status("status") + .message("message") + .normal(true) + .statusType("type") + .build()) + .endpoint(Endpoint.builder() + .address("address") + .port(100) + .hostedZoneId("zone") + .build()) + .instanceCreateTime(new Date(100000).toInstant()) + .iops(100) + .multiAZ(true) + .publiclyAccessible(true) + .tagList(Tag.builder().key("key").value("value").build()) + .build(); } } diff --git a/athena-cloudera-hive/pom.xml b/athena-cloudera-hive/pom.xml index 7e4dfdbcc3..cd0a11a82d 100644 --- a/athena-cloudera-hive/pom.xml +++ b/athena-cloudera-hive/pom.xml @@ -52,12 +52,18 @@ ${mockito.version} test - + - com.amazonaws - aws-java-sdk-rds - ${aws-sdk.version} + software.amazon.awssdk + rds + ${aws-sdk-v2.version} test + + + software.amazon.awssdk + netty-nio-client + + diff --git a/athena-cloudera-impala/pom.xml b/athena-cloudera-impala/pom.xml index d3b2a73d3d..cfdb74e7b3 100644 --- a/athena-cloudera-impala/pom.xml +++ b/athena-cloudera-impala/pom.xml @@ -48,12 +48,18 @@ ${mockito.version} test - + - com.amazonaws - aws-java-sdk-rds - ${aws-sdk.version} + software.amazon.awssdk + rds + ${aws-sdk-v2.version} test + + + software.amazon.awssdk + netty-nio-client + + diff --git a/athena-datalakegen2/pom.xml b/athena-datalakegen2/pom.xml index c72c6c4813..670a4396b9 100644 --- a/athena-datalakegen2/pom.xml +++ b/athena-datalakegen2/pom.xml @@ -32,12 +32,18 @@ mssql-jdbc ${mssql.jdbc.version} - + - com.amazonaws - aws-java-sdk-rds - ${aws-sdk.version} + software.amazon.awssdk + rds + ${aws-sdk-v2.version} test + + + software.amazon.awssdk + netty-nio-client + + diff --git a/athena-db2-as400/pom.xml b/athena-db2-as400/pom.xml index 7c458b8caf..2165ff5019 100644 --- a/athena-db2-as400/pom.xml +++ b/athena-db2-as400/pom.xml @@ -33,12 +33,18 @@ jt400 20.0.7 - + - com.amazonaws - aws-java-sdk-rds - ${aws-sdk.version} + software.amazon.awssdk + 
rds + ${aws-sdk-v2.version} test + + + software.amazon.awssdk + netty-nio-client + + diff --git a/athena-db2/pom.xml b/athena-db2/pom.xml index fbe105f1b7..e018349754 100644 --- a/athena-db2/pom.xml +++ b/athena-db2/pom.xml @@ -33,12 +33,18 @@ jcc 11.5.9.0 - + - com.amazonaws - aws-java-sdk-rds - ${aws-sdk.version} + software.amazon.awssdk + rds + ${aws-sdk-v2.version} test + + + software.amazon.awssdk + netty-nio-client + + diff --git a/athena-google-bigquery/pom.xml b/athena-google-bigquery/pom.xml index c68fc6d1a8..c2df697553 100644 --- a/athena-google-bigquery/pom.xml +++ b/athena-google-bigquery/pom.xml @@ -27,10 +27,16 @@ - software.amazon.awscdk + software.amazon.awssdk rds - ${aws-cdk.version} + ${aws-sdk-v2.version} test + + + software.amazon.awssdk + netty-nio-client + + diff --git a/athena-hortonworks-hive/pom.xml b/athena-hortonworks-hive/pom.xml index 1b67ad4b8c..dc11525b4c 100644 --- a/athena-hortonworks-hive/pom.xml +++ b/athena-hortonworks-hive/pom.xml @@ -48,12 +48,18 @@ test - + - com.amazonaws - aws-java-sdk-rds - ${aws-sdk.version} + software.amazon.awssdk + rds + ${aws-sdk-v2.version} test + + + software.amazon.awssdk + netty-nio-client + + diff --git a/athena-jdbc/pom.xml b/athena-jdbc/pom.xml index 4256870c10..7aa8b0fd34 100644 --- a/athena-jdbc/pom.xml +++ b/athena-jdbc/pom.xml @@ -99,13 +99,33 @@ ${log4j2Version} runtime - + com.amazonaws - aws-java-sdk-rds + aws-java-sdk-redshift ${aws-sdk.version} test + + + software.amazon.awscdk + redshift + ${aws-cdk.version} + test + + + + software.amazon.awssdk + rds + ${aws-sdk-v2.version} + test + + + software.amazon.awssdk + netty-nio-client + + + software.amazon.awscdk diff --git a/athena-mysql/pom.xml b/athena-mysql/pom.xml index b980b85291..b281bdbd80 100644 --- a/athena-mysql/pom.xml +++ b/athena-mysql/pom.xml @@ -43,12 +43,18 @@ - + - com.amazonaws - aws-java-sdk-rds - ${aws-sdk.version} + software.amazon.awssdk + rds + ${aws-sdk-v2.version} test + + + software.amazon.awssdk + 
netty-nio-client + + diff --git a/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/integ/MySqlIntegTest.java b/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/integ/MySqlIntegTest.java index 9de09c16a4..c5f3cb7bbc 100644 --- a/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/integ/MySqlIntegTest.java +++ b/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/integ/MySqlIntegTest.java @@ -26,11 +26,6 @@ import com.amazonaws.athena.connector.lambda.domain.TableName; import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionInfo; import com.amazonaws.athena.connectors.jdbc.integ.JdbcTableUtils; -import com.amazonaws.services.rds.AmazonRDS; -import com.amazonaws.services.rds.AmazonRDSClientBuilder; -import com.amazonaws.services.rds.model.DescribeDBInstancesRequest; -import com.amazonaws.services.rds.model.DescribeDBInstancesResult; -import com.amazonaws.services.rds.model.Endpoint; import com.google.common.collect.ImmutableMap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -53,6 +48,10 @@ import software.amazon.awscdk.services.rds.StorageType; import software.amazon.awscdk.services.secretsmanager.Secret; import software.amazon.awssdk.services.athena.model.Row; +import software.amazon.awssdk.services.rds.RdsClient; +import software.amazon.awssdk.services.rds.model.DescribeDbInstancesRequest; +import software.amazon.awssdk.services.rds.model.DescribeDbInstancesResponse; +import software.amazon.awssdk.services.rds.model.Endpoint; import java.util.ArrayList; import java.util.Collections; @@ -195,14 +194,14 @@ private Stack getMySqlStack() */ private Endpoint getInstanceData() { - AmazonRDS rdsClient = AmazonRDSClientBuilder.defaultClient(); + RdsClient rdsClient = RdsClient.create(); try { - DescribeDBInstancesResult instancesResult = rdsClient.describeDBInstances(new DescribeDBInstancesRequest() - .withDBInstanceIdentifier(dbInstanceName)); - return 
instancesResult.getDBInstances().get(0).getEndpoint(); + DescribeDbInstancesResponse instancesResponse = rdsClient.describeDBInstances(DescribeDbInstancesRequest.builder() + .dbInstanceIdentifier(dbInstanceName).build()); + return instancesResponse.dbInstances().get(0).endpoint(); } finally { - rdsClient.shutdown(); + rdsClient.close(); } } @@ -213,7 +212,7 @@ private Endpoint getInstanceData() private void setEnvironmentVars(Endpoint endpoint) { String connectionString = String.format("mysql://jdbc:mysql://%s:%s/mysql?user=%s&password=%s", - endpoint.getAddress(), endpoint.getPort(), username, password); + endpoint.address(), endpoint.port(), username, password); String connectionStringTag = lambdaFunctionName + "_connection_string"; environmentVars.put("default", connectionString); environmentVars.put(connectionStringTag, connectionString); diff --git a/athena-oracle/pom.xml b/athena-oracle/pom.xml index 5d5b2b6897..e727fd5e45 100644 --- a/athena-oracle/pom.xml +++ b/athena-oracle/pom.xml @@ -32,12 +32,18 @@ ojdbc8 23.5.0.24.07 - + - com.amazonaws - aws-java-sdk-rds - ${aws-sdk.version} + software.amazon.awssdk + rds + ${aws-sdk-v2.version} test + + + software.amazon.awssdk + netty-nio-client + + diff --git a/athena-postgresql/pom.xml b/athena-postgresql/pom.xml index 847b089a93..729080bbd9 100644 --- a/athena-postgresql/pom.xml +++ b/athena-postgresql/pom.xml @@ -39,12 +39,18 @@ ${mockito.version} test - + - com.amazonaws - aws-java-sdk-rds - ${aws-sdk.version} + software.amazon.awssdk + rds + ${aws-sdk-v2.version} test + + + software.amazon.awssdk + netty-nio-client + + diff --git a/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/integ/PostGreSqlIntegTest.java b/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/integ/PostGreSqlIntegTest.java index adb5646d00..68f3913340 100644 --- a/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/integ/PostGreSqlIntegTest.java +++ 
b/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/integ/PostGreSqlIntegTest.java @@ -26,11 +26,6 @@ import com.amazonaws.athena.connector.lambda.domain.TableName; import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionInfo; import com.amazonaws.athena.connectors.jdbc.integ.JdbcTableUtils; -import com.amazonaws.services.rds.AmazonRDS; -import com.amazonaws.services.rds.AmazonRDSClientBuilder; -import com.amazonaws.services.rds.model.DescribeDBInstancesRequest; -import com.amazonaws.services.rds.model.DescribeDBInstancesResult; -import com.amazonaws.services.rds.model.Endpoint; import com.google.common.collect.ImmutableMap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -53,6 +48,10 @@ import software.amazon.awscdk.services.rds.StorageType; import software.amazon.awscdk.services.secretsmanager.Secret; import software.amazon.awssdk.services.athena.model.Row; +import software.amazon.awssdk.services.rds.RdsClient; +import software.amazon.awssdk.services.rds.model.DescribeDbInstancesRequest; +import software.amazon.awssdk.services.rds.model.DescribeDbInstancesResponse; +import software.amazon.awssdk.services.rds.model.Endpoint; import java.util.ArrayList; import java.util.Collections; @@ -195,14 +194,15 @@ private Stack getPostGreSqlStack() */ private Endpoint getInstanceData() { - AmazonRDS rdsClient = AmazonRDSClientBuilder.defaultClient(); + RdsClient rdsClient = RdsClient.create(); try { - DescribeDBInstancesResult instancesResult = rdsClient.describeDBInstances(new DescribeDBInstancesRequest() - .withDBInstanceIdentifier(dbInstanceName)); - return instancesResult.getDBInstances().get(0).getEndpoint(); + DescribeDbInstancesResponse instancesResponse = rdsClient.describeDBInstances(DescribeDbInstancesRequest.builder() + .dbInstanceIdentifier(dbInstanceName) + .build()); + return instancesResponse.dbInstances().get(0).endpoint(); } finally { - rdsClient.shutdown(); + rdsClient.close(); } } @@ -213,7 +213,7 @@ 
private Endpoint getInstanceData() private void setEnvironmentVars(Endpoint endpoint) { String connectionString = String.format("postgres://jdbc:postgresql://%s:%s/postgres?user=%s&password=%s", - endpoint.getAddress(), endpoint.getPort(), username, password); + endpoint.address(), endpoint.port(), username, password); String connectionStringTag = lambdaFunctionName + "_connection_string"; environmentVars.put("default", connectionString); environmentVars.put(connectionStringTag, connectionString); diff --git a/athena-redshift/pom.xml b/athena-redshift/pom.xml index a60ac38828..5ec2b541d0 100644 --- a/athena-redshift/pom.xml +++ b/athena-redshift/pom.xml @@ -56,12 +56,18 @@ ${aws-cdk.version} test - + - com.amazonaws - aws-java-sdk-rds - ${aws-sdk.version} + software.amazon.awssdk + rds + ${aws-sdk-v2.version} test + + + software.amazon.awssdk + netty-nio-client + + diff --git a/athena-saphana/pom.xml b/athena-saphana/pom.xml index 85ca8aa644..72bc778522 100644 --- a/athena-saphana/pom.xml +++ b/athena-saphana/pom.xml @@ -27,12 +27,18 @@ test-jar test - + - com.amazonaws - aws-java-sdk-rds - ${aws-sdk.version} + software.amazon.awssdk + rds + ${aws-sdk-v2.version} test + + + software.amazon.awssdk + netty-nio-client + + diff --git a/athena-snowflake/pom.xml b/athena-snowflake/pom.xml index fcb730044b..aec0e7f807 100644 --- a/athena-snowflake/pom.xml +++ b/athena-snowflake/pom.xml @@ -32,12 +32,18 @@ snowflake-jdbc 3.19.0 - + - com.amazonaws - aws-java-sdk-rds - ${aws-sdk.version} + software.amazon.awssdk + rds + ${aws-sdk-v2.version} test + + + software.amazon.awssdk + netty-nio-client + + diff --git a/athena-sqlserver/pom.xml b/athena-sqlserver/pom.xml index 6bfab70343..3b723f0a87 100644 --- a/athena-sqlserver/pom.xml +++ b/athena-sqlserver/pom.xml @@ -32,12 +32,18 @@ mssql-jdbc ${mssql.jdbc.version} - + - com.amazonaws - aws-java-sdk-rds - ${aws-sdk.version} + software.amazon.awssdk + rds + ${aws-sdk-v2.version} test + + + software.amazon.awssdk + netty-nio-client 
+ + diff --git a/athena-synapse/pom.xml b/athena-synapse/pom.xml index 21fb490ca8..67b9bdc4c3 100644 --- a/athena-synapse/pom.xml +++ b/athena-synapse/pom.xml @@ -59,12 +59,18 @@ - + - com.amazonaws - aws-java-sdk-rds - ${aws-sdk.version} + software.amazon.awssdk + rds + ${aws-sdk-v2.version} test + + + software.amazon.awssdk + netty-nio-client + + diff --git a/athena-teradata/pom.xml b/athena-teradata/pom.xml index f643b8709d..25f7489180 100644 --- a/athena-teradata/pom.xml +++ b/athena-teradata/pom.xml @@ -27,12 +27,18 @@ test-jar test - + - com.amazonaws - aws-java-sdk-rds - ${aws-sdk.version} + software.amazon.awssdk + rds + ${aws-sdk-v2.version} test + + + software.amazon.awssdk + netty-nio-client + + From 0ab6fa32a736d0e8ad199357067930f74aee060e Mon Sep 17 00:00:00 2001 From: Aimery Methena Date: Wed, 25 Sep 2024 19:38:32 +0000 Subject: [PATCH 42/87] fix missed error from merge --- .../connector/lambda/exceptions/AthenaConnectorException.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/exceptions/AthenaConnectorException.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/exceptions/AthenaConnectorException.java index 59c5f3c75e..3743c552eb 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/exceptions/AthenaConnectorException.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/exceptions/AthenaConnectorException.java @@ -19,7 +19,7 @@ */ package com.amazonaws.athena.connector.lambda.exceptions; -import com.amazonaws.services.glue.model.ErrorDetails; +import software.amazon.awssdk.services.glue.model.ErrorDetails; import javax.annotation.Nonnull; From c0ac9cc4ab610d1eedd2ebdcddfddb54fef1ce34 Mon Sep 17 00:00:00 2001 From: Aimery Methena <159072740+aimethed@users.noreply.github.com> Date: Wed, 25 Sep 2024 16:24:13 -0400 Subject: [PATCH 43/87] Merge in v2-master 9/25 
(#19) * Don't declare jsii-runtime as a dependency of any component directly (#2228) * build(deps): bump software.amazon.awssdk:bom from 2.27.12 to 2.27.17 Bumps software.amazon.awssdk:bom from 2.27.12 to 2.27.17. --- updated-dependencies: - dependency-name: software.amazon.awssdk:bom dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * build(deps): bump software.amazon.jsii:jsii-runtime Bumps [software.amazon.jsii:jsii-runtime](https://github.com/aws/jsii) from 1.102.0 to 1.103.1. - [Release notes](https://github.com/aws/jsii/releases) - [Changelog](https://github.com/aws/jsii/blob/main/CHANGELOG.md) - [Commits](https://github.com/aws/jsii/compare/v1.102.0...v1.103.1) --- updated-dependencies: - dependency-name: software.amazon.jsii:jsii-runtime dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * build(deps): bump com.clickhouse:clickhouse-jdbc from 0.6.4 to 0.6.5 Bumps [com.clickhouse:clickhouse-jdbc](https://github.com/ClickHouse/clickhouse-java) from 0.6.4 to 0.6.5. - [Release notes](https://github.com/ClickHouse/clickhouse-java/releases) - [Changelog](https://github.com/ClickHouse/clickhouse-java/blob/main/CHANGELOG.md) - [Commits](https://github.com/ClickHouse/clickhouse-java/compare/v0.6.4...v0.6.5) --- updated-dependencies: - dependency-name: com.clickhouse:clickhouse-jdbc dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * build(deps): bump org.apache.commons:commons-lang3 from 3.16.0 to 3.17.0 Bumps org.apache.commons:commons-lang3 from 3.16.0 to 3.17.0. --- updated-dependencies: - dependency-name: org.apache.commons:commons-lang3 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * build(deps): bump com.microsoft.azure:msal4j from 1.17.0 to 1.17.1 Bumps [com.microsoft.azure:msal4j](https://github.com/AzureAD/microsoft-authentication-library-for-java) from 1.17.0 to 1.17.1. - [Release notes](https://github.com/AzureAD/microsoft-authentication-library-for-java/releases) - [Changelog](https://github.com/AzureAD/microsoft-authentication-library-for-java/blob/dev/changelog.txt) - [Commits](https://github.com/AzureAD/microsoft-authentication-library-for-java/commits) --- updated-dependencies: - dependency-name: com.microsoft.azure:msal4j dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * build(deps): bump org.apache.maven.plugins:maven-javadoc-plugin Bumps [org.apache.maven.plugins:maven-javadoc-plugin](https://github.com/apache/maven-javadoc-plugin) from 3.8.0 to 3.10.0. - [Release notes](https://github.com/apache/maven-javadoc-plugin/releases) - [Commits](https://github.com/apache/maven-javadoc-plugin/compare/maven-javadoc-plugin-3.8.0...maven-javadoc-plugin-3.10.0) --- updated-dependencies: - dependency-name: org.apache.maven.plugins:maven-javadoc-plugin dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * build(deps): bump net.snowflake:snowflake-jdbc from 3.18.0 to 3.19.0 Bumps [net.snowflake:snowflake-jdbc](https://github.com/snowflakedb/snowflake-jdbc) from 3.18.0 to 3.19.0. - [Release notes](https://github.com/snowflakedb/snowflake-jdbc/releases) - [Changelog](https://github.com/snowflakedb/snowflake-jdbc/blob/master/CHANGELOG.rst) - [Commits](https://github.com/snowflakedb/snowflake-jdbc/compare/v3.18.0...v3.19.0) --- updated-dependencies: - dependency-name: net.snowflake:snowflake-jdbc dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * build(deps): bump surefire.failsafe.version from 3.4.0 to 3.5.0 Bumps `surefire.failsafe.version` from 3.4.0 to 3.5.0. Updates `org.apache.maven.plugins:maven-surefire-plugin` from 3.4.0 to 3.5.0 - [Release notes](https://github.com/apache/maven-surefire/releases) - [Commits](https://github.com/apache/maven-surefire/compare/surefire-3.4.0...surefire-3.5.0) Updates `org.apache.maven.plugins:maven-failsafe-plugin` from 3.4.0 to 3.5.0 - [Release notes](https://github.com/apache/maven-surefire/releases) - [Commits](https://github.com/apache/maven-surefire/compare/surefire-3.4.0...surefire-3.5.0) --- updated-dependencies: - dependency-name: org.apache.maven.plugins:maven-surefire-plugin dependency-type: direct:production update-type: version-update:semver-minor - dependency-name: org.apache.maven.plugins:maven-failsafe-plugin dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * build(deps): bump org.yaml:snakeyaml from 2.2 to 2.3 Bumps [org.yaml:snakeyaml](https://bitbucket.org/snakeyaml/snakeyaml) from 2.2 to 2.3. - [Commits](https://bitbucket.org/snakeyaml/snakeyaml/branches/compare/snakeyaml-2.3..snakeyaml-2.2) --- updated-dependencies: - dependency-name: org.yaml:snakeyaml dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Collate Aware Postgresql String Builder (#2216) * Updated Postgresql Split Query Comment (#2242) * build(deps): bump aws-sdk.version from 1.12.770 to 1.12.771 Bumps `aws-sdk.version` from 1.12.770 to 1.12.771. 
Updates `com.amazonaws:jmespath-java` from 1.12.770 to 1.12.771 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.770...1.12.771) Updates `com.amazonaws:aws-java-sdk-core` from 1.12.770 to 1.12.771 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.770...1.12.771) Updates `com.amazonaws:aws-java-sdk-secretsmanager` from 1.12.770 to 1.12.771 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.770...1.12.771) Updates `com.amazonaws:aws-java-sdk-sts` from 1.12.770 to 1.12.771 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.770...1.12.771) Updates `com.amazonaws:aws-java-sdk-glue` from 1.12.770 to 1.12.771 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.770...1.12.771) Updates `com.amazonaws:aws-java-sdk-athena` from 1.12.770 to 1.12.771 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.770...1.12.771) Updates `com.amazonaws:aws-java-sdk-lambda` from 1.12.770 to 1.12.771 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.770...1.12.771) Updates `com.amazonaws:aws-java-sdk-s3` from 1.12.770 to 1.12.771 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.770...1.12.771) Updates `com.amazonaws:aws-java-sdk-kms` from 1.12.770 to 1.12.771 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.770...1.12.771) 
Updates `com.amazonaws:aws-java-sdk-cloudformation` from 1.12.770 to 1.12.771 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.770...1.12.771) Updates `com.amazonaws:aws-java-sdk-logs` from 1.12.770 to 1.12.771 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.770...1.12.771) Updates `com.amazonaws:aws-java-sdk-cloudwatch` from 1.12.770 to 1.12.771 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.770...1.12.771) Updates `com.amazonaws:aws-java-sdk-ec2` from 1.12.770 to 1.12.771 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.770...1.12.771) Updates `com.amazonaws:aws-java-sdk-emr` from 1.12.770 to 1.12.771 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.770...1.12.771) Updates `com.amazonaws:aws-java-sdk-rds` from 1.12.770 to 1.12.771 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.770...1.12.771) Updates `com.amazonaws:aws-java-sdk-docdb` from 1.12.770 to 1.12.771 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.770...1.12.771) Updates `com.amazonaws:aws-java-sdk-elasticache` from 1.12.770 to 1.12.771 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.770...1.12.771) Updates `com.amazonaws:aws-java-sdk-elasticsearch` from 1.12.770 to 1.12.771 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - 
[Commits](https://github.com/aws/aws-sdk-java/compare/1.12.770...1.12.771) Updates `com.amazonaws:aws-java-sdk-redshift` from 1.12.770 to 1.12.771 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.770...1.12.771) Updates `com.amazonaws:aws-java-sdk-redshiftserverless` from 1.12.770 to 1.12.771 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.770...1.12.771) Updates `com.amazonaws:aws-java-sdk-timestreamwrite` from 1.12.770 to 1.12.771 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.770...1.12.771) Updates `com.amazonaws:aws-java-sdk-timestreamquery` from 1.12.770 to 1.12.771 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.770...1.12.771) --- updated-dependencies: - dependency-name: com.amazonaws:jmespath-java dependency-type: direct:production update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-core dependency-type: direct:production update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-secretsmanager dependency-type: direct:production update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-sts dependency-type: direct:production update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-glue dependency-type: direct:production update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-athena dependency-type: direct:production update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-lambda dependency-type: direct:production update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-s3 dependency-type: 
direct:production update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-kms dependency-type: direct:production update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-cloudformation dependency-type: direct:production update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-logs dependency-type: direct:production update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-cloudwatch dependency-type: direct:production update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-ec2 dependency-type: direct:production update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-emr dependency-type: direct:development update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-rds dependency-type: direct:production update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-docdb dependency-type: direct:development update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-elasticache dependency-type: direct:development update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-elasticsearch dependency-type: direct:production update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-redshift dependency-type: direct:development update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-redshiftserverless dependency-type: direct:production update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-timestreamwrite dependency-type: direct:production update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-timestreamquery dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * build(deps): bump org.elasticsearch.client:elasticsearch-rest-client Bumps [org.elasticsearch.client:elasticsearch-rest-client](https://github.com/elastic/elasticsearch) from 8.15.0 to 8.15.1. - [Release notes](https://github.com/elastic/elasticsearch/releases) - [Changelog](https://github.com/elastic/elasticsearch/blob/main/CHANGELOG.md) - [Commits](https://github.com/elastic/elasticsearch/compare/v8.15.0...v8.15.1) --- updated-dependencies: - dependency-name: org.elasticsearch.client:elasticsearch-rest-client dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * build(deps-dev): bump log4j2Version from 2.23.1 to 2.24.0 Bumps `log4j2Version` from 2.23.1 to 2.24.0. Updates `org.apache.logging.log4j:log4j-slf4j2-impl` from 2.23.1 to 2.24.0 Updates `org.apache.logging.log4j:log4j-core` from 2.23.1 to 2.24.0 Updates `org.apache.logging.log4j:log4j-api` from 2.23.1 to 2.24.0 --- updated-dependencies: - dependency-name: org.apache.logging.log4j:log4j-slf4j2-impl dependency-type: direct:development update-type: version-update:semver-minor - dependency-name: org.apache.logging.log4j:log4j-core dependency-type: direct:development update-type: version-update:semver-minor - dependency-name: org.apache.logging.log4j:log4j-api dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * build(deps): bump software.amazon.awssdk:bom from 2.27.17 to 2.27.21 Bumps software.amazon.awssdk:bom from 2.27.17 to 2.27.21. --- updated-dependencies: - dependency-name: software.amazon.awssdk:bom dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Add athena connector exception class to classify Runtime Exception (#2241) * Remove dynamodb:ListSchemas as its no longer valid (#2252) * update new uses of semantic version (#2258) * build(deps): bump org.jetbrains.kotlin:kotlin-stdlib-common Bumps [org.jetbrains.kotlin:kotlin-stdlib-common](https://github.com/JetBrains/kotlin) from 1.9.10 to 2.0.20. - [Release notes](https://github.com/JetBrains/kotlin/releases) - [Changelog](https://github.com/JetBrains/kotlin/blob/v2.0.20/ChangeLog.md) - [Commits](https://github.com/JetBrains/kotlin/compare/v1.9.10...v2.0.20) --- updated-dependencies: - dependency-name: org.jetbrains.kotlin:kotlin-stdlib-common dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * build(deps): bump org.jetbrains.kotlin:kotlin-reflect Bumps [org.jetbrains.kotlin:kotlin-reflect](https://github.com/JetBrains/kotlin) from 1.9.10 to 2.0.20. - [Release notes](https://github.com/JetBrains/kotlin/releases) - [Changelog](https://github.com/JetBrains/kotlin/blob/v2.0.20/ChangeLog.md) - [Commits](https://github.com/JetBrains/kotlin/compare/v1.9.10...v2.0.20) --- updated-dependencies: - dependency-name: org.jetbrains.kotlin:kotlin-reflect dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * build(deps): bump org.jetbrains.kotlin:kotlin-stdlib Bumps [org.jetbrains.kotlin:kotlin-stdlib](https://github.com/JetBrains/kotlin) from 1.9.10 to 2.0.20. - [Release notes](https://github.com/JetBrains/kotlin/releases) - [Changelog](https://github.com/JetBrains/kotlin/blob/v2.0.20/ChangeLog.md) - [Commits](https://github.com/JetBrains/kotlin/compare/v1.9.10...v2.0.20) --- updated-dependencies: - dependency-name: org.jetbrains.kotlin:kotlin-stdlib dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] * build(deps): bump org.jetbrains.kotlin:kotlin-stdlib-jdk8 Bumps [org.jetbrains.kotlin:kotlin-stdlib-jdk8](https://github.com/JetBrains/kotlin) from 1.9.10 to 2.0.20. - [Release notes](https://github.com/JetBrains/kotlin/releases) - [Changelog](https://github.com/JetBrains/kotlin/blob/v2.0.20/ChangeLog.md) - [Commits](https://github.com/JetBrains/kotlin/compare/v1.9.10...v2.0.20) --- updated-dependencies: - dependency-name: org.jetbrains.kotlin:kotlin-stdlib-jdk8 dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * v2 changes for timestream (#2239) * build(deps): bump com.squareup.wire:wire-compiler from 4.9.0 to 5.0.0 Bumps [com.squareup.wire:wire-compiler](https://github.com/square/wire) from 4.9.0 to 5.0.0. - [Changelog](https://github.com/square/wire/blob/master/CHANGELOG.md) - [Commits](https://github.com/square/wire/compare/4.9.0...5.0.0) --- updated-dependencies: - dependency-name: com.squareup.wire:wire-compiler dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * build(deps): bump com.squareup.wire:wire-schema from 4.9.0 to 5.0.0 Bumps [com.squareup.wire:wire-schema](https://github.com/square/wire) from 4.9.0 to 5.0.0. - [Changelog](https://github.com/square/wire/blob/master/CHANGELOG.md) - [Commits](https://github.com/square/wire/compare/4.9.0...5.0.0) --- updated-dependencies: - dependency-name: com.squareup.wire:wire-schema dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * build(deps): bump com.squareup.wire:wire-runtime-jvm from 4.9.0 to 5.0.0 Bumps [com.squareup.wire:wire-runtime-jvm](https://github.com/square/wire) from 4.9.0 to 5.0.0. 
- [Changelog](https://github.com/square/wire/blob/master/CHANGELOG.md) - [Commits](https://github.com/square/wire/compare/4.9.0...5.0.0) --- updated-dependencies: - dependency-name: com.squareup.wire:wire-runtime-jvm dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * build(deps): bump software.amazon.awssdk:bom from 2.27.21 to 2.28.1 Bumps software.amazon.awssdk:bom from 2.27.21 to 2.28.1. --- updated-dependencies: - dependency-name: software.amazon.awssdk:bom dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * build(deps): bump net.java.dev.jna:jna-platform from 5.14.0 to 5.15.0 Bumps [net.java.dev.jna:jna-platform](https://github.com/java-native-access/jna) from 5.14.0 to 5.15.0. - [Changelog](https://github.com/java-native-access/jna/blob/master/CHANGES.md) - [Commits](https://github.com/java-native-access/jna/compare/5.14.0...5.15.0) --- updated-dependencies: - dependency-name: net.java.dev.jna:jna-platform dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * build(deps): bump io.confluent:kafka-protobuf-serializer Bumps [io.confluent:kafka-protobuf-serializer](https://github.com/confluentinc/schema-registry) from 7.7.0 to 7.7.1. - [Commits](https://github.com/confluentinc/schema-registry/commits) --- updated-dependencies: - dependency-name: io.confluent:kafka-protobuf-serializer dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * build(deps): bump com.squareup.wire:wire-runtime-jvm from 5.0.0 to 5.1.0 Bumps [com.squareup.wire:wire-runtime-jvm](https://github.com/square/wire) from 5.0.0 to 5.1.0. 
- [Changelog](https://github.com/square/wire/blob/master/CHANGELOG.md) - [Commits](https://github.com/square/wire/compare/5.0.0...5.1.0) --- updated-dependencies: - dependency-name: com.squareup.wire:wire-runtime-jvm dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * build(deps): bump io.confluent:kafka-avro-serializer from 7.7.0 to 7.7.1 Bumps [io.confluent:kafka-avro-serializer](https://github.com/confluentinc/schema-registry) from 7.7.0 to 7.7.1. - [Commits](https://github.com/confluentinc/schema-registry/commits) --- updated-dependencies: - dependency-name: io.confluent:kafka-avro-serializer dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * build(deps): bump io.confluent:kafka-protobuf-provider Bumps [io.confluent:kafka-protobuf-provider](https://github.com/confluentinc/schema-registry) from 7.7.0 to 7.7.1. - [Commits](https://github.com/confluentinc/schema-registry/commits) --- updated-dependencies: - dependency-name: io.confluent:kafka-protobuf-provider dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * build(deps): bump com.squareup.wire:wire-compiler from 5.0.0 to 5.1.0 Bumps [com.squareup.wire:wire-compiler](https://github.com/square/wire) from 5.0.0 to 5.1.0. - [Changelog](https://github.com/square/wire/blob/master/CHANGELOG.md) - [Commits](https://github.com/square/wire/compare/5.0.0...5.1.0) --- updated-dependencies: - dependency-name: com.squareup.wire:wire-compiler dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * build(deps): bump org.apache.maven.plugins:maven-gpg-plugin Bumps [org.apache.maven.plugins:maven-gpg-plugin](https://github.com/apache/maven-gpg-plugin) from 3.2.5 to 3.2.6. 
- [Release notes](https://github.com/apache/maven-gpg-plugin/releases) - [Commits](https://github.com/apache/maven-gpg-plugin/compare/maven-gpg-plugin-3.2.5...maven-gpg-plugin-3.2.6) --- updated-dependencies: - dependency-name: org.apache.maven.plugins:maven-gpg-plugin dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * build(deps): bump com.squareup.wire:wire-schema from 5.0.0 to 5.1.0 Bumps [com.squareup.wire:wire-schema](https://github.com/square/wire) from 5.0.0 to 5.1.0. - [Changelog](https://github.com/square/wire/blob/master/CHANGELOG.md) - [Commits](https://github.com/square/wire/compare/5.0.0...5.1.0) --- updated-dependencies: - dependency-name: com.squareup.wire:wire-schema dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * build(deps): bump aws-sdk.version from 1.12.771 to 1.12.772 Bumps `aws-sdk.version` from 1.12.771 to 1.12.772. Updates `com.amazonaws:jmespath-java` from 1.12.771 to 1.12.772 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.771...1.12.772) Updates `com.amazonaws:aws-java-sdk-core` from 1.12.771 to 1.12.772 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.771...1.12.772) Updates `com.amazonaws:aws-java-sdk-secretsmanager` from 1.12.771 to 1.12.772 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.771...1.12.772) Updates `com.amazonaws:aws-java-sdk-sts` from 1.12.771 to 1.12.772 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.771...1.12.772) Updates `com.amazonaws:aws-java-sdk-glue` from 1.12.771 to 1.12.772 - 
[Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.771...1.12.772) Updates `com.amazonaws:aws-java-sdk-athena` from 1.12.771 to 1.12.772 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.771...1.12.772) Updates `com.amazonaws:aws-java-sdk-lambda` from 1.12.771 to 1.12.772 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.771...1.12.772) Updates `com.amazonaws:aws-java-sdk-s3` from 1.12.771 to 1.12.772 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.771...1.12.772) Updates `com.amazonaws:aws-java-sdk-kms` from 1.12.771 to 1.12.772 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.771...1.12.772) Updates `com.amazonaws:aws-java-sdk-cloudformation` from 1.12.771 to 1.12.772 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.771...1.12.772) Updates `com.amazonaws:aws-java-sdk-logs` from 1.12.771 to 1.12.772 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.771...1.12.772) Updates `com.amazonaws:aws-java-sdk-cloudwatch` from 1.12.771 to 1.12.772 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.771...1.12.772) Updates `com.amazonaws:aws-java-sdk-ec2` from 1.12.771 to 1.12.772 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.771...1.12.772) Updates `com.amazonaws:aws-java-sdk-emr` from 1.12.771 to 
1.12.772 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.771...1.12.772) Updates `com.amazonaws:aws-java-sdk-rds` from 1.12.771 to 1.12.772 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.771...1.12.772) Updates `com.amazonaws:aws-java-sdk-docdb` from 1.12.771 to 1.12.772 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.771...1.12.772) Updates `com.amazonaws:aws-java-sdk-elasticache` from 1.12.771 to 1.12.772 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.771...1.12.772) Updates `com.amazonaws:aws-java-sdk-elasticsearch` from 1.12.771 to 1.12.772 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.771...1.12.772) Updates `com.amazonaws:aws-java-sdk-redshift` from 1.12.771 to 1.12.772 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.771...1.12.772) Updates `com.amazonaws:aws-java-sdk-redshiftserverless` from 1.12.771 to 1.12.772 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.771...1.12.772) Updates `com.amazonaws:aws-java-sdk-timestreamwrite` from 1.12.771 to 1.12.772 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.771...1.12.772) Updates `com.amazonaws:aws-java-sdk-timestreamquery` from 1.12.771 to 1.12.772 - [Changelog](https://github.com/aws/aws-sdk-java/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-java/compare/1.12.771...1.12.772) --- 
updated-dependencies: - dependency-name: com.amazonaws:jmespath-java dependency-type: direct:production update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-core dependency-type: direct:production update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-secretsmanager dependency-type: direct:production update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-sts dependency-type: direct:production update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-glue dependency-type: direct:production update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-athena dependency-type: direct:production update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-lambda dependency-type: direct:production update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-s3 dependency-type: direct:production update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-kms dependency-type: direct:production update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-cloudformation dependency-type: direct:production update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-logs dependency-type: direct:production update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-cloudwatch dependency-type: direct:production update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-ec2 dependency-type: direct:production update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-emr dependency-type: direct:development update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-rds dependency-type: direct:production update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-docdb 
dependency-type: direct:development update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-elasticache dependency-type: direct:development update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-elasticsearch dependency-type: direct:production update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-redshift dependency-type: direct:development update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-redshiftserverless dependency-type: direct:production update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-timestreamwrite dependency-type: direct:production update-type: version-update:semver-patch - dependency-name: com.amazonaws:aws-java-sdk-timestreamquery dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * build(deps): bump org.apache.kafka:kafka-clients from 3.8.0 to 7.7.0-ce (#2166) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * build(deps): bump com.google.protobuf:protobuf-java in /athena-msk Bumps [com.google.protobuf:protobuf-java](https://github.com/protocolbuffers/protobuf) from 3.25.3 to 3.25.5. - [Release notes](https://github.com/protocolbuffers/protobuf/releases) - [Changelog](https://github.com/protocolbuffers/protobuf/blob/main/protobuf_release.bzl) - [Commits](https://github.com/protocolbuffers/protobuf/compare/v3.25.3...v3.25.5) --- updated-dependencies: - dependency-name: com.google.protobuf:protobuf-java dependency-type: direct:production ... Signed-off-by: dependabot[bot] * build(deps): bump org.apache.kafka:kafka-clients Bumps org.apache.kafka:kafka-clients from 7.7.0-ce to 7.7.1-ce. --- updated-dependencies: - dependency-name: org.apache.kafka:kafka-clients dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * build(deps): bump com.sap.cloud.db.jdbc:ngdbc from 2.21.11 to 2.22.11 Bumps com.sap.cloud.db.jdbc:ngdbc from 2.21.11 to 2.22.11. --- updated-dependencies: - dependency-name: com.sap.cloud.db.jdbc:ngdbc dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * build(deps): bump software.amazon.awssdk:bom from 2.28.1 to 2.28.6 Bumps software.amazon.awssdk:bom from 2.28.1 to 2.28.6. --- updated-dependencies: - dependency-name: software.amazon.awssdk:bom dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * build(deps): bump com.google.cloud:google-cloud-storage Bumps [com.google.cloud:google-cloud-storage](https://github.com/googleapis/java-storage) from 2.42.0 to 2.43.0. - [Release notes](https://github.com/googleapis/java-storage/releases) - [Changelog](https://github.com/googleapis/java-storage/blob/main/CHANGELOG.md) - [Commits](https://github.com/googleapis/java-storage/compare/v2.42.0...v2.43.0) --- updated-dependencies: - dependency-name: com.google.cloud:google-cloud-storage dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * update all ImageURI to use correct repo name (no prod) * v2 sdk changes emr (#2288) * v2 sdk changes redshift (#2289) * migrate awslogs to cloudwatchlogs (v1 to v2) (#2272) * v2 Cloudformation (#2281) * v2 DocDB (#2282) * V2 rds (#2273) * fix missed error from merge --------- Signed-off-by: dependabot[bot] Co-authored-by: Mario Rial Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: AbdulRehman Co-authored-by: yipez-spec Co-authored-by: Trianz-Akshay <108925344+Trianz-Akshay@users.noreply.github.com> --- athena-aws-cmdb/pom.xml | 18 +- .../aws/cmdb/TableProviderFactory.java | 12 +- .../cmdb/tables/EmrClusterTableProvider.java | 76 ++++----- .../aws/cmdb/tables/RdsTableProvider.java | 155 +++++++++--------- .../aws/cmdb/TableProviderFactoryTest.java | 8 +- .../tables/EmrClusterTableProviderTest.java | 81 +++++---- .../aws/cmdb/tables/RdsTableProviderTest.java | 155 +++++++++--------- athena-cloudera-hive/pom.xml | 14 +- athena-cloudera-impala/pom.xml | 14 +- athena-cloudwatch/pom.xml | 26 ++- .../cloudwatch/CloudwatchExceptionFilter.java | 6 +- .../cloudwatch/CloudwatchMetadataHandler.java | 95 ++++++----- .../cloudwatch/CloudwatchRecordHandler.java | 57 +++---- .../cloudwatch/CloudwatchTableResolver.java | 68 ++++---- .../cloudwatch/CloudwatchUtils.java | 37 +++-- .../CloudwatchMetadataHandlerTest.java | 114 ++++++------- .../CloudwatchRecordHandlerTest.java | 39 ++--- .../cloudwatch/integ/CloudwatchIntegTest.java | 29 ++-- athena-datalakegen2/pom.xml | 14 +- athena-db2-as400/pom.xml | 14 +- athena-db2/pom.xml | 14 +- athena-docdb/pom.xml | 8 +- .../docdb/integ/DocDbIntegTest.java | 22 +-- .../dynamodb/DynamoDbIntegTest.java | 2 +- athena-federation-integ-test/pom.xml | 35 +--- .../integ/clients/CloudFormationClient.java | 69 ++++---- .../exceptions/AthenaConnectorException.java | 93 
+++++++++++ athena-gcs/pom.xml | 4 +- athena-google-bigquery/pom.xml | 12 +- athena-hbase/pom.xml | 8 +- .../hbase/integ/HbaseIntegTest.java | 49 +++--- athena-hortonworks-hive/pom.xml | 14 +- athena-jdbc/pom.xml | 14 +- athena-kafka/pom.xml | 6 +- athena-msk/pom.xml | 22 +-- athena-mysql/pom.xml | 14 +- .../mysql/integ/MySqlIntegTest.java | 21 ++- athena-oracle/pom.xml | 14 +- athena-postgresql/pom.xml | 14 +- .../postgresql/integ/PostGreSqlIntegTest.java | 22 +-- athena-redshift/pom.xml | 28 ++-- .../redshift/integ/RedshiftIntegTest.java | 22 ++- athena-saphana/pom.xml | 16 +- athena-snowflake/pom.xml | 14 +- athena-sqlserver/pom.xml | 14 +- athena-synapse/pom.xml | 14 +- athena-teradata/pom.xml | 14 +- athena-timestream/pom.xml | 18 +- .../timestream/TimestreamClientBuilder.java | 30 ++-- .../timestream/TimestreamMetadataHandler.java | 100 +++++------ .../timestream/TimestreamRecordHandler.java | 48 +++--- .../connectors/timestream/TestUtils.java | 72 ++++---- .../TimestreamClientBuilderTest.java | 7 +- .../TimestreamMetadataHandlerTest.java | 95 ++++++----- .../TimestreamRecordHandlerTest.java | 38 ++--- .../timestream/integ/TimestreamIntegTest.java | 24 +-- .../TimestreamWriteRecordRequestBuilder.java | 34 ++-- pom.xml | 4 +- .../bump_versions/bump_connectors_version.py | 4 + tools/bump_versions/common.py | 7 + 60 files changed, 1138 insertions(+), 954 deletions(-) create mode 100644 athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/exceptions/AthenaConnectorException.java diff --git a/athena-aws-cmdb/pom.xml b/athena-aws-cmdb/pom.xml index 90c1ad7b59..187d46137a 100644 --- a/athena-aws-cmdb/pom.xml +++ b/athena-aws-cmdb/pom.xml @@ -28,14 +28,20 @@ - com.amazonaws - aws-java-sdk-emr - ${aws-sdk.version} + software.amazon.awssdk + emr + ${aws-sdk-v2.version} - com.amazonaws - aws-java-sdk-rds - ${aws-sdk.version} + software.amazon.awssdk + rds + ${aws-sdk-v2.version} + + + software.amazon.awssdk + netty-nio-client + + org.slf4j diff 
--git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactory.java b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactory.java index 7a5099e0a7..41ee3350c3 100644 --- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactory.java +++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactory.java @@ -34,11 +34,9 @@ import com.amazonaws.athena.connectors.aws.cmdb.tables.s3.S3ObjectsTableProvider; import com.amazonaws.services.ec2.AmazonEC2; import com.amazonaws.services.ec2.AmazonEC2ClientBuilder; -import com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduce; -import com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduceClientBuilder; -import com.amazonaws.services.rds.AmazonRDS; -import com.amazonaws.services.rds.AmazonRDSClientBuilder; import org.apache.arrow.util.VisibleForTesting; +import software.amazon.awssdk.services.emr.EmrClient; +import software.amazon.awssdk.services.rds.RdsClient; import software.amazon.awssdk.services.s3.S3Client; import java.util.ArrayList; @@ -59,14 +57,14 @@ public TableProviderFactory(java.util.Map configOptions) { this( AmazonEC2ClientBuilder.standard().build(), - AmazonElasticMapReduceClientBuilder.standard().build(), - AmazonRDSClientBuilder.standard().build(), + EmrClient.create(), + RdsClient.create(), S3Client.create(), configOptions); } @VisibleForTesting - protected TableProviderFactory(AmazonEC2 ec2, AmazonElasticMapReduce emr, AmazonRDS rds, S3Client amazonS3, java.util.Map configOptions) + protected TableProviderFactory(AmazonEC2 ec2, EmrClient emr, RdsClient rds, S3Client amazonS3, java.util.Map configOptions) { addProvider(new Ec2TableProvider(ec2)); addProvider(new EbsTableProvider(ec2)); diff --git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/EmrClusterTableProvider.java 
b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/EmrClusterTableProvider.java index ee3b15da91..c3d10c7233 100644 --- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/EmrClusterTableProvider.java +++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/EmrClusterTableProvider.java @@ -29,15 +29,15 @@ import com.amazonaws.athena.connector.lambda.metadata.GetTableRequest; import com.amazonaws.athena.connector.lambda.metadata.GetTableResponse; import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest; -import com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduce; -import com.amazonaws.services.elasticmapreduce.model.Cluster; -import com.amazonaws.services.elasticmapreduce.model.ClusterSummary; -import com.amazonaws.services.elasticmapreduce.model.DescribeClusterRequest; -import com.amazonaws.services.elasticmapreduce.model.DescribeClusterResult; -import com.amazonaws.services.elasticmapreduce.model.ListClustersRequest; -import com.amazonaws.services.elasticmapreduce.model.ListClustersResult; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Schema; +import software.amazon.awssdk.services.emr.EmrClient; +import software.amazon.awssdk.services.emr.model.Cluster; +import software.amazon.awssdk.services.emr.model.ClusterSummary; +import software.amazon.awssdk.services.emr.model.DescribeClusterRequest; +import software.amazon.awssdk.services.emr.model.DescribeClusterResponse; +import software.amazon.awssdk.services.emr.model.ListClustersRequest; +import software.amazon.awssdk.services.emr.model.ListClustersResponse; import java.util.List; import java.util.stream.Collectors; @@ -49,9 +49,9 @@ public class EmrClusterTableProvider implements TableProvider { private static final Schema SCHEMA; - private AmazonElasticMapReduce emr; + private EmrClient emr; - public EmrClusterTableProvider(AmazonElasticMapReduce emr) + public 
EmrClusterTableProvider(EmrClient emr) { this.emr = emr; } @@ -93,23 +93,23 @@ public GetTableResponse getTable(BlockAllocator blockAllocator, GetTableRequest public void readWithConstraint(BlockSpiller spiller, ReadRecordsRequest recordsRequest, QueryStatusChecker queryStatusChecker) { boolean done = false; - ListClustersRequest request = new ListClustersRequest(); + ListClustersRequest request = ListClustersRequest.builder().build(); while (!done) { - ListClustersResult response = emr.listClusters(request); + ListClustersResponse response = emr.listClusters(request); - for (ClusterSummary next : response.getClusters()) { + for (ClusterSummary next : response.clusters()) { Cluster cluster = null; - if (!next.getStatus().getState().toLowerCase().contains("terminated")) { - DescribeClusterResult clusterResponse = emr.describeCluster(new DescribeClusterRequest().withClusterId(next.getId())); - cluster = clusterResponse.getCluster(); + if (!next.status().stateAsString().toLowerCase().contains("terminated")) { + DescribeClusterResponse clusterResponse = emr.describeCluster(DescribeClusterRequest.builder().clusterId(next.id()).build()); + cluster = clusterResponse.cluster(); } clusterToRow(next, cluster, spiller); } - request.setMarker(response.getMarker()); + request = request.toBuilder().marker(response.marker()).build(); - if (response.getMarker() == null || !queryStatusChecker.isQueryRunning()) { + if (response.marker() == null || !queryStatusChecker.isQueryRunning()) { done = true; } } @@ -131,31 +131,31 @@ private void clusterToRow(ClusterSummary clusterSummary, spiller.writeRows((Block block, int row) -> { boolean matched = true; - matched &= block.offerValue("id", row, clusterSummary.getId()); - matched &= block.offerValue("name", row, clusterSummary.getName()); - matched &= block.offerValue("instance_hours", row, clusterSummary.getNormalizedInstanceHours()); - matched &= block.offerValue("state", row, clusterSummary.getStatus().getState()); - matched &= 
block.offerValue("state_code", row, clusterSummary.getStatus().getStateChangeReason().getCode()); - matched &= block.offerValue("state_msg", row, clusterSummary.getStatus().getStateChangeReason().getMessage()); + matched &= block.offerValue("id", row, clusterSummary.id()); + matched &= block.offerValue("name", row, clusterSummary.name()); + matched &= block.offerValue("instance_hours", row, clusterSummary.normalizedInstanceHours()); + matched &= block.offerValue("state", row, clusterSummary.status().stateAsString()); + matched &= block.offerValue("state_code", row, clusterSummary.status().stateChangeReason().codeAsString()); + matched &= block.offerValue("state_msg", row, clusterSummary.status().stateChangeReason().message()); if (cluster != null) { - matched &= block.offerValue("autoscaling_role", row, cluster.getAutoScalingRole()); - matched &= block.offerValue("custom_ami", row, cluster.getCustomAmiId()); - matched &= block.offerValue("instance_collection_type", row, cluster.getInstanceCollectionType()); - matched &= block.offerValue("log_uri", row, cluster.getLogUri()); - matched &= block.offerValue("master_public_dns", row, cluster.getMasterPublicDnsName()); - matched &= block.offerValue("release_label", row, cluster.getReleaseLabel()); - matched &= block.offerValue("running_ami", row, cluster.getRunningAmiVersion()); - matched &= block.offerValue("scale_down_behavior", row, cluster.getScaleDownBehavior()); - matched &= block.offerValue("service_role", row, cluster.getServiceRole()); - matched &= block.offerValue("service_role", row, cluster.getServiceRole()); - - List applications = cluster.getApplications().stream() - .map(next -> next.getName() + ":" + next.getVersion()).collect(Collectors.toList()); + matched &= block.offerValue("autoscaling_role", row, cluster.autoScalingRole()); + matched &= block.offerValue("custom_ami", row, cluster.customAmiId()); + matched &= block.offerValue("instance_collection_type", row, cluster.instanceCollectionTypeAsString()); 
+ matched &= block.offerValue("log_uri", row, cluster.logUri()); + matched &= block.offerValue("master_public_dns", row, cluster.masterPublicDnsName()); + matched &= block.offerValue("release_label", row, cluster.releaseLabel()); + matched &= block.offerValue("running_ami", row, cluster.runningAmiVersion()); + matched &= block.offerValue("scale_down_behavior", row, cluster.scaleDownBehaviorAsString()); + matched &= block.offerValue("service_role", row, cluster.serviceRole()); + matched &= block.offerValue("service_role", row, cluster.serviceRole()); + + List applications = cluster.applications().stream() + .map(next -> next.name() + ":" + next.version()).collect(Collectors.toList()); matched &= block.offerComplexValue("applications", row, FieldResolver.DEFAULT, applications); - List tags = cluster.getTags().stream() - .map(next -> next.getKey() + ":" + next.getValue()).collect(Collectors.toList()); + List tags = cluster.tags().stream() + .map(next -> next.key() + ":" + next.value()).collect(Collectors.toList()); matched &= block.offerComplexValue("tags", row, FieldResolver.DEFAULT, tags); } diff --git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/RdsTableProvider.java b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/RdsTableProvider.java index f3d9a18a8b..d424476646 100644 --- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/RdsTableProvider.java +++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/RdsTableProvider.java @@ -30,22 +30,22 @@ import com.amazonaws.athena.connector.lambda.metadata.GetTableRequest; import com.amazonaws.athena.connector.lambda.metadata.GetTableResponse; import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest; -import com.amazonaws.services.rds.AmazonRDS; -import com.amazonaws.services.rds.model.DBInstance; -import com.amazonaws.services.rds.model.DBInstanceStatusInfo; -import 
com.amazonaws.services.rds.model.DBParameterGroupStatus; -import com.amazonaws.services.rds.model.DBSecurityGroupMembership; -import com.amazonaws.services.rds.model.DBSubnetGroup; -import com.amazonaws.services.rds.model.DescribeDBInstancesRequest; -import com.amazonaws.services.rds.model.DescribeDBInstancesResult; -import com.amazonaws.services.rds.model.DomainMembership; -import com.amazonaws.services.rds.model.Endpoint; -import com.amazonaws.services.rds.model.Subnet; -import com.amazonaws.services.rds.model.Tag; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.ArrowType; import org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.types.pojo.Schema; +import software.amazon.awssdk.services.rds.RdsClient; +import software.amazon.awssdk.services.rds.model.DBInstance; +import software.amazon.awssdk.services.rds.model.DBInstanceStatusInfo; +import software.amazon.awssdk.services.rds.model.DBParameterGroupStatus; +import software.amazon.awssdk.services.rds.model.DBSecurityGroupMembership; +import software.amazon.awssdk.services.rds.model.DBSubnetGroup; +import software.amazon.awssdk.services.rds.model.DescribeDbInstancesRequest; +import software.amazon.awssdk.services.rds.model.DescribeDbInstancesResponse; +import software.amazon.awssdk.services.rds.model.DomainMembership; +import software.amazon.awssdk.services.rds.model.Endpoint; +import software.amazon.awssdk.services.rds.model.Subnet; +import software.amazon.awssdk.services.rds.model.Tag; import java.util.stream.Collectors; @@ -56,9 +56,9 @@ public class RdsTableProvider implements TableProvider { private static final Schema SCHEMA; - private AmazonRDS rds; + private RdsClient rds; - public RdsTableProvider(AmazonRDS rds) + public RdsTableProvider(RdsClient rds) { this.rds = rds; } @@ -99,27 +99,24 @@ public GetTableResponse getTable(BlockAllocator blockAllocator, GetTableRequest @Override public void readWithConstraint(BlockSpiller spiller, 
ReadRecordsRequest recordsRequest, QueryStatusChecker queryStatusChecker) { - boolean done = false; - DescribeDBInstancesRequest request = new DescribeDBInstancesRequest(); + DescribeDbInstancesRequest.Builder requestBuilder = DescribeDbInstancesRequest.builder(); ValueSet idConstraint = recordsRequest.getConstraints().getSummary().get("instance_id"); if (idConstraint != null && idConstraint.isSingleValue()) { - request.setDBInstanceIdentifier(idConstraint.getSingleValue().toString()); + requestBuilder.dbInstanceIdentifier(idConstraint.getSingleValue().toString()); } - while (!done) { - DescribeDBInstancesResult response = rds.describeDBInstances(request); + DescribeDbInstancesResponse response; + do { + response = rds.describeDBInstances(requestBuilder.build()); - for (DBInstance instance : response.getDBInstances()) { + for (DBInstance instance : response.dbInstances()) { instanceToRow(instance, spiller); } - request.setMarker(response.getMarker()); - - if (response.getMarker() == null || !queryStatusChecker.isQueryRunning()) { - done = true; - } + requestBuilder.marker(response.marker()); } + while (response.marker() != null && queryStatusChecker.isQueryRunning()); } /** @@ -136,145 +133,145 @@ private void instanceToRow(DBInstance instance, spiller.writeRows((Block block, int row) -> { boolean matched = true; - matched &= block.offerValue("instance_id", row, instance.getDBInstanceIdentifier()); - matched &= block.offerValue("primary_az", row, instance.getAvailabilityZone()); - matched &= block.offerValue("storage_gb", row, instance.getAllocatedStorage()); - matched &= block.offerValue("is_encrypted", row, instance.getStorageEncrypted()); - matched &= block.offerValue("storage_type", row, instance.getStorageType()); - matched &= block.offerValue("backup_retention_days", row, instance.getBackupRetentionPeriod()); - matched &= block.offerValue("auto_upgrade", row, instance.getAutoMinorVersionUpgrade()); - matched &= block.offerValue("instance_class", row, 
instance.getDBInstanceClass()); - matched &= block.offerValue("port", row, instance.getDbInstancePort()); - matched &= block.offerValue("status", row, instance.getDBInstanceStatus()); - matched &= block.offerValue("dbi_resource_id", row, instance.getDbiResourceId()); - matched &= block.offerValue("name", row, instance.getDBName()); - matched &= block.offerValue("engine", row, instance.getEngine()); - matched &= block.offerValue("engine_version", row, instance.getEngineVersion()); - matched &= block.offerValue("license_model", row, instance.getLicenseModel()); - matched &= block.offerValue("secondary_az", row, instance.getSecondaryAvailabilityZone()); - matched &= block.offerValue("backup_window", row, instance.getPreferredBackupWindow()); - matched &= block.offerValue("maint_window", row, instance.getPreferredMaintenanceWindow()); - matched &= block.offerValue("read_replica_source_id", row, instance.getReadReplicaSourceDBInstanceIdentifier()); - matched &= block.offerValue("create_time", row, instance.getInstanceCreateTime()); - matched &= block.offerValue("public_access", row, instance.getPubliclyAccessible()); - matched &= block.offerValue("iops", row, instance.getIops()); - matched &= block.offerValue("is_multi_az", row, instance.getMultiAZ()); + matched &= block.offerValue("instance_id", row, instance.dbInstanceIdentifier()); + matched &= block.offerValue("primary_az", row, instance.availabilityZone()); + matched &= block.offerValue("storage_gb", row, instance.allocatedStorage()); + matched &= block.offerValue("is_encrypted", row, instance.storageEncrypted()); + matched &= block.offerValue("storage_type", row, instance.storageType()); + matched &= block.offerValue("backup_retention_days", row, instance.backupRetentionPeriod()); + matched &= block.offerValue("auto_upgrade", row, instance.autoMinorVersionUpgrade()); + matched &= block.offerValue("instance_class", row, instance.dbInstanceClass()); + matched &= block.offerValue("port", row, 
instance.dbInstancePort()); + matched &= block.offerValue("status", row, instance.dbInstanceStatus()); + matched &= block.offerValue("dbi_resource_id", row, instance.dbiResourceId()); + matched &= block.offerValue("name", row, instance.dbName()); + matched &= block.offerValue("engine", row, instance.engine()); + matched &= block.offerValue("engine_version", row, instance.engineVersion()); + matched &= block.offerValue("license_model", row, instance.licenseModel()); + matched &= block.offerValue("secondary_az", row, instance.secondaryAvailabilityZone()); + matched &= block.offerValue("backup_window", row, instance.preferredBackupWindow()); + matched &= block.offerValue("maint_window", row, instance.preferredMaintenanceWindow()); + matched &= block.offerValue("read_replica_source_id", row, instance.readReplicaSourceDBInstanceIdentifier()); + matched &= block.offerValue("create_time", row, instance.instanceCreateTime()); + matched &= block.offerValue("public_access", row, instance.publiclyAccessible()); + matched &= block.offerValue("iops", row, instance.iops()); + matched &= block.offerValue("is_multi_az", row, instance.multiAZ()); matched &= block.offerComplexValue("domains", row, (Field field, Object val) -> { if (field.getName().equals("domain")) { - return ((DomainMembership) val).getDomain(); + return ((DomainMembership) val).domain(); } else if (field.getName().equals("fqdn")) { - return ((DomainMembership) val).getFQDN(); + return ((DomainMembership) val).fqdn(); } else if (field.getName().equals("iam_role")) { - return ((DomainMembership) val).getIAMRoleName(); + return ((DomainMembership) val).iamRoleName(); } else if (field.getName().equals("status")) { - return ((DomainMembership) val).getStatus(); + return ((DomainMembership) val).status(); } throw new RuntimeException("Unexpected field " + field.getName()); }, - instance.getDomainMemberships()); + instance.domainMemberships()); matched &= block.offerComplexValue("param_groups", row, (Field field, Object 
val) -> { if (field.getName().equals("name")) { - return ((DBParameterGroupStatus) val).getDBParameterGroupName(); + return ((DBParameterGroupStatus) val).dbParameterGroupName(); } else if (field.getName().equals("status")) { - return ((DBParameterGroupStatus) val).getParameterApplyStatus(); + return ((DBParameterGroupStatus) val).parameterApplyStatus(); } throw new RuntimeException("Unexpected field " + field.getName()); }, - instance.getDBParameterGroups()); + instance.dbParameterGroups()); matched &= block.offerComplexValue("db_security_groups", row, (Field field, Object val) -> { if (field.getName().equals("name")) { - return ((DBSecurityGroupMembership) val).getDBSecurityGroupName(); + return ((DBSecurityGroupMembership) val).dbSecurityGroupName(); } else if (field.getName().equals("status")) { - return ((DBSecurityGroupMembership) val).getStatus(); + return ((DBSecurityGroupMembership) val).status(); } throw new RuntimeException("Unexpected field " + field.getName()); }, - instance.getDBSecurityGroups()); + instance.dbSecurityGroups()); matched &= block.offerComplexValue("subnet_group", row, (Field field, Object val) -> { if (field.getName().equals("description")) { - return ((DBSubnetGroup) val).getDBSubnetGroupDescription(); + return ((DBSubnetGroup) val).dbSubnetGroupDescription(); } else if (field.getName().equals("name")) { - return ((DBSubnetGroup) val).getDBSubnetGroupName(); + return ((DBSubnetGroup) val).dbSubnetGroupName(); } else if (field.getName().equals("status")) { - return ((DBSubnetGroup) val).getSubnetGroupStatus(); + return ((DBSubnetGroup) val).subnetGroupStatus(); } else if (field.getName().equals("vpc")) { - return ((DBSubnetGroup) val).getVpcId(); + return ((DBSubnetGroup) val).vpcId(); } else if (field.getName().equals("subnets")) { - return ((DBSubnetGroup) val).getSubnets().stream() - .map(next -> next.getSubnetIdentifier()).collect(Collectors.toList()); + return ((DBSubnetGroup) val).subnets().stream() + .map(next -> 
next.subnetIdentifier()).collect(Collectors.toList()); } else if (val instanceof Subnet) { - return ((Subnet) val).getSubnetIdentifier(); + return ((Subnet) val).subnetIdentifier(); } throw new RuntimeException("Unexpected field " + field.getName()); }, - instance.getDBSubnetGroup()); + instance.dbSubnetGroup()); matched &= block.offerComplexValue("endpoint", row, (Field field, Object val) -> { if (field.getName().equals("address")) { - return ((Endpoint) val).getAddress(); + return ((Endpoint) val).address(); } else if (field.getName().equals("port")) { - return ((Endpoint) val).getPort(); + return ((Endpoint) val).port(); } else if (field.getName().equals("zone")) { - return ((Endpoint) val).getHostedZoneId(); + return ((Endpoint) val).hostedZoneId(); } throw new RuntimeException("Unexpected field " + field.getName()); }, - instance.getEndpoint()); + instance.endpoint()); matched &= block.offerComplexValue("status_infos", row, (Field field, Object val) -> { if (field.getName().equals("message")) { - return ((DBInstanceStatusInfo) val).getMessage(); + return ((DBInstanceStatusInfo) val).message(); } else if (field.getName().equals("is_normal")) { - return ((DBInstanceStatusInfo) val).getNormal(); + return ((DBInstanceStatusInfo) val).normal(); } else if (field.getName().equals("status")) { - return ((DBInstanceStatusInfo) val).getStatus(); + return ((DBInstanceStatusInfo) val).status(); } else if (field.getName().equals("type")) { - return ((DBInstanceStatusInfo) val).getStatusType(); + return ((DBInstanceStatusInfo) val).statusType(); } throw new RuntimeException("Unexpected field " + field.getName()); }, - instance.getStatusInfos()); + instance.statusInfos()); matched &= block.offerComplexValue("tags", row, (Field field, Object val) -> { if (field.getName().equals("key")) { - return ((Tag) val).getKey(); + return ((Tag) val).key(); } else if (field.getName().equals("value")) { - return ((Tag) val).getValue(); + return ((Tag) val).value(); } throw new 
RuntimeException("Unexpected field " + field.getName()); }, - instance.getTagList()); + instance.tagList()); return matched ? 1 : 0; }); diff --git a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactoryTest.java b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactoryTest.java index c196e379d6..ce23513916 100644 --- a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactoryTest.java +++ b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactoryTest.java @@ -22,12 +22,12 @@ import com.amazonaws.athena.connector.lambda.domain.TableName; import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider; import com.amazonaws.services.ec2.AmazonEC2; -import com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduce; -import com.amazonaws.services.rds.AmazonRDS; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.services.emr.EmrClient; +import software.amazon.awssdk.services.rds.RdsClient; import software.amazon.awssdk.services.s3.S3Client; import java.util.List; @@ -45,10 +45,10 @@ public class TableProviderFactoryTest private AmazonEC2 mockEc2; @Mock - private AmazonElasticMapReduce mockEmr; + private EmrClient mockEmr; @Mock - private AmazonRDS mockRds; + private RdsClient mockRds; @Mock private S3Client amazonS3; diff --git a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/EmrClusterTableProviderTest.java b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/EmrClusterTableProviderTest.java index c88fc6943b..b593b275a2 100644 --- a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/EmrClusterTableProviderTest.java +++ b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/EmrClusterTableProviderTest.java @@ 
-21,17 +21,6 @@ import com.amazonaws.athena.connector.lambda.data.Block; import com.amazonaws.athena.connector.lambda.data.BlockUtils; -import com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduce; -import com.amazonaws.services.elasticmapreduce.model.Application; -import com.amazonaws.services.elasticmapreduce.model.Cluster; -import com.amazonaws.services.elasticmapreduce.model.ClusterStateChangeReason; -import com.amazonaws.services.elasticmapreduce.model.ClusterStatus; -import com.amazonaws.services.elasticmapreduce.model.ClusterSummary; -import com.amazonaws.services.elasticmapreduce.model.DescribeClusterRequest; -import com.amazonaws.services.elasticmapreduce.model.DescribeClusterResult; -import com.amazonaws.services.elasticmapreduce.model.ListClustersRequest; -import com.amazonaws.services.elasticmapreduce.model.ListClustersResult; -import com.amazonaws.services.elasticmapreduce.model.Tag; import org.apache.arrow.vector.complex.reader.FieldReader; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Field; @@ -41,6 +30,17 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.emr.EmrClient; +import software.amazon.awssdk.services.emr.model.Application; +import software.amazon.awssdk.services.emr.model.Cluster; +import software.amazon.awssdk.services.emr.model.ClusterStateChangeReason; +import software.amazon.awssdk.services.emr.model.ClusterStatus; +import software.amazon.awssdk.services.emr.model.ClusterSummary; +import software.amazon.awssdk.services.emr.model.DescribeClusterRequest; +import software.amazon.awssdk.services.emr.model.DescribeClusterResponse; +import software.amazon.awssdk.services.emr.model.ListClustersRequest; +import software.amazon.awssdk.services.emr.model.ListClustersResponse; +import software.amazon.awssdk.services.emr.model.Tag; import java.util.ArrayList; import java.util.List; @@ -49,7 +49,6 @@ import 
static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.mockito.ArgumentMatchers.nullable; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @RunWith(MockitoJUnitRunner.class) @@ -59,7 +58,7 @@ public class EmrClusterTableProviderTest private static final Logger logger = LoggerFactory.getLogger(EmrClusterTableProviderTest.class); @Mock - private AmazonElasticMapReduce mockEmr; + private EmrClient mockEmr; protected String getIdField() { @@ -96,24 +95,18 @@ protected void setUpRead() { when(mockEmr.listClusters(nullable(ListClustersRequest.class))) .thenAnswer((InvocationOnMock invocation) -> { - ListClustersResult mockResult = mock(ListClustersResult.class); List values = new ArrayList<>(); values.add(makeClusterSummary(getIdValue())); values.add(makeClusterSummary(getIdValue())); values.add(makeClusterSummary("fake-id")); - when(mockResult.getClusters()).thenReturn(values); + ListClustersResponse mockResult = ListClustersResponse.builder().clusters(values).build(); return mockResult; }); when(mockEmr.describeCluster(nullable(DescribeClusterRequest.class))) .thenAnswer((InvocationOnMock invocation) -> { DescribeClusterRequest request = (DescribeClusterRequest) invocation.getArguments()[0]; - DescribeClusterResult mockResult = mock(DescribeClusterResult.class); - List values = new ArrayList<>(); - values.add(makeClusterSummary(getIdValue())); - values.add(makeClusterSummary(getIdValue())); - values.add(makeClusterSummary("fake-id")); - when(mockResult.getCluster()).thenReturn(makeCluster(request.getClusterId())); + DescribeClusterResponse mockResult = DescribeClusterResponse.builder().cluster(makeCluster(request.clusterId())).build(); return mockResult; }); } @@ -170,32 +163,32 @@ private void validate(FieldReader fieldReader) private ClusterSummary makeClusterSummary(String id) { - return new ClusterSummary() - .withName("name") - .withId(id) - .withStatus(new ClusterStatus() - 
.withState("state") - .withStateChangeReason(new ClusterStateChangeReason() - .withCode("state_code") - .withMessage("state_msg"))) - .withNormalizedInstanceHours(100); + return ClusterSummary.builder() + .name("name") + .id(id) + .status(ClusterStatus.builder().state("state") + .stateChangeReason(ClusterStateChangeReason.builder() + .code("state_code") + .message("state_msg").build()).build()) + .normalizedInstanceHours(100).build(); } private Cluster makeCluster(String id) { - return new Cluster() - .withId(id) - .withName("name") - .withAutoScalingRole("autoscaling_role") - .withCustomAmiId("custom_ami") - .withInstanceCollectionType("instance_collection_type") - .withLogUri("log_uri") - .withMasterPublicDnsName("master_public_dns") - .withReleaseLabel("release_label") - .withRunningAmiVersion("running_ami") - .withScaleDownBehavior("scale_down_behavior") - .withServiceRole("service_role") - .withApplications(new Application().withName("name").withVersion("version")) - .withTags(new Tag("key", "value")); + return Cluster.builder() + .id(id) + .name("name") + .autoScalingRole("autoscaling_role") + .customAmiId("custom_ami") + .instanceCollectionType("instance_collection_type") + .logUri("log_uri") + .masterPublicDnsName("master_public_dns") + .releaseLabel("release_label") + .runningAmiVersion("running_ami") + .scaleDownBehavior("scale_down_behavior") + .serviceRole("service_role") + .applications(Application.builder().name("name").version("version").build()) + .tags(Tag.builder().key("key").value("value").build()) + .build(); } } diff --git a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/RdsTableProviderTest.java b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/RdsTableProviderTest.java index 76b8c858ef..7f3e586387 100644 --- a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/RdsTableProviderTest.java +++ 
b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/RdsTableProviderTest.java @@ -21,30 +21,6 @@ import com.amazonaws.athena.connector.lambda.data.Block; import com.amazonaws.athena.connector.lambda.data.BlockUtils; -import com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduce; -import com.amazonaws.services.elasticmapreduce.model.Application; -import com.amazonaws.services.elasticmapreduce.model.Cluster; -import com.amazonaws.services.elasticmapreduce.model.ClusterStateChangeReason; -import com.amazonaws.services.elasticmapreduce.model.ClusterStatus; -import com.amazonaws.services.elasticmapreduce.model.ClusterSummary; -import com.amazonaws.services.elasticmapreduce.model.DescribeClusterRequest; -import com.amazonaws.services.elasticmapreduce.model.DescribeClusterResult; -import com.amazonaws.services.elasticmapreduce.model.ListClustersRequest; -import com.amazonaws.services.elasticmapreduce.model.ListClustersResult; -import com.amazonaws.services.rds.AmazonRDS; -import com.amazonaws.services.rds.model.DBInstance; -import com.amazonaws.services.rds.model.DBInstanceStatusInfo; -import com.amazonaws.services.rds.model.DBParameterGroup; -import com.amazonaws.services.rds.model.DBParameterGroupStatus; -import com.amazonaws.services.rds.model.DBSecurityGroupMembership; -import com.amazonaws.services.rds.model.DBSubnetGroup; -import com.amazonaws.services.rds.model.DescribeDBInstancesRequest; -import com.amazonaws.services.rds.model.DescribeDBInstancesResult; -import com.amazonaws.services.rds.model.DomainMembership; -import com.amazonaws.services.rds.model.Endpoint; -import com.amazonaws.services.rds.model.Subnet; -import com.amazonaws.services.rds.model.Tag; - import org.apache.arrow.vector.complex.reader.FieldReader; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Field; @@ -54,6 +30,18 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; 
+import software.amazon.awssdk.services.rds.RdsClient; +import software.amazon.awssdk.services.rds.model.DBInstance; +import software.amazon.awssdk.services.rds.model.DBInstanceStatusInfo; +import software.amazon.awssdk.services.rds.model.DBParameterGroupStatus; +import software.amazon.awssdk.services.rds.model.DBSecurityGroupMembership; +import software.amazon.awssdk.services.rds.model.DBSubnetGroup; +import software.amazon.awssdk.services.rds.model.DescribeDbInstancesRequest; +import software.amazon.awssdk.services.rds.model.DescribeDbInstancesResponse; +import software.amazon.awssdk.services.rds.model.DomainMembership; +import software.amazon.awssdk.services.rds.model.Endpoint; +import software.amazon.awssdk.services.rds.model.Subnet; +import software.amazon.awssdk.services.rds.model.Tag; import java.util.ArrayList; import java.util.Date; @@ -74,7 +62,7 @@ public class RdsTableProviderTest private static final Logger logger = LoggerFactory.getLogger(RdsTableProviderTest.class); @Mock - private AmazonRDS mockRds; + private RdsClient mockRds; protected String getIdField() { @@ -110,19 +98,19 @@ protected TableProvider setUpSource() protected void setUpRead() { final AtomicLong requestCount = new AtomicLong(0); - when(mockRds.describeDBInstances(nullable(DescribeDBInstancesRequest.class))) + when(mockRds.describeDBInstances(nullable(DescribeDbInstancesRequest.class))) .thenAnswer((InvocationOnMock invocation) -> { - DescribeDBInstancesResult mockResult = mock(DescribeDBInstancesResult.class); List values = new ArrayList<>(); values.add(makeValue(getIdValue())); values.add(makeValue(getIdValue())); values.add(makeValue("fake-id")); - when(mockResult.getDBInstances()).thenReturn(values); + DescribeDbInstancesResponse.Builder resultBuilder = DescribeDbInstancesResponse.builder(); + resultBuilder.dbInstances(values); if (requestCount.incrementAndGet() < 3) { - when(mockResult.getMarker()).thenReturn(String.valueOf(requestCount.get())); + 
resultBuilder.marker(String.valueOf(requestCount.get())); } - return mockResult; + return resultBuilder.build(); }); } @@ -184,56 +172,61 @@ private void validate(FieldReader fieldReader) private DBInstance makeValue(String id) { - return new DBInstance() - .withDBInstanceIdentifier(id) - .withAvailabilityZone("primary_az") - .withAllocatedStorage(100) - .withStorageEncrypted(true) - .withBackupRetentionPeriod(100) - .withAutoMinorVersionUpgrade(true) - .withDBInstanceClass("instance_class") - .withDbInstancePort(100) - .withDBInstanceStatus("status") - .withStorageType("storage_type") - .withDbiResourceId("dbi_resource_id") - .withDBName("name") - .withDomainMemberships(new DomainMembership() - .withDomain("domain") - .withFQDN("fqdn") - .withIAMRoleName("iam_role") - .withStatus("status")) - .withEngine("engine") - .withEngineVersion("engine_version") - .withLicenseModel("license_model") - .withSecondaryAvailabilityZone("secondary_az") - .withPreferredBackupWindow("backup_window") - .withPreferredMaintenanceWindow("maint_window") - .withReadReplicaSourceDBInstanceIdentifier("read_replica_source_id") - .withDBParameterGroups(new DBParameterGroupStatus() - .withDBParameterGroupName("name") - .withParameterApplyStatus("status")) - .withDBSecurityGroups(new DBSecurityGroupMembership() - .withDBSecurityGroupName("name") - .withStatus("status")) - .withDBSubnetGroup(new DBSubnetGroup() - .withDBSubnetGroupName("name") - .withSubnetGroupStatus("status") - .withVpcId("vpc") - .withSubnets(new Subnet() - .withSubnetIdentifier("subnet"))) - .withStatusInfos(new DBInstanceStatusInfo() - .withStatus("status") - .withMessage("message") - .withNormal(true) - .withStatusType("type")) - .withEndpoint(new Endpoint() - .withAddress("address") - .withPort(100) - .withHostedZoneId("zone")) - .withInstanceCreateTime(new Date(100000)) - .withIops(100) - .withMultiAZ(true) - .withPubliclyAccessible(true) - .withTagList(new Tag().withKey("key").withValue("value")); + return 
DBInstance.builder() + .dbInstanceIdentifier(id) + .availabilityZone("primary_az") + .allocatedStorage(100) + .storageEncrypted(true) + .backupRetentionPeriod(100) + .autoMinorVersionUpgrade(true) + .dbInstanceClass("instance_class") + .dbInstancePort(100) + .dbInstanceStatus("status") + .storageType("storage_type") + .dbiResourceId("dbi_resource_id") + .dbName("name") + .domainMemberships(DomainMembership.builder() + .domain("domain") + .fqdn("fqdn") + .iamRoleName("iam_role") + .status("status") + .build()) + .engine("engine") + .engineVersion("engine_version") + .licenseModel("license_model") + .secondaryAvailabilityZone("secondary_az") + .preferredBackupWindow("backup_window") + .preferredMaintenanceWindow("maint_window") + .readReplicaSourceDBInstanceIdentifier("read_replica_source_id") + .dbParameterGroups(DBParameterGroupStatus.builder() + .dbParameterGroupName("name") + .parameterApplyStatus("status") + .build()) + .dbSecurityGroups(DBSecurityGroupMembership.builder() + .dbSecurityGroupName("name") + .status("status").build()) + .dbSubnetGroup(DBSubnetGroup.builder() + .dbSubnetGroupName("name") + .subnetGroupStatus("status") + .vpcId("vpc") + .subnets(Subnet.builder().subnetIdentifier("subnet").build()) + .build()) + .statusInfos(DBInstanceStatusInfo.builder() + .status("status") + .message("message") + .normal(true) + .statusType("type") + .build()) + .endpoint(Endpoint.builder() + .address("address") + .port(100) + .hostedZoneId("zone") + .build()) + .instanceCreateTime(new Date(100000).toInstant()) + .iops(100) + .multiAZ(true) + .publiclyAccessible(true) + .tagList(Tag.builder().key("key").value("value").build()) + .build(); } } diff --git a/athena-cloudera-hive/pom.xml b/athena-cloudera-hive/pom.xml index 7e4dfdbcc3..cd0a11a82d 100644 --- a/athena-cloudera-hive/pom.xml +++ b/athena-cloudera-hive/pom.xml @@ -52,12 +52,18 @@ ${mockito.version} test - + - com.amazonaws - aws-java-sdk-rds - ${aws-sdk.version} + software.amazon.awssdk + rds + 
${aws-sdk-v2.version} test + + + software.amazon.awssdk + netty-nio-client + + diff --git a/athena-cloudera-impala/pom.xml b/athena-cloudera-impala/pom.xml index 8a2e88076d..396904c0ca 100644 --- a/athena-cloudera-impala/pom.xml +++ b/athena-cloudera-impala/pom.xml @@ -47,12 +47,18 @@ ${mockito.version} test - + - com.amazonaws - aws-java-sdk-rds - ${aws-sdk.version} + software.amazon.awssdk + rds + ${aws-sdk-v2.version} test + + + software.amazon.awssdk + netty-nio-client + + diff --git a/athena-cloudwatch/pom.xml b/athena-cloudwatch/pom.xml index bd2dad00d8..95a34d1d37 100644 --- a/athena-cloudwatch/pom.xml +++ b/athena-cloudwatch/pom.xml @@ -29,15 +29,35 @@ test - com.amazonaws - aws-java-sdk-logs - ${aws-sdk.version} + software.amazon.awssdk + cloudwatchlogs + 2.28.2 + + + + commons-logging + commons-logging + + + software.amazon.awssdk + netty-nio-client + + + + + software.amazon.awssdk + cloudwatch + ${aws-sdk-v2.version} commons-logging commons-logging + + software.amazon.awssdk + netty-nio-client + diff --git a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchExceptionFilter.java b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchExceptionFilter.java index c71db552cf..093aeedd7e 100644 --- a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchExceptionFilter.java +++ b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchExceptionFilter.java @@ -20,8 +20,8 @@ package com.amazonaws.athena.connectors.cloudwatch; import com.amazonaws.athena.connector.lambda.ThrottlingInvoker; -import com.amazonaws.services.logs.model.AWSLogsException; -import com.amazonaws.services.logs.model.LimitExceededException; +import software.amazon.awssdk.services.cloudwatch.model.LimitExceededException; +import software.amazon.awssdk.services.cloudwatchlogs.model.CloudWatchLogsException; /** * Used to identify Exceptions that are related to 
Cloudwatch Logs throttling events. @@ -36,7 +36,7 @@ private CloudwatchExceptionFilter() {} @Override public boolean isMatch(Exception ex) { - if (ex instanceof AWSLogsException && ex.getMessage().startsWith("Rate exceeded")) { + if (ex instanceof CloudWatchLogsException && ex.getMessage().startsWith("Rate exceeded")) { return true; } diff --git a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchMetadataHandler.java b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchMetadataHandler.java index e07c6f5422..e62ca50477 100644 --- a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchMetadataHandler.java +++ b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchMetadataHandler.java @@ -43,15 +43,6 @@ import com.amazonaws.athena.connector.lambda.metadata.optimizations.OptimizationSubType; import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory; import com.amazonaws.athena.connectors.cloudwatch.qpt.CloudwatchQueryPassthrough; -import com.amazonaws.services.logs.AWSLogs; -import com.amazonaws.services.logs.AWSLogsClientBuilder; -import com.amazonaws.services.logs.model.DescribeLogGroupsRequest; -import com.amazonaws.services.logs.model.DescribeLogGroupsResult; -import com.amazonaws.services.logs.model.DescribeLogStreamsRequest; -import com.amazonaws.services.logs.model.DescribeLogStreamsResult; -import com.amazonaws.services.logs.model.GetQueryResultsResult; -import com.amazonaws.services.logs.model.LogStream; -import com.amazonaws.services.logs.model.ResultField; import com.google.common.collect.ImmutableMap; import org.apache.arrow.util.VisibleForTesting; import org.apache.arrow.vector.complex.reader.FieldReader; @@ -61,6 +52,14 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import 
software.amazon.awssdk.services.cloudwatchlogs.CloudWatchLogsClient; +import software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogGroupsRequest; +import software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogGroupsResponse; +import software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogStreamsRequest; +import software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogStreamsResponse; +import software.amazon.awssdk.services.cloudwatchlogs.model.GetQueryResultsResponse; +import software.amazon.awssdk.services.cloudwatchlogs.model.LogStream; +import software.amazon.awssdk.services.cloudwatchlogs.model.ResultField; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.ArrayList; @@ -123,7 +122,7 @@ public class CloudwatchMetadataHandler .build(); } - private final AWSLogs awsLogs; + private final CloudWatchLogsClient awsLogs; private final ThrottlingInvoker invoker; private final CloudwatchTableResolver tableResolver; private final CloudwatchQueryPassthrough queryPassthrough = new CloudwatchQueryPassthrough(); @@ -131,14 +130,14 @@ public class CloudwatchMetadataHandler public CloudwatchMetadataHandler(java.util.Map configOptions) { super(SOURCE_TYPE, configOptions); - this.awsLogs = AWSLogsClientBuilder.standard().build(); + this.awsLogs = CloudWatchLogsClient.create(); this.invoker = ThrottlingInvoker.newDefaultBuilder(EXCEPTION_FILTER, configOptions).build(); - this.tableResolver = new CloudwatchTableResolver(this.invoker, awsLogs, MAX_RESULTS, MAX_RESULTS); + this.tableResolver = new CloudwatchTableResolver(this.invoker, awsLogs, MAX_RESULTS, MAX_RESULTS); } @VisibleForTesting protected CloudwatchMetadataHandler( - AWSLogs awsLogs, + CloudWatchLogsClient awsLogs, EncryptionKeyFactory keyFactory, SecretsManagerClient secretsManager, AthenaClient athena, @@ -161,19 +160,19 @@ protected CloudwatchMetadataHandler( public ListSchemasResponse doListSchemaNames(BlockAllocator blockAllocator, 
ListSchemasRequest listSchemasRequest) throws TimeoutException { - DescribeLogGroupsRequest request = new DescribeLogGroupsRequest(); - DescribeLogGroupsResult result; + DescribeLogGroupsRequest.Builder requestBuilder = DescribeLogGroupsRequest.builder(); + DescribeLogGroupsResponse response; List schemas = new ArrayList<>(); do { if (schemas.size() > MAX_RESULTS) { throw new RuntimeException("Too many log groups, exceeded max metadata results for schema count."); } - result = invoker.invoke(() -> awsLogs.describeLogGroups(request)); - result.getLogGroups().forEach(next -> schemas.add(next.getLogGroupName())); - request.setNextToken(result.getNextToken()); - logger.info("doListSchemaNames: Listing log groups {} {}", result.getNextToken(), schemas.size()); + response = invoker.invoke(() -> awsLogs.describeLogGroups(requestBuilder.build())); + response.logGroups().forEach(next -> schemas.add(next.logGroupName())); + requestBuilder.nextToken(response.nextToken()); + logger.info("doListSchemaNames: Listing log groups {} {}", response.nextToken(), schemas.size()); } - while (result.getNextToken() != null); + while (response.nextToken() != null); return new ListSchemasResponse(listSchemasRequest.getCatalogName(), schemas); } @@ -189,28 +188,28 @@ public ListTablesResponse doListTables(BlockAllocator blockAllocator, ListTables { String nextToken = null; String logGroupName = tableResolver.validateSchema(listTablesRequest.getSchemaName()); - DescribeLogStreamsRequest request = new DescribeLogStreamsRequest(logGroupName); - DescribeLogStreamsResult result; + DescribeLogStreamsRequest.Builder requestBuilder = DescribeLogStreamsRequest.builder().logGroupName(logGroupName); + DescribeLogStreamsResponse response; List tables = new ArrayList<>(); if (listTablesRequest.getPageSize() == UNLIMITED_PAGE_SIZE_VALUE) { do { if (tables.size() > MAX_RESULTS) { throw new RuntimeException("Too many log streams, exceeded max metadata results for table count."); } - result = 
invoker.invoke(() -> awsLogs.describeLogStreams(request)); - result.getLogStreams().forEach(next -> tables.add(toTableName(listTablesRequest, next))); - request.setNextToken(result.getNextToken()); - logger.info("doListTables: Listing log streams with token {} and size {}", result.getNextToken(), tables.size()); + response = invoker.invoke(() -> awsLogs.describeLogStreams(requestBuilder.build())); + response.logStreams().forEach(next -> tables.add(toTableName(listTablesRequest, next))); + requestBuilder.nextToken(response.nextToken()); + logger.info("doListTables: Listing log streams with token {} and size {}", response.nextToken(), tables.size()); } - while (result.getNextToken() != null); + while (response.nextToken() != null); } else { - request.setNextToken(listTablesRequest.getNextToken()); - request.setLimit(listTablesRequest.getPageSize()); - result = invoker.invoke(() -> awsLogs.describeLogStreams(request)); - result.getLogStreams().forEach(next -> tables.add(toTableName(listTablesRequest, next))); - nextToken = result.getNextToken(); - logger.info("doListTables: Listing log streams with token {} and size {}", result.getNextToken(), tables.size()); + requestBuilder.nextToken(listTablesRequest.getNextToken()); + requestBuilder.limit(listTablesRequest.getPageSize()); + response = invoker.invoke(() -> awsLogs.describeLogStreams(requestBuilder.build())); + response.logStreams().forEach(next -> tables.add(toTableName(listTablesRequest, next))); + nextToken = response.nextToken(); + logger.info("doListTables: Listing log streams with token {} and size {}", response.nextToken(), tables.size()); } // Don't add the ALL_LOG_STREAMS_TABLE unless we're at the end of listing out all the tables. 
@@ -276,26 +275,26 @@ public void getPartitions(BlockWriter blockWriter, GetTableLayoutRequest request CloudwatchTableName cwTableName = tableResolver.validateTable(request.getTableName()); - DescribeLogStreamsRequest cwRequest = new DescribeLogStreamsRequest(cwTableName.getLogGroupName()); + DescribeLogStreamsRequest.Builder cwRequestBuilder = DescribeLogStreamsRequest.builder().logGroupName(cwTableName.getLogGroupName()); if (!ALL_LOG_STREAMS_TABLE.equals(cwTableName.getLogStreamName())) { - cwRequest.setLogStreamNamePrefix(cwTableName.getLogStreamName()); + cwRequestBuilder.logStreamNamePrefix(cwTableName.getLogStreamName()); } - DescribeLogStreamsResult result; + DescribeLogStreamsResponse response; do { - result = invoker.invoke(() -> awsLogs.describeLogStreams(cwRequest)); - for (LogStream next : result.getLogStreams()) { + response = invoker.invoke(() -> awsLogs.describeLogStreams(cwRequestBuilder.build())); + for (LogStream next : response.logStreams()) { //Each log stream that matches any possible partition pruning should be added to the partition list. blockWriter.writeRows((Block block, int rowNum) -> { - boolean matched = block.setValue(LOG_GROUP_FIELD, rowNum, cwRequest.getLogGroupName()); - matched &= block.setValue(LOG_STREAM_FIELD, rowNum, next.getLogStreamName()); - matched &= block.setValue(LOG_STREAM_SIZE_FIELD, rowNum, next.getStoredBytes()); + boolean matched = block.setValue(LOG_GROUP_FIELD, rowNum, cwRequestBuilder.build().logGroupName()); + matched &= block.setValue(LOG_STREAM_FIELD, rowNum, next.logStreamName()); + matched &= block.setValue(LOG_STREAM_SIZE_FIELD, rowNum, next.storedBytes()); return matched ? 
1 : 0; }); } - cwRequest.setNextToken(result.getNextToken()); + cwRequestBuilder.nextToken(response.nextToken()); } - while (result.getNextToken() != null && queryStatusChecker.isQueryRunning()); + while (response.nextToken() != null && queryStatusChecker.isQueryRunning()); } /** @@ -367,11 +366,11 @@ public GetTableResponse doGetQueryPassthroughSchema(BlockAllocator allocator, Ge throw new IllegalArgumentException("No Query passed through [{}]" + request); } // to get column names with limit 1 - GetQueryResultsResult getQueryResultsResult = getResult(invoker, awsLogs, request.getQueryPassthroughArguments(), 1); + GetQueryResultsResponse getQueryResultsResponse = getResult(invoker, awsLogs, request.getQueryPassthroughArguments(), 1); SchemaBuilder schemaBuilder = SchemaBuilder.newBuilder(); - if (!getQueryResultsResult.getResults().isEmpty()) { - for (ResultField field : getQueryResultsResult.getResults().get(0)) { - schemaBuilder.addField(field.getField(), Types.MinorType.VARCHAR.getType()); + if (!getQueryResultsResponse.results().isEmpty()) { + for (ResultField field : getQueryResultsResponse.results().get(0)) { + schemaBuilder.addField(field.field(), Types.MinorType.VARCHAR.getType()); } } @@ -415,6 +414,6 @@ private String encodeContinuationToken(int partition) */ private TableName toTableName(ListTablesRequest request, LogStream logStream) { - return new TableName(request.getSchemaName(), logStream.getLogStreamName()); + return new TableName(request.getSchemaName(), logStream.logStreamName()); } } diff --git a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandler.java b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandler.java index 7b4aa47596..912b94d218 100644 --- a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandler.java +++ 
b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandler.java @@ -32,17 +32,16 @@ import com.amazonaws.athena.connector.lambda.handlers.RecordHandler; import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest; import com.amazonaws.athena.connectors.cloudwatch.qpt.CloudwatchQueryPassthrough; -import com.amazonaws.services.logs.AWSLogs; -import com.amazonaws.services.logs.AWSLogsClientBuilder; -import com.amazonaws.services.logs.model.GetLogEventsRequest; -import com.amazonaws.services.logs.model.GetLogEventsResult; -import com.amazonaws.services.logs.model.GetQueryResultsResult; -import com.amazonaws.services.logs.model.OutputLogEvent; -import com.amazonaws.services.logs.model.ResultField; import org.apache.arrow.util.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.cloudwatchlogs.CloudWatchLogsClient; +import software.amazon.awssdk.services.cloudwatchlogs.model.GetLogEventsRequest; +import software.amazon.awssdk.services.cloudwatchlogs.model.GetLogEventsResponse; +import software.amazon.awssdk.services.cloudwatchlogs.model.GetQueryResultsResponse; +import software.amazon.awssdk.services.cloudwatchlogs.model.OutputLogEvent; +import software.amazon.awssdk.services.cloudwatchlogs.model.ResultField; import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; @@ -75,7 +74,7 @@ public class CloudwatchRecordHandler //Used to handle Throttling events and apply AIMD congestion control private final ThrottlingInvoker invoker; private final AtomicLong count = new AtomicLong(0); - private final AWSLogs awsLogs; + private final CloudWatchLogsClient awsLogs; private final CloudwatchQueryPassthrough queryPassthrough = new CloudwatchQueryPassthrough(); public CloudwatchRecordHandler(java.util.Map configOptions) @@ -84,12 +83,12 @@ 
public CloudwatchRecordHandler(java.util.Map configOptions) S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(), - AWSLogsClientBuilder.defaultClient(), + CloudWatchLogsClient.create(), configOptions); } @VisibleForTesting - protected CloudwatchRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, AWSLogs awsLogs, java.util.Map configOptions) + protected CloudwatchRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, CloudWatchLogsClient awsLogs, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, SOURCE_TYPE, configOptions); this.awsLogs = awsLogs; @@ -115,37 +114,38 @@ protected void readWithConstraint(BlockSpiller spiller, ReadRecordsRequest recor invoker.setBlockSpiller(spiller); do { final String actualContinuationToken = continuationToken; - GetLogEventsResult logEventsResult = invoker.invoke(() -> awsLogs.getLogEvents( + GetLogEventsResponse logEventsResponse = invoker.invoke(() -> awsLogs.getLogEvents( pushDownConstraints(recordsRequest.getConstraints(), - new GetLogEventsRequest() - .withLogGroupName(split.getProperty(LOG_GROUP_FIELD)) + GetLogEventsRequest.builder() + .logGroupName(split.getProperty(LOG_GROUP_FIELD)) //We use the property instead of the table name because of the special all_streams table - .withLogStreamName(split.getProperty(LOG_STREAM_FIELD)) - .withNextToken(actualContinuationToken) + .logStreamName(split.getProperty(LOG_STREAM_FIELD)) + .nextToken(actualContinuationToken) // must be set to use nextToken correctly - .withStartFromHead(true) + .startFromHead(true) + .build() ))); - if (continuationToken == null || !continuationToken.equals(logEventsResult.getNextForwardToken())) { - continuationToken = logEventsResult.getNextForwardToken(); + if (continuationToken == null || !continuationToken.equals(logEventsResponse.nextForwardToken())) { + continuationToken = logEventsResponse.nextForwardToken(); } else { continuationToken 
= null; } - for (OutputLogEvent ole : logEventsResult.getEvents()) { + for (OutputLogEvent ole : logEventsResponse.events()) { spiller.writeRows((Block block, int rowNum) -> { boolean matched = true; matched &= block.offerValue(LOG_STREAM_FIELD, rowNum, split.getProperty(LOG_STREAM_FIELD)); - matched &= block.offerValue(LOG_TIME_FIELD, rowNum, ole.getTimestamp()); - matched &= block.offerValue(LOG_MSG_FIELD, rowNum, ole.getMessage()); + matched &= block.offerValue(LOG_TIME_FIELD, rowNum, ole.timestamp()); + matched &= block.offerValue(LOG_MSG_FIELD, rowNum, ole.message()); return matched ? 1 : 0; }); } logger.info("readWithConstraint: LogGroup[{}] LogStream[{}] Continuation[{}] rows[{}]", tableName.getSchemaName(), tableName.getTableName(), continuationToken, - logEventsResult.getEvents().size()); + logEventsResponse.events().size()); } while (continuationToken != null && queryStatusChecker.isQueryRunning()); } @@ -155,13 +155,13 @@ private void getQueryPassthreoughResults(BlockSpiller spiller, ReadRecordsReques { Map qptArguments = recordsRequest.getConstraints().getQueryPassthroughArguments(); queryPassthrough.verify(qptArguments); - GetQueryResultsResult getQueryResultsResult = getResult(invoker, awsLogs, qptArguments, Integer.parseInt(qptArguments.get(CloudwatchQueryPassthrough.LIMIT))); + GetQueryResultsResponse getQueryResultsResponse = getResult(invoker, awsLogs, qptArguments, Integer.parseInt(qptArguments.get(CloudwatchQueryPassthrough.LIMIT))); - for (List resultList : getQueryResultsResult.getResults()) { + for (List resultList : getQueryResultsResponse.results()) { spiller.writeRows((Block block, int rowNum) -> { for (ResultField resultField : resultList) { boolean matched = true; - matched &= block.offerValue(resultField.getField(), rowNum, resultField.getValue()); + matched &= block.offerValue(resultField.field(), rowNum, resultField.value()); if (!matched) { return 0; } @@ -181,6 +181,7 @@ private void getQueryPassthreoughResults(BlockSpiller spiller, 
ReadRecordsReques */ private GetLogEventsRequest pushDownConstraints(Constraints constraints, GetLogEventsRequest request) { + GetLogEventsRequest.Builder requestBuilder = request.toBuilder(); ValueSet timeConstraint = constraints.getSummary().get(LOG_TIME_FIELD); if (timeConstraint instanceof SortedRangeSet && !timeConstraint.isNullAllowed()) { //SortedRangeSet is how >, <, between is represented which are easiest and most common when @@ -192,15 +193,15 @@ private GetLogEventsRequest pushDownConstraints(Constraints constraints, GetLogE if (!basicPredicate.getLow().isNullValue()) { Long lowerBound = (Long) basicPredicate.getLow().getValue(); - request.setStartTime(lowerBound); + requestBuilder.startTime(lowerBound); } if (!basicPredicate.getHigh().isNullValue()) { Long upperBound = (Long) basicPredicate.getHigh().getValue(); - request.setEndTime(upperBound); + requestBuilder.endTime(upperBound); } } - return request; + return requestBuilder.build(); } } diff --git a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchTableResolver.java b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchTableResolver.java index 4c7f25ec7e..d4059b0438 100644 --- a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchTableResolver.java +++ b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchTableResolver.java @@ -21,18 +21,18 @@ import com.amazonaws.athena.connector.lambda.ThrottlingInvoker; import com.amazonaws.athena.connector.lambda.domain.TableName; -import com.amazonaws.services.logs.AWSLogs; -import com.amazonaws.services.logs.model.DescribeLogGroupsRequest; -import com.amazonaws.services.logs.model.DescribeLogGroupsResult; -import com.amazonaws.services.logs.model.DescribeLogStreamsRequest; -import com.amazonaws.services.logs.model.DescribeLogStreamsResult; -import com.amazonaws.services.logs.model.LogGroup; -import 
com.amazonaws.services.logs.model.LogStream; import com.google.common.cache.CacheBuilder; import com.google.common.cache.CacheLoader; import com.google.common.cache.LoadingCache; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.cloudwatchlogs.CloudWatchLogsClient; +import software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogGroupsRequest; +import software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogGroupsResponse; +import software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogStreamsRequest; +import software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogStreamsResponse; +import software.amazon.awssdk.services.cloudwatchlogs.model.LogGroup; +import software.amazon.awssdk.services.cloudwatchlogs.model.LogStream; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeoutException; @@ -51,7 +51,7 @@ public class CloudwatchTableResolver { private static final Logger logger = LoggerFactory.getLogger(CloudwatchTableResolver.class); - private AWSLogs awsLogs; + private CloudWatchLogsClient logsClient; //Used to handle Throttling events using an AIMD strategy for congestion control. private ThrottlingInvoker invoker; //The LogStream pattern that is capitalized by LAMBDA @@ -67,14 +67,14 @@ public class CloudwatchTableResolver * Constructs an instance of the table resolver. * * @param invoker The ThrottlingInvoker to use to handle throttling events. - * @param awsLogs The AWSLogs client to use for cache misses. + * @param logsClient The AWSLogs client to use for cache misses. * @param maxSchemaCacheSize The max number of schemas to cache. * @param maxTableCacheSize The max tables to cache. 
*/ - public CloudwatchTableResolver(ThrottlingInvoker invoker, AWSLogs awsLogs, long maxSchemaCacheSize, long maxTableCacheSize) + public CloudwatchTableResolver(ThrottlingInvoker invoker, CloudWatchLogsClient logsClient, long maxSchemaCacheSize, long maxTableCacheSize) { this.invoker = invoker; - this.awsLogs = awsLogs; + this.logsClient = logsClient; this.tableCache = CacheBuilder.newBuilder() .maximumSize(maxTableCacheSize) .build( @@ -119,12 +119,12 @@ private CloudwatchTableName loadLogStreams(String logGroup, String logStream) logger.info("loadLogStreams: Did not find a match for the table, falling back to LogGroup scan for {}:{}", logGroup, logStream); - DescribeLogStreamsRequest validateTableRequest = new DescribeLogStreamsRequest(logGroup); - DescribeLogStreamsResult validateTableResult; + DescribeLogStreamsRequest.Builder validateTableRequestBuilder = DescribeLogStreamsRequest.builder().logGroupName(logGroup); + DescribeLogStreamsResponse validateTableResponse; do { - validateTableResult = invoker.invoke(() -> awsLogs.describeLogStreams(validateTableRequest)); - for (LogStream nextStream : validateTableResult.getLogStreams()) { - String logStreamName = nextStream.getLogStreamName(); + validateTableResponse = invoker.invoke(() -> logsClient.describeLogStreams(validateTableRequestBuilder.build())); + for (LogStream nextStream : validateTableResponse.logStreams()) { + String logStreamName = nextStream.logStreamName(); CloudwatchTableName nextCloudwatch = new CloudwatchTableName(logGroup, logStreamName); tableCache.put(nextCloudwatch.toTableName(), nextCloudwatch); if (nextCloudwatch.getLogStreamName().equalsIgnoreCase(logStream)) { @@ -134,9 +134,9 @@ private CloudwatchTableName loadLogStreams(String logGroup, String logStream) return nextCloudwatch; } } - validateTableRequest.setNextToken(validateTableResult.getNextToken()); + validateTableRequestBuilder.nextToken(validateTableResponse.nextToken()); } - while (validateTableResult.getNextToken() != null); + 
while (validateTableResponse.nextToken() != null); //We could not find a match throw new IllegalArgumentException("No such table " + logGroup + " " + logStream); @@ -163,11 +163,11 @@ private CloudwatchTableName loadLogStream(String logGroup, String logStream) LAMBDA_PATTERN, effectiveTableName); effectiveTableName = effectiveTableName.replace(LAMBDA_PATTERN, LAMBDA_ACTUAL_PATTERN); } - DescribeLogStreamsRequest request = new DescribeLogStreamsRequest(logGroup) - .withLogStreamNamePrefix(effectiveTableName); - DescribeLogStreamsResult result = invoker.invoke(() -> awsLogs.describeLogStreams(request)); - for (LogStream nextStream : result.getLogStreams()) { - String logStreamName = nextStream.getLogStreamName(); + DescribeLogStreamsRequest request = DescribeLogStreamsRequest.builder().logGroupName(logGroup) + .logStreamNamePrefix(effectiveTableName).build(); + DescribeLogStreamsResponse response = invoker.invoke(() -> logsClient.describeLogStreams(request)); + for (LogStream nextStream : response.logStreams()) { + String logStreamName = nextStream.logStreamName(); CloudwatchTableName nextCloudwatch = new CloudwatchTableName(logGroup, logStreamName); if (nextCloudwatch.getLogStreamName().equalsIgnoreCase(logStream)) { logger.info("loadLogStream: Matched {} for {}:{}", nextCloudwatch, logGroup, logStream); @@ -195,21 +195,21 @@ private String loadLogGroups(String schemaName) } logger.info("loadLogGroups: Did not find a match for the schema, falling back to LogGroup scan for {}", schemaName); - DescribeLogGroupsRequest validateSchemaRequest = new DescribeLogGroupsRequest(); - DescribeLogGroupsResult validateSchemaResult; + DescribeLogGroupsRequest.Builder validateSchemaRequestBuilder = DescribeLogGroupsRequest.builder(); + DescribeLogGroupsResponse validateSchemaResponse; do { - validateSchemaResult = invoker.invoke(() -> awsLogs.describeLogGroups(validateSchemaRequest)); - for (LogGroup next : validateSchemaResult.getLogGroups()) { - String nextLogGroupName = 
next.getLogGroupName(); + validateSchemaResponse = invoker.invoke(() -> logsClient.describeLogGroups(validateSchemaRequestBuilder.build())); + for (LogGroup next : validateSchemaResponse.logGroups()) { + String nextLogGroupName = next.logGroupName(); schemaCache.put(schemaName, nextLogGroupName); if (nextLogGroupName.equalsIgnoreCase(schemaName)) { logger.info("loadLogGroups: Matched {} for {}", nextLogGroupName, schemaName); return nextLogGroupName; } } - validateSchemaRequest.setNextToken(validateSchemaResult.getNextToken()); + validateSchemaRequestBuilder.nextToken(validateSchemaResponse.nextToken()); } - while (validateSchemaResult.getNextToken() != null); + while (validateSchemaResponse.nextToken() != null); //We could not find a match throw new IllegalArgumentException("No such schema " + schemaName); @@ -224,10 +224,10 @@ private String loadLogGroups(String schemaName) private String loadLogGroup(String schemaName) throws TimeoutException { - DescribeLogGroupsRequest request = new DescribeLogGroupsRequest().withLogGroupNamePrefix(schemaName); - DescribeLogGroupsResult result = invoker.invoke(() -> awsLogs.describeLogGroups(request)); - for (LogGroup next : result.getLogGroups()) { - String nextLogGroupName = next.getLogGroupName(); + DescribeLogGroupsRequest request = DescribeLogGroupsRequest.builder().logGroupNamePrefix(schemaName).build(); + DescribeLogGroupsResponse response = invoker.invoke(() -> logsClient.describeLogGroups(request)); + for (LogGroup next : response.logGroups()) { + String nextLogGroupName = next.logGroupName(); if (nextLogGroupName.equalsIgnoreCase(schemaName)) { logger.info("loadLogGroup: Matched {} for {}", nextLogGroupName, schemaName); return nextLogGroupName; diff --git a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchUtils.java b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchUtils.java index 5c19ec17ee..bb8a209d47 100644 --- 
a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchUtils.java +++ b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchUtils.java @@ -21,13 +21,14 @@ import com.amazonaws.athena.connector.lambda.ThrottlingInvoker; import com.amazonaws.athena.connectors.cloudwatch.qpt.CloudwatchQueryPassthrough; -import com.amazonaws.services.logs.AWSLogs; -import com.amazonaws.services.logs.model.GetQueryResultsRequest; -import com.amazonaws.services.logs.model.GetQueryResultsResult; -import com.amazonaws.services.logs.model.StartQueryRequest; -import com.amazonaws.services.logs.model.StartQueryResult; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.cloudwatchlogs.CloudWatchLogsClient; +import software.amazon.awssdk.services.cloudwatchlogs.model.GetQueryResultsRequest; +import software.amazon.awssdk.services.cloudwatchlogs.model.GetQueryResultsResponse; +import software.amazon.awssdk.services.cloudwatchlogs.model.QueryStatus; +import software.amazon.awssdk.services.cloudwatchlogs.model.StartQueryRequest; +import software.amazon.awssdk.services.cloudwatchlogs.model.StartQueryResponse; import java.time.Instant; import java.time.temporal.ChronoUnit; @@ -41,8 +42,8 @@ public final class CloudwatchUtils private CloudwatchUtils() {} public static StartQueryRequest startQueryRequest(Map qptArguments) { - return new StartQueryRequest().withEndTime(Long.valueOf(qptArguments.get(CloudwatchQueryPassthrough.ENDTIME))).withStartTime(Long.valueOf(qptArguments.get(CloudwatchQueryPassthrough.STARTTIME))) - .withQueryString(qptArguments.get(CloudwatchQueryPassthrough.QUERYSTRING)).withLogGroupNames(getLogGroupNames(qptArguments)); + return StartQueryRequest.builder().endTime(Long.valueOf(qptArguments.get(CloudwatchQueryPassthrough.ENDTIME))).startTime(Long.valueOf(qptArguments.get(CloudwatchQueryPassthrough.STARTTIME))) + 
.queryString(qptArguments.get(CloudwatchQueryPassthrough.QUERYSTRING)).logGroupNames(getLogGroupNames(qptArguments)).build(); } private static String[] getLogGroupNames(Map qptArguments) @@ -55,25 +56,25 @@ private static String[] getLogGroupNames(Map qptArguments) return logGroupNames; } - public static StartQueryResult getQueryResult(AWSLogs awsLogs, StartQueryRequest startQueryRequest) + public static StartQueryResponse getQueryResult(CloudWatchLogsClient awsLogs, StartQueryRequest startQueryRequest) { return awsLogs.startQuery(startQueryRequest); } - public static GetQueryResultsResult getQueryResults(AWSLogs awsLogs, StartQueryResult startQueryResult) + public static GetQueryResultsResponse getQueryResults(CloudWatchLogsClient awsLogs, StartQueryResponse startQueryResponse) { - return awsLogs.getQueryResults(new GetQueryResultsRequest().withQueryId(startQueryResult.getQueryId())); + return awsLogs.getQueryResults(GetQueryResultsRequest.builder().queryId(startQueryResponse.queryId()).build()); } - public static GetQueryResultsResult getResult(ThrottlingInvoker invoker, AWSLogs awsLogs, Map qptArguments, int limit) throws TimeoutException, InterruptedException + public static GetQueryResultsResponse getResult(ThrottlingInvoker invoker, CloudWatchLogsClient awsLogs, Map qptArguments, int limit) throws TimeoutException, InterruptedException { - StartQueryResult startQueryResult = invoker.invoke(() -> getQueryResult(awsLogs, startQueryRequest(qptArguments).withLimit(limit))); - String status = null; - GetQueryResultsResult getQueryResultsResult; + StartQueryResponse startQueryResponse = invoker.invoke(() -> getQueryResult(awsLogs, startQueryRequest(qptArguments).toBuilder().limit(limit).build())); + QueryStatus status = null; + GetQueryResultsResponse getQueryResultsResponse; Instant startTime = Instant.now(); // Record the start time do { - getQueryResultsResult = invoker.invoke(() -> getQueryResults(awsLogs, startQueryResult)); - status = 
getQueryResultsResult.getStatus(); + getQueryResultsResponse = invoker.invoke(() -> getQueryResults(awsLogs, startQueryResponse)); + status = getQueryResultsResponse.status(); Thread.sleep(1000); // Check if 10 minutes have passed @@ -82,8 +83,8 @@ public static GetQueryResultsResult getResult(ThrottlingInvoker invoker, AWSLogs if (elapsedMinutes >= RESULT_TIMEOUT) { throw new RuntimeException("Query execution timeout exceeded."); } - } while (!status.equalsIgnoreCase("Complete")); + } while (!status.equals(QueryStatus.COMPLETE)); - return getQueryResultsResult; + return getQueryResultsResponse; } } diff --git a/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchMetadataHandlerTest.java b/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchMetadataHandlerTest.java index cc2ce27fb8..f615b3c7b1 100644 --- a/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchMetadataHandlerTest.java +++ b/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchMetadataHandlerTest.java @@ -43,13 +43,6 @@ import com.amazonaws.athena.connector.lambda.metadata.MetadataResponse; import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; -import com.amazonaws.services.logs.AWSLogs; -import com.amazonaws.services.logs.model.DescribeLogGroupsRequest; -import com.amazonaws.services.logs.model.DescribeLogGroupsResult; -import com.amazonaws.services.logs.model.DescribeLogStreamsRequest; -import com.amazonaws.services.logs.model.DescribeLogStreamsResult; -import com.amazonaws.services.logs.model.LogGroup; -import com.amazonaws.services.logs.model.LogStream; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.ArrowType; import org.apache.arrow.vector.types.pojo.Schema; @@ -64,6 +57,13 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import 
software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.cloudwatchlogs.CloudWatchLogsClient; +import software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogGroupsRequest; +import software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogGroupsResponse; +import software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogStreamsRequest; +import software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogStreamsResponse; +import software.amazon.awssdk.services.cloudwatchlogs.model.LogGroup; +import software.amazon.awssdk.services.cloudwatchlogs.model.LogStream; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; import java.util.ArrayList; @@ -92,7 +92,7 @@ public class CloudwatchMetadataHandlerTest private BlockAllocator allocator; @Mock - private AWSLogs mockAwsLogs; + private CloudWatchLogsClient mockAwsLogs; @Mock private SecretsManagerClient mockSecretsManager; @@ -105,13 +105,19 @@ public void setUp() throws Exception { Mockito.lenient().when(mockAwsLogs.describeLogStreams(nullable(DescribeLogStreamsRequest.class))).thenAnswer((InvocationOnMock invocationOnMock) -> { - return new DescribeLogStreamsResult().withLogStreams(new LogStream().withLogStreamName("table-9"), - new LogStream().withLogStreamName("table-10")); + return DescribeLogStreamsResponse.builder() + .logStreams( + LogStream.builder().logStreamName("table-9").build(), + LogStream.builder().logStreamName("table-10").build()) + .build(); }); when(mockAwsLogs.describeLogGroups(nullable(DescribeLogGroupsRequest.class))).thenAnswer((InvocationOnMock invocationOnMock) -> { - return new DescribeLogGroupsResult().withLogGroups(new LogGroup().withLogGroupName("schema-1"), - new LogGroup().withLogGroupName("schema-20")); + return DescribeLogGroupsResponse.builder() + .logGroups( + LogGroup.builder().logGroupName("schema-1").build(), + LogGroup.builder().logGroupName("schema-20").build()) + .build(); }); handler = new 
CloudwatchMetadataHandler(mockAwsLogs, new LocalKeyFactory(), mockSecretsManager, mockAthena, "spillBucket", "spillPrefix", com.google.common.collect.ImmutableMap.of()); allocator = new BlockAllocatorImpl(); @@ -133,34 +139,33 @@ public void doListSchemaNames() when(mockAwsLogs.describeLogGroups(nullable(DescribeLogGroupsRequest.class))).thenAnswer((InvocationOnMock invocationOnMock) -> { DescribeLogGroupsRequest request = (DescribeLogGroupsRequest) invocationOnMock.getArguments()[0]; - DescribeLogGroupsResult result = new DescribeLogGroupsResult(); + DescribeLogGroupsResponse.Builder responseBuilder = DescribeLogGroupsResponse.builder(); Integer nextToken; - if (request.getNextToken() == null) { + if (request.nextToken() == null) { nextToken = 1; } - else if (Integer.valueOf(request.getNextToken()) < 3) { - nextToken = Integer.valueOf(request.getNextToken()) + 1; + else if (Integer.valueOf(request.nextToken()) < 3) { + nextToken = Integer.valueOf(request.nextToken()) + 1; } else { nextToken = null; } List logGroups = new ArrayList<>(); - if (request.getNextToken() == null || Integer.valueOf(request.getNextToken()) < 3) { + if (request.nextToken() == null || Integer.valueOf(request.nextToken()) < 3) { for (int i = 0; i < 10; i++) { - LogGroup nextLogGroup = new LogGroup(); - nextLogGroup.setLogGroupName("schema-" + String.valueOf(i)); + LogGroup nextLogGroup = LogGroup.builder().logGroupName("schema-" + String.valueOf(i)).build(); logGroups.add(nextLogGroup); } } - result.withLogGroups(logGroups); + responseBuilder.logGroups(logGroups); if (nextToken != null) { - result.setNextToken(String.valueOf(nextToken)); + responseBuilder.nextToken(String.valueOf(nextToken)); } - return result; + return responseBuilder.build(); }); ListSchemasRequest req = new ListSchemasRequest(identity, "queryId", "default"); @@ -183,34 +188,33 @@ public void doListTables() when(mockAwsLogs.describeLogStreams(nullable(DescribeLogStreamsRequest.class))).thenAnswer((InvocationOnMock 
invocationOnMock) -> { DescribeLogStreamsRequest request = (DescribeLogStreamsRequest) invocationOnMock.getArguments()[0]; - DescribeLogStreamsResult result = new DescribeLogStreamsResult(); + DescribeLogStreamsResponse.Builder responseBuilder = DescribeLogStreamsResponse.builder(); Integer nextToken; - if (request.getNextToken() == null) { + if (request.nextToken() == null) { nextToken = 1; } - else if (Integer.valueOf(request.getNextToken()) < 3) { - nextToken = Integer.valueOf(request.getNextToken()) + 1; + else if (Integer.valueOf(request.nextToken()) < 3) { + nextToken = Integer.valueOf(request.nextToken()) + 1; } else { nextToken = null; } List logStreams = new ArrayList<>(); - if (request.getNextToken() == null || Integer.valueOf(request.getNextToken()) < 3) { + if (request.nextToken() == null || Integer.valueOf(request.nextToken()) < 3) { for (int i = 0; i < 10; i++) { - LogStream nextLogStream = new LogStream(); - nextLogStream.setLogStreamName("table-" + String.valueOf(i)); + LogStream nextLogStream = LogStream.builder().logStreamName("table-" + String.valueOf(i)).build(); logStreams.add(nextLogStream); } } - result.withLogStreams(logStreams); + responseBuilder.logStreams(logStreams); if (nextToken != null) { - result.setNextToken(String.valueOf(nextToken)); + responseBuilder.nextToken(String.valueOf(nextToken)); } - return result; + return responseBuilder.build(); }); ListTablesRequest req = new ListTablesRequest(identity, "queryId", "default", @@ -238,35 +242,34 @@ public void doGetTable() when(mockAwsLogs.describeLogStreams(nullable(DescribeLogStreamsRequest.class))).thenAnswer((InvocationOnMock invocationOnMock) -> { DescribeLogStreamsRequest request = (DescribeLogStreamsRequest) invocationOnMock.getArguments()[0]; - assertTrue(request.getLogGroupName().equals(expectedSchema)); - DescribeLogStreamsResult result = new DescribeLogStreamsResult(); + assertTrue(request.logGroupName().equals(expectedSchema)); + DescribeLogStreamsResponse.Builder 
responseBuilder = DescribeLogStreamsResponse.builder(); Integer nextToken; - if (request.getNextToken() == null) { + if (request.nextToken() == null) { nextToken = 1; } - else if (Integer.valueOf(request.getNextToken()) < 3) { - nextToken = Integer.valueOf(request.getNextToken()) + 1; + else if (Integer.valueOf(request.nextToken()) < 3) { + nextToken = Integer.valueOf(request.nextToken()) + 1; } else { nextToken = null; } List logStreams = new ArrayList<>(); - if (request.getNextToken() == null || Integer.valueOf(request.getNextToken()) < 3) { + if (request.nextToken() == null || Integer.valueOf(request.nextToken()) < 3) { for (int i = 0; i < 10; i++) { - LogStream nextLogStream = new LogStream(); - nextLogStream.setLogStreamName("table-" + String.valueOf(i)); + LogStream nextLogStream = LogStream.builder().logStreamName("table-" + String.valueOf(i)).build(); logStreams.add(nextLogStream); } } - result.withLogStreams(logStreams); + responseBuilder.logStreams(logStreams); if (nextToken != null) { - result.setNextToken(String.valueOf(nextToken)); + responseBuilder.nextToken(String.valueOf(nextToken)); } - return result; + return responseBuilder.build(); }); GetTableRequest req = new GetTableRequest(identity, "queryId", "default", new TableName(expectedSchema, "table-9"), Collections.emptyMap()); @@ -290,36 +293,37 @@ public void doGetTableLayout() when(mockAwsLogs.describeLogStreams(nullable(DescribeLogStreamsRequest.class))).thenAnswer((InvocationOnMock invocationOnMock) -> { DescribeLogStreamsRequest request = (DescribeLogStreamsRequest) invocationOnMock.getArguments()[0]; - DescribeLogStreamsResult result = new DescribeLogStreamsResult(); + DescribeLogStreamsResponse.Builder responseBuilder = DescribeLogStreamsResponse.builder(); Integer nextToken; - if (request.getNextToken() == null) { + if (request.nextToken() == null) { nextToken = 1; } - else if (Integer.valueOf(request.getNextToken()) < 3) { - nextToken = Integer.valueOf(request.getNextToken()) + 1; + else 
if (Integer.valueOf(request.nextToken()) < 3) { + nextToken = Integer.valueOf(request.nextToken()) + 1; } else { nextToken = null; } List logStreams = new ArrayList<>(); - if (request.getNextToken() == null || Integer.valueOf(request.getNextToken()) < 3) { - int continuation = request.getNextToken() == null ? 0 : Integer.valueOf(request.getNextToken()); + if (request.nextToken() == null || Integer.valueOf(request.nextToken()) < 3) { + int continuation = request.nextToken() == null ? 0 : Integer.valueOf(request.nextToken()); for (int i = 0 + continuation * 100; i < 300; i++) { - LogStream nextLogStream = new LogStream(); - nextLogStream.setLogStreamName("table-" + String.valueOf(i)); - nextLogStream.setStoredBytes(i * 1000L); + LogStream nextLogStream = LogStream.builder() + .logStreamName("table-" + String.valueOf(i)) + .storedBytes(i * 1000L) + .build(); logStreams.add(nextLogStream); } } - result.withLogStreams(logStreams); + responseBuilder.logStreams(logStreams); if (nextToken != null) { - result.setNextToken(String.valueOf(nextToken)); + responseBuilder.nextToken(String.valueOf(nextToken)); } - return result; + return responseBuilder.build(); }); Map constraintsMap = new HashMap<>(); diff --git a/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandlerTest.java b/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandlerTest.java index 758deacb50..f8b95fdafc 100644 --- a/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandlerTest.java +++ b/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandlerTest.java @@ -39,10 +39,6 @@ import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory; import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; -import com.amazonaws.services.logs.AWSLogs; -import 
com.amazonaws.services.logs.model.GetLogEventsRequest; -import com.amazonaws.services.logs.model.GetLogEventsResult; -import com.amazonaws.services.logs.model.OutputLogEvent; import com.google.common.collect.ImmutableList; import com.google.common.io.ByteStreams; import org.apache.arrow.vector.types.Types; @@ -59,6 +55,10 @@ import software.amazon.awssdk.core.ResponseInputStream; import software.amazon.awssdk.core.sync.RequestBody; import software.amazon.awssdk.services.athena.AthenaClient; +import software.amazon.awssdk.services.cloudwatchlogs.CloudWatchLogsClient; +import software.amazon.awssdk.services.cloudwatchlogs.model.GetLogEventsRequest; +import software.amazon.awssdk.services.cloudwatchlogs.model.GetLogEventsResponse; +import software.amazon.awssdk.services.cloudwatchlogs.model.OutputLogEvent; import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.model.GetObjectRequest; import software.amazon.awssdk.services.s3.model.GetObjectResponse; @@ -95,7 +95,7 @@ public class CloudwatchRecordHandlerTest private EncryptionKeyFactory keyFactory = new LocalKeyFactory(); @Mock - private AWSLogs mockAwsLogs; + private CloudWatchLogsClient mockAwsLogs; @Mock private S3Client mockS3; @@ -144,39 +144,40 @@ public void setUp() GetLogEventsRequest request = (GetLogEventsRequest) invocationOnMock.getArguments()[0]; //Check that predicate pushdown was propagated to cloudwatch - assertNotNull(request.getStartTime()); - assertNotNull(request.getEndTime()); + assertNotNull(request.startTime()); + assertNotNull(request.endTime()); - GetLogEventsResult result = new GetLogEventsResult(); + GetLogEventsResponse.Builder responseBuilder = GetLogEventsResponse.builder(); Integer nextToken; - if (request.getNextToken() == null) { + if (request.nextToken() == null) { nextToken = 1; } - else if (Integer.valueOf(request.getNextToken()) < 3) { - nextToken = Integer.valueOf(request.getNextToken()) + 1; + else if (Integer.valueOf(request.nextToken()) < 
3) { + nextToken = Integer.valueOf(request.nextToken()) + 1; } else { nextToken = null; } List logEvents = new ArrayList<>(); - if (request.getNextToken() == null || Integer.valueOf(request.getNextToken()) < 3) { - long continuation = request.getNextToken() == null ? 0 : Integer.valueOf(request.getNextToken()); + if (request.nextToken() == null || Integer.valueOf(request.nextToken()) < 3) { + long continuation = request.nextToken() == null ? 0 : Integer.valueOf(request.nextToken()); for (int i = 0; i < 100_000; i++) { - OutputLogEvent outputLogEvent = new OutputLogEvent(); - outputLogEvent.setMessage("message-" + (continuation * i)); - outputLogEvent.setTimestamp(i * 100L); + OutputLogEvent outputLogEvent = OutputLogEvent.builder() + .message("message-" + (continuation * i)) + .timestamp(i * 100L) + .build(); logEvents.add(outputLogEvent); } } - result.withEvents(logEvents); + responseBuilder.events(logEvents); if (nextToken != null) { - result.setNextForwardToken(String.valueOf(nextToken)); + responseBuilder.nextForwardToken(String.valueOf(nextToken)); } - return result; + return responseBuilder.build(); }); } diff --git a/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/integ/CloudwatchIntegTest.java b/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/integ/CloudwatchIntegTest.java index 9c2c9cd839..c9d1dd9f73 100644 --- a/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/integ/CloudwatchIntegTest.java +++ b/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/integ/CloudwatchIntegTest.java @@ -20,11 +20,6 @@ package com.amazonaws.athena.connectors.cloudwatch.integ; import com.amazonaws.athena.connector.integ.IntegrationTestBase; -import com.amazonaws.services.logs.AWSLogs; -import com.amazonaws.services.logs.AWSLogsClientBuilder; -import com.amazonaws.services.logs.model.DeleteLogGroupRequest; -import com.amazonaws.services.logs.model.InputLogEvent; -import 
com.amazonaws.services.logs.model.PutLogEventsRequest; import com.google.common.collect.ImmutableList; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -38,6 +33,9 @@ import software.amazon.awscdk.services.logs.LogGroup; import software.amazon.awscdk.services.logs.LogStream; import software.amazon.awssdk.services.athena.model.Row; +import software.amazon.awssdk.services.cloudwatchlogs.CloudWatchLogsClient; +import software.amazon.awssdk.services.cloudwatchlogs.model.InputLogEvent; +import software.amazon.awssdk.services.cloudwatchlogs.model.PutLogEventsRequest; import java.util.ArrayList; import java.util.List; @@ -134,20 +132,21 @@ protected void setUpTableData() logger.info("Setting up Log Group: {}, Log Stream: {}", logGroupName, logStreamName); logger.info("----------------------------------------------------"); - AWSLogs logsClient = AWSLogsClientBuilder.defaultClient(); + CloudWatchLogsClient logsClient = CloudWatchLogsClient.create(); try { - logsClient.putLogEvents(new PutLogEventsRequest() - .withLogGroupName(logGroupName) - .withLogStreamName(logStreamName) - .withLogEvents( - new InputLogEvent().withTimestamp(currentTimeMillis).withMessage("Space, the final frontier."), - new InputLogEvent().withTimestamp(fromTimeMillis).withMessage(logMessage), - new InputLogEvent().withTimestamp(toTimeMillis + 5000) - .withMessage("To boldly go where no man has gone before!"))); + logsClient.putLogEvents(PutLogEventsRequest.builder() + .logGroupName(logGroupName) + .logStreamName(logStreamName) + .logEvents( + InputLogEvent.builder().timestamp(currentTimeMillis).message("Space, the final frontier.").build(), + InputLogEvent.builder().timestamp(fromTimeMillis).message(logMessage).build(), + InputLogEvent.builder().timestamp(toTimeMillis + 5000) + .message("To boldly go where no man has gone before!").build()) + .build()); } finally { - logsClient.shutdown(); + logsClient.close(); } } diff --git a/athena-datalakegen2/pom.xml b/athena-datalakegen2/pom.xml index 
c72c6c4813..670a4396b9 100644 --- a/athena-datalakegen2/pom.xml +++ b/athena-datalakegen2/pom.xml @@ -32,12 +32,18 @@ mssql-jdbc ${mssql.jdbc.version} - + - com.amazonaws - aws-java-sdk-rds - ${aws-sdk.version} + software.amazon.awssdk + rds + ${aws-sdk-v2.version} test + + + software.amazon.awssdk + netty-nio-client + + diff --git a/athena-db2-as400/pom.xml b/athena-db2-as400/pom.xml index 7c458b8caf..2165ff5019 100644 --- a/athena-db2-as400/pom.xml +++ b/athena-db2-as400/pom.xml @@ -33,12 +33,18 @@ jt400 20.0.7 - + - com.amazonaws - aws-java-sdk-rds - ${aws-sdk.version} + software.amazon.awssdk + rds + ${aws-sdk-v2.version} test + + + software.amazon.awssdk + netty-nio-client + + diff --git a/athena-db2/pom.xml b/athena-db2/pom.xml index 919f91b2b3..8601caf25d 100644 --- a/athena-db2/pom.xml +++ b/athena-db2/pom.xml @@ -33,12 +33,18 @@ jcc 11.5.9.0 - + - com.amazonaws - aws-java-sdk-rds - ${aws-sdk.version} + software.amazon.awssdk + rds + ${aws-sdk-v2.version} test + + + software.amazon.awssdk + netty-nio-client + + diff --git a/athena-docdb/pom.xml b/athena-docdb/pom.xml index 5dd645c20a..8982ee0159 100644 --- a/athena-docdb/pom.xml +++ b/athena-docdb/pom.xml @@ -28,11 +28,11 @@ 2022.47.1 test - + - com.amazonaws - aws-java-sdk-docdb - ${aws-sdk.version} + software.amazon.awssdk + docdb + ${aws-sdk-v2.version} test diff --git a/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/integ/DocDbIntegTest.java b/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/integ/DocDbIntegTest.java index 3cca51f94e..bf0a314e8a 100644 --- a/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/integ/DocDbIntegTest.java +++ b/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/integ/DocDbIntegTest.java @@ -27,11 +27,6 @@ import com.amazonaws.athena.connector.integ.data.ConnectorVpcAttributes; import com.amazonaws.athena.connector.integ.data.SecretsManagerCredentials; import 
com.amazonaws.athena.connector.integ.providers.ConnectorPackagingAttributesProvider; -import com.amazonaws.services.docdb.AmazonDocDB; -import com.amazonaws.services.docdb.AmazonDocDBClientBuilder; -import com.amazonaws.services.docdb.model.DBCluster; -import com.amazonaws.services.docdb.model.DescribeDBClustersRequest; -import com.amazonaws.services.docdb.model.DescribeDBClustersResult; import com.google.common.collect.ImmutableMap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -53,6 +48,10 @@ import software.amazon.awscdk.services.ec2.VpcAttributes; import software.amazon.awscdk.services.iam.PolicyDocument; import software.amazon.awssdk.services.athena.model.Row; +import software.amazon.awssdk.services.docdb.DocDbClient; +import software.amazon.awssdk.services.docdb.model.DBCluster; +import software.amazon.awssdk.services.docdb.model.DescribeDbClustersRequest; +import software.amazon.awssdk.services.docdb.model.DescribeDbClustersResponse; import software.amazon.awssdk.services.lambda.LambdaClient; import software.amazon.awssdk.services.lambda.model.InvocationType; import software.amazon.awssdk.services.lambda.model.InvokeRequest; @@ -191,15 +190,16 @@ private Stack getDocDbStack() { * Lambda. All exceptions thrown here will be caught in the calling function. 
*/ private Endpoint getClusterData() { - AmazonDocDB docDbClient = AmazonDocDBClientBuilder.defaultClient(); + DocDbClient docDbClient = DocDbClient.create(); try { - DescribeDBClustersResult dbClustersResult = docDbClient.describeDBClusters(new DescribeDBClustersRequest() - .withDBClusterIdentifier(dbClusterName)); - DBCluster cluster = dbClustersResult.getDBClusters().get(0); - return new Endpoint(cluster.getEndpoint(), cluster.getPort()); + DescribeDbClustersResponse dbClustersResponse = docDbClient.describeDBClusters(DescribeDbClustersRequest.builder() + .dbClusterIdentifier(dbClusterName) + .build()); + DBCluster cluster = dbClustersResponse.dbClusters().get(0); + return new Endpoint(cluster.endpoint(), cluster.port()); } finally { - docDbClient.shutdown(); + docDbClient.close(); } } diff --git a/athena-dynamodb/src/test/java/com/amazonaws/athena/connectors/dynamodb/DynamoDbIntegTest.java b/athena-dynamodb/src/test/java/com/amazonaws/athena/connectors/dynamodb/DynamoDbIntegTest.java index 37fa6734da..4e23966c90 100644 --- a/athena-dynamodb/src/test/java/com/amazonaws/athena/connectors/dynamodb/DynamoDbIntegTest.java +++ b/athena-dynamodb/src/test/java/com/amazonaws/athena/connectors/dynamodb/DynamoDbIntegTest.java @@ -143,7 +143,7 @@ protected Optional getConnectorAccessPolicy() { return Optional.of(PolicyDocument.Builder.create() .statements(ImmutableList.of(PolicyStatement.Builder.create() - .actions(ImmutableList.of("dynamodb:DescribeTable", "dynamodb:ListSchemas", + .actions(ImmutableList.of("dynamodb:DescribeTable", "dynamodb:ListTables", "dynamodb:Query", "dynamodb:Scan")) .resources(ImmutableList.of("*")) .effect(Effect.ALLOW) diff --git a/athena-federation-integ-test/pom.xml b/athena-federation-integ-test/pom.xml index 59df6b8e0f..bee6d6be9d 100644 --- a/athena-federation-integ-test/pom.xml +++ b/athena-federation-integ-test/pom.xml @@ -11,33 +11,6 @@ jar Amazon Athena Query Federation Integ Test - - com.amazonaws - aws-java-sdk-core - 
${aws-sdk.version} - - - com.fasterxml.jackson.datatype - jackson-datatype-jsr310 - - - com.fasterxml.jackson.dataformat - jackson-dataformat-cbor - - - com.fasterxml.jackson.core - jackson-core - - - com.fasterxml.jackson.core - jackson-databind - - - com.fasterxml.jackson.core - jackson-annotations - - - commons-cli commons-cli @@ -99,11 +72,11 @@ athena ${aws-sdk-v2.version} - + - com.amazonaws - aws-java-sdk-cloudformation - ${aws-sdk.version} + software.amazon.awssdk + cloudformation + ${aws-sdk-v2.version} diff --git a/athena-federation-integ-test/src/main/java/com/amazonaws/athena/connector/integ/clients/CloudFormationClient.java b/athena-federation-integ-test/src/main/java/com/amazonaws/athena/connector/integ/clients/CloudFormationClient.java index af5a92f9b7..37b290f0ad 100644 --- a/athena-federation-integ-test/src/main/java/com/amazonaws/athena/connector/integ/clients/CloudFormationClient.java +++ b/athena-federation-integ-test/src/main/java/com/amazonaws/athena/connector/integ/clients/CloudFormationClient.java @@ -19,15 +19,6 @@ */ package com.amazonaws.athena.connector.integ.clients; -import com.amazonaws.services.cloudformation.AmazonCloudFormation; -import com.amazonaws.services.cloudformation.AmazonCloudFormationClientBuilder; -import com.amazonaws.services.cloudformation.model.Capability; -import com.amazonaws.services.cloudformation.model.CreateStackRequest; -import com.amazonaws.services.cloudformation.model.CreateStackResult; -import com.amazonaws.services.cloudformation.model.DeleteStackRequest; -import com.amazonaws.services.cloudformation.model.DescribeStackEventsRequest; -import com.amazonaws.services.cloudformation.model.DescribeStackEventsResult; -import com.amazonaws.services.cloudformation.model.StackEvent; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.SerializationFeature; import org.slf4j.Logger; @@ -35,6 +26,14 @@ import org.testng.internal.collections.Pair; import 
software.amazon.awscdk.core.App; import software.amazon.awscdk.core.Stack; +import software.amazon.awssdk.services.cloudformation.model.Capability; +import software.amazon.awssdk.services.cloudformation.model.CreateStackRequest; +import software.amazon.awssdk.services.cloudformation.model.CreateStackResponse; +import software.amazon.awssdk.services.cloudformation.model.DeleteStackRequest; +import software.amazon.awssdk.services.cloudformation.model.DescribeStackEventsRequest; +import software.amazon.awssdk.services.cloudformation.model.DescribeStackEventsResponse; +import software.amazon.awssdk.services.cloudformation.model.ResourceStatus; +import software.amazon.awssdk.services.cloudformation.model.StackEvent; import java.util.List; @@ -46,13 +45,11 @@ public class CloudFormationClient { private static final Logger logger = LoggerFactory.getLogger(CloudFormationClient.class); - private static final String CF_CREATE_RESOURCE_IN_PROGRESS_STATUS = "CREATE_IN_PROGRESS"; - private static final String CF_CREATE_RESOURCE_FAILED_STATUS = "CREATE_FAILED"; private static final long sleepTimeMillis = 5000L; private final String stackName; private final String stackTemplate; - private final AmazonCloudFormation cloudFormationClient; + private final software.amazon.awssdk.services.cloudformation.CloudFormationClient cloudFormationClient; public CloudFormationClient(Pair stackPair) { @@ -66,7 +63,7 @@ public CloudFormationClient(App theApp, Stack theStack) stackTemplate = objectMapper .valueToTree(theApp.synth().getStackArtifact(theStack.getArtifactId()).getTemplate()) .toPrettyString(); - this.cloudFormationClient = AmazonCloudFormationClientBuilder.defaultClient(); + this.cloudFormationClient = software.amazon.awssdk.services.cloudformation.CloudFormationClient.create(); } /** @@ -81,11 +78,12 @@ public void createStack() logger.info("------------------------------------------------------"); // logger.info(stackTemplate); - CreateStackRequest createStackRequest = new 
CreateStackRequest() - .withStackName(stackName) - .withTemplateBody(stackTemplate) - .withDisableRollback(true) - .withCapabilities(Capability.CAPABILITY_NAMED_IAM); + CreateStackRequest createStackRequest = CreateStackRequest.builder() + .stackName(stackName) + .templateBody(stackTemplate) + .disableRollback(true) + .capabilities(Capability.CAPABILITY_NAMED_IAM) + .build(); processCreateStackRequest(createStackRequest); } @@ -98,22 +96,23 @@ private void processCreateStackRequest(CreateStackRequest createStackRequest) throws RuntimeException { // Create CloudFormation stack. - CreateStackResult result = cloudFormationClient.createStack(createStackRequest); - logger.info("Stack ID: {}", result.getStackId()); + CreateStackResponse response = cloudFormationClient.createStack(createStackRequest); + logger.info("Stack ID: {}", response.stackId()); - DescribeStackEventsRequest describeStackEventsRequest = new DescribeStackEventsRequest() - .withStackName(createStackRequest.getStackName()); - DescribeStackEventsResult describeStackEventsResult; + DescribeStackEventsRequest describeStackEventsRequest = DescribeStackEventsRequest.builder() + .stackName(createStackRequest.stackName()) + .build(); + DescribeStackEventsResponse describeStackEventsResponse; // Poll status of stack until stack has been created or creation has failed while (true) { - describeStackEventsResult = cloudFormationClient.describeStackEvents(describeStackEventsRequest); - StackEvent event = describeStackEventsResult.getStackEvents().get(0); - String resourceId = event.getLogicalResourceId(); - String resourceStatus = event.getResourceStatus(); + describeStackEventsResponse = cloudFormationClient.describeStackEvents(describeStackEventsRequest); + StackEvent event = describeStackEventsResponse.stackEvents().get(0); + String resourceId = event.logicalResourceId(); + ResourceStatus resourceStatus = event.resourceStatus(); logger.info("Resource Id: {}, Resource status: {}", resourceId, resourceStatus); - 
if (!resourceId.equals(event.getStackName()) || - resourceStatus.equals(CF_CREATE_RESOURCE_IN_PROGRESS_STATUS)) { + if (!resourceId.equals(event.stackName()) || + resourceStatus.equals(ResourceStatus.CREATE_IN_PROGRESS)) { try { Thread.sleep(sleepTimeMillis); continue; @@ -122,8 +121,8 @@ private void processCreateStackRequest(CreateStackRequest createStackRequest) throw new RuntimeException("Thread.sleep interrupted: " + e.getMessage(), e); } } - else if (resourceStatus.equals(CF_CREATE_RESOURCE_FAILED_STATUS)) { - throw new RuntimeException(getCloudFormationErrorReasons(describeStackEventsResult.getStackEvents())); + else if (resourceStatus.equals(ResourceStatus.CREATE_FAILED)) { + throw new RuntimeException(getCloudFormationErrorReasons(describeStackEventsResponse.stackEvents())); } break; } @@ -140,9 +139,9 @@ private String getCloudFormationErrorReasons(List stackEvents) new StringBuilder("CloudFormation stack creation failed due to the following reason(s):\n"); stackEvents.forEach(stackEvent -> { - if (stackEvent.getResourceStatus().equals(CF_CREATE_RESOURCE_FAILED_STATUS)) { + if (stackEvent.resourceStatus().equals(ResourceStatus.CREATE_FAILED)) { String errorMessage = String.format("Resource: %s, Reason: %s\n", - stackEvent.getLogicalResourceId(), stackEvent.getResourceStatusReason()); + stackEvent.logicalResourceId(), stackEvent.resourceStatusReason()); errorMessageBuilder.append(errorMessage); } }); @@ -160,14 +159,14 @@ public void deleteStack() logger.info("------------------------------------------------------"); try { - DeleteStackRequest request = new DeleteStackRequest().withStackName(stackName); + DeleteStackRequest request = DeleteStackRequest.builder().stackName(stackName).build(); cloudFormationClient.deleteStack(request); } catch (Exception e) { logger.error("Something went wrong... 
Manual resource cleanup may be needed!!!", e); } finally { - cloudFormationClient.shutdown(); + cloudFormationClient.close(); } } } diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/exceptions/AthenaConnectorException.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/exceptions/AthenaConnectorException.java new file mode 100644 index 0000000000..3743c552eb --- /dev/null +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/exceptions/AthenaConnectorException.java @@ -0,0 +1,93 @@ +/*- + * #%L + * Amazon Athena Query Federation SDK + * %% + * Copyright (C) 2019 - 2024 Amazon Web Services + * %% + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * #L% + */ +package com.amazonaws.athena.connector.lambda.exceptions; + +import software.amazon.awssdk.services.glue.model.ErrorDetails; + +import javax.annotation.Nonnull; + +import static java.util.Objects.requireNonNull; + +/** + * Exception that should be thrown by each individual Connector when an error is encountered. 
+ * That error will be using these following ErrorCode + * + * FederationSourceErrorCode: + * AccessDeniedException("AccessDeniedException"), + * EntityNotFoundException("EntityNotFoundException"), + * InvalidCredentialsException("InvalidCredentialsException"), + * InvalidInputException("InvalidInputException"), + * InvalidResponseException("InvalidResponseException"), + * OperationTimeoutException("OperationTimeoutException"), + * OperationNotSupportedException("OperationNotSupportedException"), + * InternalServiceException("InternalServiceException"), + * PartialFailureException("PartialFailureException"), + * ThrottlingException("ThrottlingException"); + * + */ + +public class AthenaConnectorException extends RuntimeException +{ + private final Object response; + + private final ErrorDetails errorDetails; + + public AthenaConnectorException(@Nonnull final Object response, + @Nonnull final String message, + @Nonnull final ErrorDetails errorDetails) + { + super(message); + this.errorDetails = requireNonNull(errorDetails); + this.response = requireNonNull(response); + requireNonNull(message); + } + + public AthenaConnectorException(@Nonnull final String message, + @Nonnull final ErrorDetails errorDetails) + { + super(message); + response = null; + this.errorDetails = requireNonNull(errorDetails); + requireNonNull(message); + } + + public AthenaConnectorException(@Nonnull final Object response, + @Nonnull final String message, + @Nonnull final Exception e, + + @Nonnull final ErrorDetails errorDetails) + { + super(message, e); + this.errorDetails = requireNonNull(errorDetails); + this.response = requireNonNull(response); + requireNonNull(message); + requireNonNull(e); + } + + public Object getResponse() + { + return response; + } + + public ErrorDetails getErrorDetails() + { + return errorDetails; + } +} diff --git a/athena-gcs/pom.xml b/athena-gcs/pom.xml index 5e1605a0a3..c1c3b33af7 100644 --- a/athena-gcs/pom.xml +++ b/athena-gcs/pom.xml @@ -24,7 +24,7 @@ 
net.java.dev.jna jna-platform - 5.14.0 + 5.15.0 org.slf4j @@ -75,7 +75,7 @@ com.google.cloud google-cloud-storage - 2.42.0 + 2.43.0 diff --git a/athena-google-bigquery/pom.xml b/athena-google-bigquery/pom.xml index c68fc6d1a8..45980e6263 100644 --- a/athena-google-bigquery/pom.xml +++ b/athena-google-bigquery/pom.xml @@ -23,14 +23,20 @@ net.java.dev.jna jna-platform - 5.14.0 + 5.15.0 - software.amazon.awscdk + software.amazon.awssdk rds - ${aws-cdk.version} + ${aws-sdk-v2.version} test + + + software.amazon.awssdk + netty-nio-client + + diff --git a/athena-hbase/pom.xml b/athena-hbase/pom.xml index 6b6d2d04d3..c736ffa678 100644 --- a/athena-hbase/pom.xml +++ b/athena-hbase/pom.xml @@ -40,11 +40,11 @@ ${aws-cdk.version} test - + - com.amazonaws - aws-java-sdk-emr - ${aws-sdk.version} + software.amazon.awssdk + emr + ${aws-sdk-v2.version} test diff --git a/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/integ/HbaseIntegTest.java b/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/integ/HbaseIntegTest.java index ac1b59237e..1435e09b70 100644 --- a/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/integ/HbaseIntegTest.java +++ b/athena-hbase/src/test/java/com/amazonaws/athena/connectors/hbase/integ/HbaseIntegTest.java @@ -26,14 +26,6 @@ import com.amazonaws.athena.connector.integ.data.ConnectorStackAttributes; import com.amazonaws.athena.connector.integ.data.ConnectorVpcAttributes; import com.amazonaws.athena.connector.integ.providers.ConnectorPackagingAttributesProvider; -import com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduce; -import com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduceClientBuilder; -import com.amazonaws.services.elasticmapreduce.model.Application; -import com.amazonaws.services.elasticmapreduce.model.ClusterSummary; -import com.amazonaws.services.elasticmapreduce.model.DescribeClusterRequest; -import com.amazonaws.services.elasticmapreduce.model.DescribeClusterResult; -import 
com.amazonaws.services.elasticmapreduce.model.ListClustersRequest; -import com.amazonaws.services.elasticmapreduce.model.ListClustersResult; import com.google.common.collect.ImmutableList; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -46,6 +38,13 @@ import software.amazon.awscdk.services.emr.CfnCluster; import software.amazon.awscdk.services.iam.PolicyDocument; import software.amazon.awssdk.services.athena.model.Row; +import software.amazon.awssdk.services.emr.EmrClient; +import software.amazon.awssdk.services.emr.model.Application; +import software.amazon.awssdk.services.emr.model.ClusterSummary; +import software.amazon.awssdk.services.emr.model.DescribeClusterRequest; +import software.amazon.awssdk.services.emr.model.DescribeClusterResponse; +import software.amazon.awssdk.services.emr.model.ListClustersRequest; +import software.amazon.awssdk.services.emr.model.ListClustersResponse; import software.amazon.awssdk.services.lambda.LambdaClient; import software.amazon.awssdk.services.lambda.model.InvocationType; import software.amazon.awssdk.services.lambda.model.InvokeRequest; @@ -145,10 +144,10 @@ private Pair getHbaseStack() { .name(dbClusterName) .visibleToAllUsers(Boolean.TRUE) .applications(ImmutableList.of( - new Application().withName("HBase"), - new Application().withName("Hive"), - new Application().withName("Hue"), - new Application().withName("Phoenix"))) + Application.builder().name("HBase").build(), + Application.builder().name("Hive").build(), + Application.builder().name("Hue").build(), + Application.builder().name("Phoenix").build())) .instances(CfnCluster.JobFlowInstancesConfigProperty.builder() .emrManagedMasterSecurityGroup(vpcAttributes.getSecurityGroupId()) .emrManagedSlaveSecurityGroup(vpcAttributes.getSecurityGroupId()) @@ -179,27 +178,27 @@ private Pair getHbaseStack() { */ private String getClusterData() { - AmazonElasticMapReduce emrClient = AmazonElasticMapReduceClientBuilder.defaultClient(); + EmrClient emrClient = 
EmrClient.create(); try { - ListClustersResult listClustersResult; + ListClustersResponse listClustersResult; String marker = null; Optional dbClusterId; do { // While cluster Id has not yet been found and there are more paginated results. // Get paginated list of EMR clusters. - listClustersResult = emrClient.listClusters(new ListClustersRequest().withMarker(marker)); + listClustersResult = emrClient.listClusters(ListClustersRequest.builder().marker(marker).build()); // Get the cluster id. dbClusterId = getClusterId(listClustersResult); // Get the marker for the next paginated request. - marker = listClustersResult.getMarker(); + marker = listClustersResult.marker(); } while (!dbClusterId.isPresent() && marker != null); // Get the cluster description using the cluster id. - DescribeClusterResult clusterResult = emrClient.describeCluster(new DescribeClusterRequest() - .withClusterId(dbClusterId.orElseThrow(() -> - new RuntimeException("Unable to get cluster description for: " + dbClusterName)))); - return clusterResult.getCluster().getMasterPublicDnsName(); + DescribeClusterResponse clusterResult = emrClient.describeCluster(DescribeClusterRequest.builder() + .clusterId(dbClusterId.orElseThrow(() -> + new RuntimeException("Unable to get cluster description for: " + dbClusterName))).build()); + return clusterResult.cluster().masterPublicDnsName(); } finally { - emrClient.shutdown(); + emrClient.close(); } } @@ -209,12 +208,12 @@ private String getClusterData() * @return Optional String containing the cluster Id that matches the cluster name, or Optional.empty() if match * was not found. 
*/ - private Optional getClusterId(ListClustersResult listClustersResult) + private Optional getClusterId(ListClustersResponse listClustersResult) { - for (ClusterSummary clusterSummary : listClustersResult.getClusters()) { - if (clusterSummary.getName().equals(dbClusterName)) { + for (ClusterSummary clusterSummary : listClustersResult.clusters()) { + if (clusterSummary.name().equals(dbClusterName)) { // Found match for cluster name - return cluster id. - String clusterId = clusterSummary.getId(); + String clusterId = clusterSummary.id(); logger.info("Found Cluster Id for {}: {}", dbClusterName, clusterId); return Optional.of(clusterId); } diff --git a/athena-hortonworks-hive/pom.xml b/athena-hortonworks-hive/pom.xml index 14a2ca89c3..cbb6440964 100644 --- a/athena-hortonworks-hive/pom.xml +++ b/athena-hortonworks-hive/pom.xml @@ -47,12 +47,18 @@ ${mockito.version} test - + - com.amazonaws - aws-java-sdk-rds - ${aws-sdk.version} + software.amazon.awssdk + rds + ${aws-sdk-v2.version} test + + + software.amazon.awssdk + netty-nio-client + + diff --git a/athena-jdbc/pom.xml b/athena-jdbc/pom.xml index 53cb6df19b..7aa8b0fd34 100644 --- a/athena-jdbc/pom.xml +++ b/athena-jdbc/pom.xml @@ -113,12 +113,18 @@ ${aws-cdk.version} test - + - com.amazonaws - aws-java-sdk-rds - ${aws-sdk.version} + software.amazon.awssdk + rds + ${aws-sdk-v2.version} test + + + software.amazon.awssdk + netty-nio-client + + diff --git a/athena-kafka/pom.xml b/athena-kafka/pom.xml index 9d0b983d66..e29b1a87cc 100644 --- a/athena-kafka/pom.xml +++ b/athena-kafka/pom.xml @@ -24,7 +24,7 @@ org.apache.kafka kafka-clients - 3.8.0 + 7.7.1-ce org.apache.avro @@ -34,12 +34,12 @@ io.confluent kafka-avro-serializer - 7.7.0 + 7.7.1 io.confluent kafka-protobuf-serializer - 7.7.0 + 7.7.1 com.fasterxml.jackson.core diff --git a/athena-msk/pom.xml b/athena-msk/pom.xml index a03c099b60..eb2ef81f98 100644 --- a/athena-msk/pom.xml +++ b/athena-msk/pom.xml @@ -18,28 +18,28 @@ org.jetbrains.kotlin kotlin-stdlib - 
1.9.10 + 2.0.20 org.jetbrains.kotlin kotlin-stdlib-common - 1.9.10 + 2.0.20 org.jetbrains.kotlin kotlin-stdlib-jdk8 - 1.9.10 + 2.0.20 org.jetbrains.kotlin kotlin-reflect - 1.9.10 + 2.0.20 runtime com.squareup.wire wire-schema - 4.9.0 + 5.1.0 com.squareup.wire @@ -49,19 +49,19 @@ com.squareup.wire wire-runtime-jvm - 4.9.0 + 5.1.0 runtime com.squareup.wire wire-compiler - 4.9.0 + 5.1.0 runtime com.amazonaws aws-java-sdk-sts - 1.12.771 + 1.12.772 software.amazon.msk @@ -81,7 +81,7 @@ org.apache.kafka kafka-clients - 3.8.0 + 7.7.1-ce org.apache.avro @@ -91,7 +91,7 @@ com.google.protobuf protobuf-java - 3.25.3 + 3.25.5 software.amazon.glue @@ -101,7 +101,7 @@ io.confluent kafka-protobuf-provider - 7.7.0 + 7.7.1 com.fasterxml.jackson.core diff --git a/athena-mysql/pom.xml b/athena-mysql/pom.xml index b980b85291..b281bdbd80 100644 --- a/athena-mysql/pom.xml +++ b/athena-mysql/pom.xml @@ -43,12 +43,18 @@ - + - com.amazonaws - aws-java-sdk-rds - ${aws-sdk.version} + software.amazon.awssdk + rds + ${aws-sdk-v2.version} test + + + software.amazon.awssdk + netty-nio-client + + diff --git a/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/integ/MySqlIntegTest.java b/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/integ/MySqlIntegTest.java index 9de09c16a4..c5f3cb7bbc 100644 --- a/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/integ/MySqlIntegTest.java +++ b/athena-mysql/src/test/java/com/amazonaws/athena/connectors/mysql/integ/MySqlIntegTest.java @@ -26,11 +26,6 @@ import com.amazonaws.athena.connector.lambda.domain.TableName; import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionInfo; import com.amazonaws.athena.connectors.jdbc.integ.JdbcTableUtils; -import com.amazonaws.services.rds.AmazonRDS; -import com.amazonaws.services.rds.AmazonRDSClientBuilder; -import com.amazonaws.services.rds.model.DescribeDBInstancesRequest; -import com.amazonaws.services.rds.model.DescribeDBInstancesResult; -import 
com.amazonaws.services.rds.model.Endpoint; import com.google.common.collect.ImmutableMap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -53,6 +48,10 @@ import software.amazon.awscdk.services.rds.StorageType; import software.amazon.awscdk.services.secretsmanager.Secret; import software.amazon.awssdk.services.athena.model.Row; +import software.amazon.awssdk.services.rds.RdsClient; +import software.amazon.awssdk.services.rds.model.DescribeDbInstancesRequest; +import software.amazon.awssdk.services.rds.model.DescribeDbInstancesResponse; +import software.amazon.awssdk.services.rds.model.Endpoint; import java.util.ArrayList; import java.util.Collections; @@ -195,14 +194,14 @@ private Stack getMySqlStack() */ private Endpoint getInstanceData() { - AmazonRDS rdsClient = AmazonRDSClientBuilder.defaultClient(); + RdsClient rdsClient = RdsClient.create(); try { - DescribeDBInstancesResult instancesResult = rdsClient.describeDBInstances(new DescribeDBInstancesRequest() - .withDBInstanceIdentifier(dbInstanceName)); - return instancesResult.getDBInstances().get(0).getEndpoint(); + DescribeDbInstancesResponse instancesResponse = rdsClient.describeDBInstances(DescribeDbInstancesRequest.builder() + .dbInstanceIdentifier(dbInstanceName).build()); + return instancesResponse.dbInstances().get(0).endpoint(); } finally { - rdsClient.shutdown(); + rdsClient.close(); } } @@ -213,7 +212,7 @@ private Endpoint getInstanceData() private void setEnvironmentVars(Endpoint endpoint) { String connectionString = String.format("mysql://jdbc:mysql://%s:%s/mysql?user=%s&password=%s", - endpoint.getAddress(), endpoint.getPort(), username, password); + endpoint.address(), endpoint.port(), username, password); String connectionStringTag = lambdaFunctionName + "_connection_string"; environmentVars.put("default", connectionString); environmentVars.put(connectionStringTag, connectionString); diff --git a/athena-oracle/pom.xml b/athena-oracle/pom.xml index 217a194e29..c5c591b33b 100644 --- 
a/athena-oracle/pom.xml +++ b/athena-oracle/pom.xml @@ -32,12 +32,18 @@ ojdbc8 23.5.0.24.07 - + - com.amazonaws - aws-java-sdk-rds - ${aws-sdk.version} + software.amazon.awssdk + rds + ${aws-sdk-v2.version} test + + + software.amazon.awssdk + netty-nio-client + + diff --git a/athena-postgresql/pom.xml b/athena-postgresql/pom.xml index 847b089a93..729080bbd9 100644 --- a/athena-postgresql/pom.xml +++ b/athena-postgresql/pom.xml @@ -39,12 +39,18 @@ ${mockito.version} test - + - com.amazonaws - aws-java-sdk-rds - ${aws-sdk.version} + software.amazon.awssdk + rds + ${aws-sdk-v2.version} test + + + software.amazon.awssdk + netty-nio-client + + diff --git a/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/integ/PostGreSqlIntegTest.java b/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/integ/PostGreSqlIntegTest.java index adb5646d00..68f3913340 100644 --- a/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/integ/PostGreSqlIntegTest.java +++ b/athena-postgresql/src/test/java/com/amazonaws/athena/connectors/postgresql/integ/PostGreSqlIntegTest.java @@ -26,11 +26,6 @@ import com.amazonaws.athena.connector.lambda.domain.TableName; import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionInfo; import com.amazonaws.athena.connectors.jdbc.integ.JdbcTableUtils; -import com.amazonaws.services.rds.AmazonRDS; -import com.amazonaws.services.rds.AmazonRDSClientBuilder; -import com.amazonaws.services.rds.model.DescribeDBInstancesRequest; -import com.amazonaws.services.rds.model.DescribeDBInstancesResult; -import com.amazonaws.services.rds.model.Endpoint; import com.google.common.collect.ImmutableMap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -53,6 +48,10 @@ import software.amazon.awscdk.services.rds.StorageType; import software.amazon.awscdk.services.secretsmanager.Secret; import software.amazon.awssdk.services.athena.model.Row; +import 
software.amazon.awssdk.services.rds.RdsClient; +import software.amazon.awssdk.services.rds.model.DescribeDbInstancesRequest; +import software.amazon.awssdk.services.rds.model.DescribeDbInstancesResponse; +import software.amazon.awssdk.services.rds.model.Endpoint; import java.util.ArrayList; import java.util.Collections; @@ -195,14 +194,15 @@ private Stack getPostGreSqlStack() */ private Endpoint getInstanceData() { - AmazonRDS rdsClient = AmazonRDSClientBuilder.defaultClient(); + RdsClient rdsClient = RdsClient.create(); try { - DescribeDBInstancesResult instancesResult = rdsClient.describeDBInstances(new DescribeDBInstancesRequest() - .withDBInstanceIdentifier(dbInstanceName)); - return instancesResult.getDBInstances().get(0).getEndpoint(); + DescribeDbInstancesResponse instancesResponse = rdsClient.describeDBInstances(DescribeDbInstancesRequest.builder() + .dbInstanceIdentifier(dbInstanceName) + .build()); + return instancesResponse.dbInstances().get(0).endpoint(); } finally { - rdsClient.shutdown(); + rdsClient.close(); } } @@ -213,7 +213,7 @@ private Endpoint getInstanceData() private void setEnvironmentVars(Endpoint endpoint) { String connectionString = String.format("postgres://jdbc:postgresql://%s:%s/postgres?user=%s&password=%s", - endpoint.getAddress(), endpoint.getPort(), username, password); + endpoint.address(), endpoint.port(), username, password); String connectionStringTag = lambdaFunctionName + "_connection_string"; environmentVars.put("default", connectionString); environmentVars.put(connectionStringTag, connectionString); diff --git a/athena-redshift/pom.xml b/athena-redshift/pom.xml index 7119660c3e..5ec2b541d0 100644 --- a/athena-redshift/pom.xml +++ b/athena-redshift/pom.xml @@ -38,16 +38,16 @@ test-jar test - + - com.amazonaws - aws-java-sdk-redshift - ${aws-sdk.version} + software.amazon.awssdk + redshift + ${aws-sdk-v2.version} - com.amazonaws - aws-java-sdk-redshiftserverless - ${aws-sdk.version} + software.amazon.awssdk + 
redshiftserverless + ${aws-sdk-v2.version} @@ -56,12 +56,18 @@ ${aws-cdk.version} test - + - com.amazonaws - aws-java-sdk-rds - ${aws-sdk.version} + software.amazon.awssdk + rds + ${aws-sdk-v2.version} test + + + software.amazon.awssdk + netty-nio-client + + diff --git a/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/integ/RedshiftIntegTest.java b/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/integ/RedshiftIntegTest.java index d32334a0e2..d103901df4 100644 --- a/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/integ/RedshiftIntegTest.java +++ b/athena-redshift/src/test/java/com/amazonaws/athena/connectors/redshift/integ/RedshiftIntegTest.java @@ -26,14 +26,8 @@ import com.amazonaws.athena.connector.lambda.domain.TableName; import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionInfo; import com.amazonaws.athena.connectors.jdbc.integ.JdbcTableUtils; -import com.amazonaws.services.redshift.AmazonRedshift; -import com.amazonaws.services.redshift.AmazonRedshiftClientBuilder; -import com.amazonaws.services.redshift.model.DescribeClustersRequest; -import com.amazonaws.services.redshift.model.DescribeClustersResult; -import com.amazonaws.services.redshift.model.Endpoint; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.testng.AssertJUnit; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; @@ -50,6 +44,10 @@ import software.amazon.awscdk.services.redshift.Login; import software.amazon.awscdk.services.redshift.NodeType; import software.amazon.awssdk.services.athena.model.Row; +import software.amazon.awssdk.services.redshift.RedshiftClient; +import software.amazon.awssdk.services.redshift.model.DescribeClustersRequest; +import software.amazon.awssdk.services.redshift.model.DescribeClustersResponse; +import software.amazon.awssdk.services.redshift.model.Endpoint; import java.util.ArrayList; import 
java.util.Collections; @@ -189,14 +187,14 @@ private Stack getRedshiftStack() */ private Endpoint getClusterData() { - AmazonRedshift redshiftClient = AmazonRedshiftClientBuilder.defaultClient(); + RedshiftClient redshiftClient = RedshiftClient.create(); try { - DescribeClustersResult clustersResult = redshiftClient.describeClusters(new DescribeClustersRequest() - .withClusterIdentifier(clusterName)); - return clustersResult.getClusters().get(0).getEndpoint(); + DescribeClustersResponse clustersResult = redshiftClient.describeClusters(DescribeClustersRequest.builder() + .clusterIdentifier(clusterName).build()); + return clustersResult.clusters().get(0).endpoint(); } finally { - redshiftClient.shutdown(); + redshiftClient.close(); } } @@ -207,7 +205,7 @@ private Endpoint getClusterData() private void setEnvironmentVars(Endpoint endpoint) { String connectionString = String.format("redshift://jdbc:redshift://%s:%s/public?user=%s&password=%s", - endpoint.getAddress(), endpoint.getPort(), username, password); + endpoint.address(), endpoint.port(), username, password); String connectionStringTag = lambdaFunctionName + "_connection_string"; environmentVars.put("default", connectionString); environmentVars.put(connectionStringTag, connectionString); diff --git a/athena-saphana/pom.xml b/athena-saphana/pom.xml index 85ca8aa644..31ca570654 100644 --- a/athena-saphana/pom.xml +++ b/athena-saphana/pom.xml @@ -27,12 +27,18 @@ test-jar test - + - com.amazonaws - aws-java-sdk-rds - ${aws-sdk.version} + software.amazon.awssdk + rds + ${aws-sdk-v2.version} test + + + software.amazon.awssdk + netty-nio-client + + @@ -62,7 +68,7 @@ com.sap.cloud.db.jdbc ngdbc - 2.21.11 + 2.22.11 diff --git a/athena-snowflake/pom.xml b/athena-snowflake/pom.xml index fcb730044b..aec0e7f807 100644 --- a/athena-snowflake/pom.xml +++ b/athena-snowflake/pom.xml @@ -32,12 +32,18 @@ snowflake-jdbc 3.19.0 - + - com.amazonaws - aws-java-sdk-rds - ${aws-sdk.version} + software.amazon.awssdk + rds + 
${aws-sdk-v2.version} test + + + software.amazon.awssdk + netty-nio-client + + diff --git a/athena-sqlserver/pom.xml b/athena-sqlserver/pom.xml index 6bfab70343..3b723f0a87 100644 --- a/athena-sqlserver/pom.xml +++ b/athena-sqlserver/pom.xml @@ -32,12 +32,18 @@ mssql-jdbc ${mssql.jdbc.version} - + - com.amazonaws - aws-java-sdk-rds - ${aws-sdk.version} + software.amazon.awssdk + rds + ${aws-sdk-v2.version} test + + + software.amazon.awssdk + netty-nio-client + + diff --git a/athena-synapse/pom.xml b/athena-synapse/pom.xml index 21fb490ca8..67b9bdc4c3 100644 --- a/athena-synapse/pom.xml +++ b/athena-synapse/pom.xml @@ -59,12 +59,18 @@ - + - com.amazonaws - aws-java-sdk-rds - ${aws-sdk.version} + software.amazon.awssdk + rds + ${aws-sdk-v2.version} test + + + software.amazon.awssdk + netty-nio-client + + diff --git a/athena-teradata/pom.xml b/athena-teradata/pom.xml index 81d977c211..731002f877 100644 --- a/athena-teradata/pom.xml +++ b/athena-teradata/pom.xml @@ -27,12 +27,18 @@ test-jar test - + - com.amazonaws - aws-java-sdk-rds - ${aws-sdk.version} + software.amazon.awssdk + rds + ${aws-sdk-v2.version} test + + + software.amazon.awssdk + netty-nio-client + + diff --git a/athena-timestream/pom.xml b/athena-timestream/pom.xml index a58b2c13c0..f00279ec9d 100644 --- a/athena-timestream/pom.xml +++ b/athena-timestream/pom.xml @@ -47,14 +47,14 @@ ${slf4j-log4j.version} - com.amazonaws - aws-java-sdk-timestreamwrite - ${aws-sdk.version} + software.amazon.awssdk + timestreamwrite + ${aws-sdk-v2.version} - com.amazonaws - aws-java-sdk-timestreamquery - ${aws-sdk.version} + software.amazon.awssdk + timestreamquery + ${aws-sdk-v2.version} org.slf4j @@ -85,6 +85,12 @@ ${log4j2Version} runtime + + org.mockito + mockito-inline + ${mockito.version} + test + diff --git a/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamClientBuilder.java b/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamClientBuilder.java 
index 5f0a228f73..473ea85932 100644 --- a/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamClientBuilder.java +++ b/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamClientBuilder.java @@ -19,38 +19,42 @@ */ package com.amazonaws.athena.connectors.timestream; -import com.amazonaws.ClientConfiguration; -import com.amazonaws.services.timestreamquery.AmazonTimestreamQuery; -import com.amazonaws.services.timestreamquery.AmazonTimestreamQueryClientBuilder; -import com.amazonaws.services.timestreamwrite.AmazonTimestreamWrite; -import com.amazonaws.services.timestreamwrite.AmazonTimestreamWriteClientBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; +import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; +import software.amazon.awssdk.core.client.config.SdkAdvancedClientOption; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.regions.providers.DefaultAwsRegionProviderChain; +import software.amazon.awssdk.services.timestreamquery.TimestreamQueryClient; +import software.amazon.awssdk.services.timestreamwrite.TimestreamWriteClient; public class TimestreamClientBuilder { private static final Logger logger = LoggerFactory.getLogger(TimestreamClientBuilder.class); - + static Region defaultRegion = DefaultAwsRegionProviderChain.builder().build().getRegion(); private TimestreamClientBuilder() { // prevent instantiation with private constructor } - public static AmazonTimestreamQuery buildQueryClient(String sourceType) + public static TimestreamQueryClient buildQueryClient(String sourceType) { - return AmazonTimestreamQueryClientBuilder.standard().withClientConfiguration(buildClientConfiguration(sourceType)).build(); + return TimestreamQueryClient.builder().region(defaultRegion).credentialsProvider(DefaultCredentialsProvider.create()) + 
.overrideConfiguration(buildClientConfiguration(sourceType)).build(); } - public static AmazonTimestreamWrite buildWriteClient(String sourceType) + public static TimestreamWriteClient buildWriteClient(String sourceType) { - return AmazonTimestreamWriteClientBuilder.standard().withClientConfiguration(buildClientConfiguration(sourceType)).build(); + return TimestreamWriteClient.builder().region(defaultRegion).credentialsProvider(DefaultCredentialsProvider.create()) + .overrideConfiguration(buildClientConfiguration(sourceType)).build(); } - static ClientConfiguration buildClientConfiguration(String sourceType) + static ClientOverrideConfiguration buildClientConfiguration(String sourceType) { String userAgent = "aws-athena-" + sourceType + "-connector"; - ClientConfiguration clientConfiguration = new ClientConfiguration().withUserAgentPrefix(userAgent); - logger.info("Created client configuration with user agent {} for Timestream SDK", clientConfiguration.getUserAgentPrefix()); + ClientOverrideConfiguration clientConfiguration = ClientOverrideConfiguration.builder().putAdvancedOption(SdkAdvancedClientOption.USER_AGENT_PREFIX, userAgent).build(); + logger.info("Created client configuration with user agent {} for Timestream SDK is present", clientConfiguration.advancedOption(SdkAdvancedClientOption.USER_AGENT_PREFIX).isPresent()); return clientConfiguration; } } diff --git a/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandler.java b/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandler.java index 23fb89ad37..84cdb9fb76 100644 --- a/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandler.java +++ b/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandler.java @@ -42,16 +42,6 @@ import com.amazonaws.athena.connector.util.PaginatedRequestIterator; import 
com.amazonaws.athena.connectors.timestream.qpt.TimestreamQueryPassthrough; import com.amazonaws.athena.connectors.timestream.query.QueryFactory; -import com.amazonaws.services.timestreamquery.AmazonTimestreamQuery; -import com.amazonaws.services.timestreamquery.model.ColumnInfo; -import com.amazonaws.services.timestreamquery.model.Datum; -import com.amazonaws.services.timestreamquery.model.QueryRequest; -import com.amazonaws.services.timestreamquery.model.QueryResult; -import com.amazonaws.services.timestreamquery.model.Row; -import com.amazonaws.services.timestreamwrite.AmazonTimestreamWrite; -import com.amazonaws.services.timestreamwrite.model.ListDatabasesRequest; -import com.amazonaws.services.timestreamwrite.model.ListDatabasesResult; -import com.amazonaws.services.timestreamwrite.model.ListTablesResult; import com.google.common.collect.ImmutableMap; import org.apache.arrow.util.VisibleForTesting; import org.apache.arrow.vector.types.pojo.Field; @@ -62,6 +52,16 @@ import software.amazon.awssdk.services.glue.GlueClient; import software.amazon.awssdk.services.glue.model.Table; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.timestreamquery.TimestreamQueryClient; +import software.amazon.awssdk.services.timestreamquery.model.ColumnInfo; +import software.amazon.awssdk.services.timestreamquery.model.Datum; +import software.amazon.awssdk.services.timestreamquery.model.QueryRequest; +import software.amazon.awssdk.services.timestreamquery.model.QueryResponse; +import software.amazon.awssdk.services.timestreamquery.model.Row; +import software.amazon.awssdk.services.timestreamwrite.TimestreamWriteClient; +import software.amazon.awssdk.services.timestreamwrite.model.Database; +import software.amazon.awssdk.services.timestreamwrite.model.ListDatabasesRequest; +import software.amazon.awssdk.services.timestreamwrite.model.ListDatabasesResponse; import java.util.Collections; import java.util.List; @@ -90,8 
+90,8 @@ public class TimestreamMetadataHandler private final QueryFactory queryFactory = new QueryFactory(); private final GlueClient glue; - private final AmazonTimestreamQuery tsQuery; - private final AmazonTimestreamWrite tsMeta; + private final TimestreamQueryClient tsQuery; + private final TimestreamWriteClient tsMeta; private final TimestreamQueryPassthrough queryPassthrough; @@ -106,8 +106,8 @@ public TimestreamMetadataHandler(java.util.Map configOptions) @VisibleForTesting protected TimestreamMetadataHandler( - AmazonTimestreamQuery tsQuery, - AmazonTimestreamWrite tsMeta, + TimestreamQueryClient tsQuery, + TimestreamWriteClient tsMeta, GlueClient glue, EncryptionKeyFactory keyFactory, SecretsManagerClient secretsManager, @@ -136,9 +136,9 @@ public GetDataSourceCapabilitiesResponse doGetDataSourceCapabilities(BlockAlloca public ListSchemasResponse doListSchemaNames(BlockAllocator blockAllocator, ListSchemasRequest request) throws Exception { - List schemas = PaginatedRequestIterator.stream(this::doListSchemaNamesOnePage, ListDatabasesResult::getNextToken) - .flatMap(result -> result.getDatabases().stream()) - .map(db -> db.getDatabaseName()) + List schemas = PaginatedRequestIterator.stream(this::doListSchemaNamesOnePage, ListDatabasesResponse::nextToken) + .flatMap(result -> result.databases().stream()) + .map(Database::databaseName) .collect(Collectors.toList()); return new ListSchemasResponse( @@ -146,9 +146,9 @@ public ListSchemasResponse doListSchemaNames(BlockAllocator blockAllocator, List schemas); } - private ListDatabasesResult doListSchemaNamesOnePage(String nextToken) + private ListDatabasesResponse doListSchemaNamesOnePage(String nextToken) { - return tsMeta.listDatabases(new ListDatabasesRequest().withNextToken(nextToken)); + return tsMeta.listDatabases(ListDatabasesRequest.builder().nextToken(nextToken).build()); } @Override @@ -159,7 +159,7 @@ public ListTablesResponse doListTables(BlockAllocator blockAllocator, ListTables try { return 
doListTablesInternal(blockAllocator, request); } - catch (com.amazonaws.services.timestreamwrite.model.ResourceNotFoundException ex) { + catch (software.amazon.awssdk.services.timestreamwrite.model.ResourceNotFoundException ex) { // If it fails then we will retry after resolving the schema name by ignoring the casing String resolvedSchemaName = findSchemaNameIgnoringCase(request.getSchemaName()); request = new ListTablesRequest(request.getIdentity(), request.getQueryId(), request.getCatalogName(), resolvedSchemaName, request.getNextToken(), request.getPageSize()); @@ -191,43 +191,43 @@ private ListTablesResponse doListTablesInternal(BlockAllocator blockAllocator, L } // Otherwise don't retrieve all pages, just pass through the page token. - ListTablesResult timestreamResults = doListTablesOnePage(request.getSchemaName(), request.getNextToken()); - List tableNames = timestreamResults.getTables() + software.amazon.awssdk.services.timestreamwrite.model.ListTablesResponse timestreamResults = doListTablesOnePage(request.getSchemaName(), request.getNextToken()); + List tableNames = timestreamResults.tables() .stream() - .map(table -> new TableName(request.getSchemaName(), table.getTableName())) + .map(table -> new TableName(request.getSchemaName(), table.tableName())) .collect(Collectors.toList()); // Pass through whatever token we got from Glue to the user ListTablesResponse result = new ListTablesResponse( request.getCatalogName(), tableNames, - timestreamResults.getNextToken()); + timestreamResults.nextToken()); logger.debug("doListTables [paginated] result: {}", result); return result; } - private ListTablesResult doListTablesOnePage(String schemaName, String nextToken) + private software.amazon.awssdk.services.timestreamwrite.model.ListTablesResponse doListTablesOnePage(String schemaName, String nextToken) { // TODO: We should pass through the pageSize as withMaxResults(pageSize) - com.amazonaws.services.timestreamwrite.model.ListTablesRequest listTablesRequest = - 
new com.amazonaws.services.timestreamwrite.model.ListTablesRequest() - .withDatabaseName(schemaName) - .withNextToken(nextToken); + software.amazon.awssdk.services.timestreamwrite.model.ListTablesRequest listTablesRequest = software.amazon.awssdk.services.timestreamwrite.model.ListTablesRequest.builder() + .databaseName(schemaName) + .nextToken(nextToken) + .build(); return tsMeta.listTables(listTablesRequest); } private Stream getTableNamesInSchema(String schemaName) { - return PaginatedRequestIterator.stream((pageToken) -> doListTablesOnePage(schemaName, pageToken), ListTablesResult::getNextToken) - .flatMap(currResult -> currResult.getTables().stream()) - .map(table -> new TableName(schemaName, table.getTableName())); + return PaginatedRequestIterator.stream((pageToken) -> doListTablesOnePage(schemaName, pageToken), software.amazon.awssdk.services.timestreamwrite.model.ListTablesResponse::nextToken) + .flatMap(currResult -> currResult.tables().stream()) + .map(table -> new TableName(schemaName, table.tableName())); } private String findSchemaNameIgnoringCase(String schemaNameInsensitive) { - return PaginatedRequestIterator.stream(this::doListSchemaNamesOnePage, ListDatabasesResult::getNextToken) - .flatMap(result -> result.getDatabases().stream()) - .map(db -> db.getDatabaseName()) + return PaginatedRequestIterator.stream(this::doListSchemaNamesOnePage, ListDatabasesResponse::nextToken) + .flatMap(result -> result.databases().stream()) + .map(Database::databaseName) .filter(name -> name.equalsIgnoreCase(schemaNameInsensitive)) .findAny() .orElseThrow(() -> new RuntimeException(String.format("Could not find a case-insensitive match for schema name %s", schemaNameInsensitive))); @@ -238,9 +238,9 @@ private TableName findTableNameIgnoringCase(BlockAllocator blockAllocator, GetTa String caseInsenstiveSchemaNameMatch = findSchemaNameIgnoringCase(getTableRequest.getTableName().getSchemaName()); // based on AmazonMskMetadataHandler::findGlueRegistryNameIgnoringCasing - 
return PaginatedRequestIterator.stream((pageToken) -> doListTablesOnePage(caseInsenstiveSchemaNameMatch, pageToken), ListTablesResult::getNextToken) - .flatMap(result -> result.getTables().stream()) - .map(tbl -> new TableName(caseInsenstiveSchemaNameMatch, tbl.getTableName())) + return PaginatedRequestIterator.stream((pageToken) -> doListTablesOnePage(caseInsenstiveSchemaNameMatch, pageToken), software.amazon.awssdk.services.timestreamwrite.model.ListTablesResponse::nextToken) + .flatMap(result -> result.tables().stream()) + .map(tbl -> new TableName(caseInsenstiveSchemaNameMatch, tbl.tableName())) .filter(tbl -> tbl.getTableName().equalsIgnoreCase(getTableRequest.getTableName().getTableName())) .findAny() .orElseThrow(() -> new RuntimeException(String.format("Could not find a case-insensitive match for table name %s", getTableRequest.getTableName().getTableName()))); @@ -256,24 +256,24 @@ private Schema inferSchemaForTable(TableName tableName) logger.info("doGetTable: Retrieving schema for table[{}] from TimeStream using describeQuery[{}].", tableName, describeQuery); - QueryRequest queryRequest = new QueryRequest().withQueryString(describeQuery); + QueryRequest queryRequest = QueryRequest.builder().queryString(describeQuery).build(); SchemaBuilder schemaBuilder = SchemaBuilder.newBuilder(); do { - QueryResult queryResult = tsQuery.query(queryRequest); - for (Row next : queryResult.getRows()) { - List datum = next.getData(); + QueryResponse queryResult = tsQuery.query(queryRequest); + for (Row next : queryResult.rows()) { + List datum = next.data(); if (datum.size() != 3) { throw new RuntimeException("Unexpected datum size " + datum.size() + " while getting schema from datum[" + datum.toString() + "]"); } - Field nextField = TimestreamSchemaUtils.makeField(datum.get(0).getScalarValue(), datum.get(1).getScalarValue()); + Field nextField = TimestreamSchemaUtils.makeField(datum.get(0).scalarValue(), datum.get(1).scalarValue()); schemaBuilder.addField(nextField); } - 
queryRequest = new QueryRequest().withNextToken(queryResult.getNextToken()); + queryRequest = QueryRequest.builder().nextToken(queryResult.nextToken()).build(); } - while (queryRequest.getNextToken() != null); + while (queryRequest.nextToken() != null); return schemaBuilder.build(); } @@ -300,7 +300,7 @@ public GetTableResponse doGetTable(BlockAllocator blockAllocator, GetTableReques Schema schema = inferSchemaForTable(request.getTableName()); return new GetTableResponse(request.getCatalogName(), request.getTableName(), schema); } - catch (com.amazonaws.services.timestreamquery.model.ValidationException ex) { + catch (software.amazon.awssdk.services.timestreamquery.model.ValidationException ex) { logger.debug("Could not find table name matching {} in database {}. Falling back to case-insensitive lookup.", request.getTableName().getTableName(), request.getTableName().getSchemaName()); TableName resolvedTableName = findTableNameIgnoringCase(blockAllocator, request); @@ -319,13 +319,13 @@ public GetTableResponse doGetQueryPassthroughSchema(BlockAllocator allocator, Ge queryPassthrough.verify(request.getQueryPassthroughArguments()); String customerPassedQuery = request.getQueryPassthroughArguments().get(TimestreamQueryPassthrough.QUERY); - QueryRequest queryRequest = new QueryRequest().withQueryString(customerPassedQuery).withMaxRows(1); + QueryRequest queryRequest = QueryRequest.builder().queryString(customerPassedQuery).maxRows(1).build(); // Timestream Query does not provide a way to conduct a dry run or retrieve metadata results without execution. Therefore, we need to "seek" at least once before obtaining metadata. 
- QueryResult queryResult = tsQuery.query(queryRequest); - List columnInfo = queryResult.getColumnInfo(); + QueryResponse queryResult = tsQuery.query(queryRequest); + List columnInfo = queryResult.columnInfo(); SchemaBuilder schemaBuilder = SchemaBuilder.newBuilder(); for (ColumnInfo column : columnInfo) { - Field nextField = TimestreamSchemaUtils.makeField(column.getName(), column.getType().getScalarType().toLowerCase()); + Field nextField = TimestreamSchemaUtils.makeField(column.name(), column.type().scalarTypeAsString().toLowerCase()); schemaBuilder.addField(nextField); } diff --git a/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandler.java b/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandler.java index a8cc2be021..f25b7d7b41 100644 --- a/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandler.java +++ b/athena-timestream/src/main/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandler.java @@ -40,12 +40,6 @@ import com.amazonaws.athena.connectors.timestream.qpt.TimestreamQueryPassthrough; import com.amazonaws.athena.connectors.timestream.query.QueryFactory; import com.amazonaws.athena.connectors.timestream.query.SelectQueryBuilder; -import com.amazonaws.services.timestreamquery.AmazonTimestreamQuery; -import com.amazonaws.services.timestreamquery.model.Datum; -import com.amazonaws.services.timestreamquery.model.QueryRequest; -import com.amazonaws.services.timestreamquery.model.QueryResult; -import com.amazonaws.services.timestreamquery.model.Row; -import com.amazonaws.services.timestreamquery.model.TimeSeriesDataPoint; import org.apache.arrow.util.VisibleForTesting; import org.apache.arrow.vector.FieldVector; import org.apache.arrow.vector.holders.NullableBigIntHolder; @@ -59,6 +53,12 @@ import software.amazon.awssdk.services.athena.AthenaClient; import software.amazon.awssdk.services.s3.S3Client; import 
software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.timestreamquery.TimestreamQueryClient; +import software.amazon.awssdk.services.timestreamquery.model.Datum; +import software.amazon.awssdk.services.timestreamquery.model.QueryRequest; +import software.amazon.awssdk.services.timestreamquery.model.QueryResponse; +import software.amazon.awssdk.services.timestreamquery.model.Row; +import software.amazon.awssdk.services.timestreamquery.model.TimeSeriesDataPoint; import java.time.Instant; import java.time.ZoneId; @@ -85,7 +85,7 @@ public class TimestreamRecordHandler private static final String SOURCE_TYPE = "timestream"; private final QueryFactory queryFactory = new QueryFactory(); - private final AmazonTimestreamQuery tsQuery; + private final TimestreamQueryClient tsQuery; private final TimestreamQueryPassthrough queryPassthrough = new TimestreamQueryPassthrough(); public TimestreamRecordHandler(java.util.Map configOptions) @@ -99,7 +99,7 @@ public TimestreamRecordHandler(java.util.Map configOptions) } @VisibleForTesting - protected TimestreamRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, AmazonTimestreamQuery tsQuery, java.util.Map configOptions) + protected TimestreamRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, TimestreamQueryClient tsQuery, java.util.Map configOptions) { super(amazonS3, secretsManager, athena, SOURCE_TYPE, configOptions); this.tsQuery = tsQuery; @@ -135,15 +135,15 @@ protected void readWithConstraint(BlockSpiller spiller, ReadRecordsRequest recor long numRows = 0; do { - QueryResult queryResult = tsQuery.query(new QueryRequest().withQueryString(query).withNextToken(nextToken)); - List data = queryResult.getRows(); + QueryResponse queryResult = tsQuery.query(QueryRequest.builder().queryString(query).nextToken(nextToken).build()); + List data = queryResult.rows(); if (data != null) { numRows += 
data.size(); for (Row nextRow : data) { spiller.writeRows((Block block, int rowNum) -> rowWriter.writeRow(block, rowNum, nextRow) ? 1 : 0); } } - nextToken = queryResult.getNextToken(); + nextToken = queryResult.nextToken(); logger.info("readWithConstraint: numRows[{}]", numRows); } while (nextToken != null && !nextToken.isEmpty()); } @@ -158,7 +158,7 @@ private GeneratedRowWriter buildRowWriter(ReadRecordsRequest request) switch (Types.getMinorTypeForArrowType(nextField.getType())) { case VARCHAR: builder.withExtractor(nextField.getName(), (VarCharExtractor) (Object context, NullableVarCharHolder value) -> { - String stringValue = ((Row) context).getData().get(curFieldNum).getScalarValue(); + String stringValue = ((Row) context).data().get(curFieldNum).scalarValue(); if (stringValue != null) { value.isSet = 1; value.value = stringValue; @@ -170,7 +170,7 @@ private GeneratedRowWriter buildRowWriter(ReadRecordsRequest request) break; case FLOAT8: builder.withExtractor(nextField.getName(), (Float8Extractor) (Object context, NullableFloat8Holder value) -> { - String doubleValue = ((Row) context).getData().get(curFieldNum).getScalarValue(); + String doubleValue = ((Row) context).data().get(curFieldNum).scalarValue(); if (doubleValue != null) { value.isSet = 1; value.value = Double.valueOf(doubleValue); @@ -183,12 +183,12 @@ private GeneratedRowWriter buildRowWriter(ReadRecordsRequest request) case BIT: builder.withExtractor(nextField.getName(), (BitExtractor) (Object context, NullableBitHolder value) -> { value.isSet = 1; - value.value = Boolean.valueOf(((Row) context).getData().get(curFieldNum).getScalarValue()) == false ? 0 : 1; + value.value = Boolean.valueOf(((Row) context).data().get(curFieldNum).scalarValue()) == false ? 
0 : 1; }); break; case BIGINT: builder.withExtractor(nextField.getName(), (BigIntExtractor) (Object context, NullableBigIntHolder value) -> { - String longValue = ((Row) context).getData().get(curFieldNum).getScalarValue(); + String longValue = ((Row) context).data().get(curFieldNum).scalarValue(); if (longValue != null) { value.isSet = 1; value.value = Long.valueOf(longValue); @@ -200,7 +200,7 @@ private GeneratedRowWriter buildRowWriter(ReadRecordsRequest request) break; case DATEMILLI: builder.withExtractor(nextField.getName(), (DateMilliExtractor) (Object context, NullableDateMilliHolder value) -> { - String dateMilliValue = ((Row) context).getData().get(curFieldNum).getScalarValue(); + String dateMilliValue = ((Row) context).data().get(curFieldNum).scalarValue(); if (dateMilliValue != null) { value.isSet = 1; value.value = Instant.from(TIMESTAMP_FORMATTER.parse(dateMilliValue)).toEpochMilli(); @@ -230,30 +230,30 @@ private void buildTimeSeriesExtractor(GeneratedRowWriter.RowWriterBuilder builde (FieldVector vector, Extractor extractor, ConstraintProjector constraint) -> (Object context, int rowNum) -> { Row row = (Row) context; - Datum datum = row.getData().get(curFieldNum); + Datum datum = row.data().get(curFieldNum); Field timeField = field.getChildren().get(0).getChildren().get(0); Field valueField = field.getChildren().get(0).getChildren().get(1); - if (datum.getTimeSeriesValue() != null) { + if (datum.timeSeriesValue() != null) { List> values = new ArrayList<>(); - for (TimeSeriesDataPoint nextDatum : datum.getTimeSeriesValue()) { + for (TimeSeriesDataPoint nextDatum : datum.timeSeriesValue()) { Map eventMap = new HashMap<>(); - eventMap.put(timeField.getName(), Instant.from(TIMESTAMP_FORMATTER.parse(nextDatum.getTime())).toEpochMilli()); + eventMap.put(timeField.getName(), Instant.from(TIMESTAMP_FORMATTER.parse(nextDatum.time())).toEpochMilli()); switch (Types.getMinorTypeForArrowType(valueField.getType())) { case FLOAT8: - 
eventMap.put(valueField.getName(), Double.valueOf(nextDatum.getValue().getScalarValue())); + eventMap.put(valueField.getName(), Double.valueOf(nextDatum.value().scalarValue())); break; case BIGINT: - eventMap.put(valueField.getName(), Long.valueOf(nextDatum.getValue().getScalarValue())); + eventMap.put(valueField.getName(), Long.valueOf(nextDatum.value().scalarValue())); break; case INT: - eventMap.put(valueField.getName(), Integer.valueOf(nextDatum.getValue().getScalarValue())); + eventMap.put(valueField.getName(), Integer.valueOf(nextDatum.value().scalarValue())); break; case BIT: eventMap.put(valueField.getName(), - Boolean.valueOf(((Row) context).getData().get(curFieldNum).getScalarValue()) == false ? 0 : 1); + Boolean.valueOf(((Row) context).data().get(curFieldNum).scalarValue()) == false ? 0 : 1); break; } values.add(eventMap); diff --git a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TestUtils.java b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TestUtils.java index b09fccbbbf..5656dccb58 100644 --- a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TestUtils.java +++ b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TestUtils.java @@ -19,32 +19,21 @@ */ package com.amazonaws.athena.connectors.timestream; -import com.amazonaws.athena.connector.lambda.data.BlockUtils; -import com.amazonaws.athena.connector.lambda.data.FieldResolver; -import com.amazonaws.athena.connector.lambda.data.writers.GeneratedRowWriter; -import com.amazonaws.athena.connector.lambda.data.writers.extractors.Extractor; -import com.amazonaws.athena.connector.lambda.domain.predicate.ConstraintProjector; -import com.amazonaws.services.timestreamquery.model.Datum; -import com.amazonaws.services.timestreamquery.model.QueryResult; -import com.amazonaws.services.timestreamquery.model.Row; -import com.amazonaws.services.timestreamquery.model.TimeSeriesDataPoint; -import 
org.apache.arrow.vector.FieldVector; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.types.pojo.Schema; import org.mockito.stubbing.Answer; +import software.amazon.awssdk.services.timestreamquery.model.Datum; +import software.amazon.awssdk.services.timestreamquery.model.QueryResponse; +import software.amazon.awssdk.services.timestreamquery.model.Row; +import software.amazon.awssdk.services.timestreamquery.model.TimeSeriesDataPoint; -import java.text.SimpleDateFormat; import java.time.LocalDateTime; import java.util.ArrayList; -import java.util.Date; -import java.util.HashMap; import java.util.List; -import java.util.Map; import java.util.Random; import java.util.concurrent.atomic.AtomicLong; -import static org.apache.arrow.vector.types.Types.MinorType.FLOAT8; import static org.junit.Assert.assertEquals; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -59,17 +48,17 @@ private TestUtils() {} private static final String[] AZS = {"us-east-1a", "us-east-1b", "us-east-1c", "us-east-1d"}; - public static QueryResult makeMockQueryResult(Schema schemaForRead, int numRows) + public static QueryResponse makeMockQueryResult(Schema schemaForRead, int numRows) { return makeMockQueryResult(schemaForRead, numRows, 100, true); } - public static QueryResult makeMockQueryResult(Schema schemaForRead, int numRows, int maxDataGenerationRow, boolean isRandomAZ) + public static QueryResponse makeMockQueryResult(Schema schemaForRead, int numRows, int maxDataGenerationRow, boolean isRandomAZ) { - QueryResult mockResult = mock(QueryResult.class); + QueryResponse mockResult = mock(QueryResponse.class); final AtomicLong nextToken = new AtomicLong(0); - when(mockResult.getRows()).thenAnswer((Answer>) invocationOnMock -> { + when(mockResult.rows()).thenAnswer((Answer>) invocationOnMock -> { List rows = new ArrayList<>(); for (int i = 0; i < maxDataGenerationRow; i++) { 
nextToken.incrementAndGet(); @@ -78,15 +67,14 @@ public static QueryResult makeMockQueryResult(Schema schemaForRead, int numRows, columnData.add(makeValue(nextField, i, isRandomAZ)); } - Row row = new Row(); - row.setData(columnData); + Row row = Row.builder().data(columnData).build(); rows.add(row); } return rows; } ); - when(mockResult.getNextToken()).thenAnswer((Answer) invocationOnMock -> { + when(mockResult.nextToken()).thenAnswer((Answer) invocationOnMock -> { if (nextToken.get() < numRows) { return String.valueOf(nextToken.get()); } @@ -99,30 +87,30 @@ public static QueryResult makeMockQueryResult(Schema schemaForRead, int numRows, public static Datum makeValue(Field field, int num, boolean isRandomAZ) { - Datum datum = new Datum(); + Datum.Builder datum = Datum.builder(); switch (Types.getMinorTypeForArrowType(field.getType())) { case VARCHAR: if (field.getName().equals("az")) { - datum.setScalarValue(isRandomAZ ? AZS[RAND.nextInt(4)] : "us-east-1a"); + datum.scalarValue(isRandomAZ ? 
AZS[RAND.nextInt(4)] : "us-east-1a"); } else { - datum.setScalarValue(field.getName() + "_" + RAND.nextInt(10_000_000)); + datum.scalarValue(field.getName() + "_" + RAND.nextInt(10_000_000)); } break; case FLOAT8: - datum.setScalarValue(String.valueOf(RAND.nextFloat())); + datum.scalarValue(String.valueOf(RAND.nextFloat())); break; case INT: - datum.setScalarValue(String.valueOf(RAND.nextInt())); + datum.scalarValue(String.valueOf(RAND.nextInt())); break; case BIT: - datum.setScalarValue(String.valueOf(RAND.nextBoolean())); + datum.scalarValue(String.valueOf(RAND.nextBoolean())); break; case BIGINT: - datum.setScalarValue(String.valueOf(RAND.nextLong())); + datum.scalarValue(String.valueOf(RAND.nextLong())); break; case DATEMILLI: - datum.setScalarValue(startDate.plusDays(num).toString().replace('T', ' ')); + datum.scalarValue(startDate.plusDays(num).toString().replace('T', ' ')); break; case LIST: buildTimeSeries(field, datum, num); @@ -131,17 +119,17 @@ public static Datum makeValue(Field field, int num, boolean isRandomAZ) throw new RuntimeException("Unsupported field type[" + field.getType() + "] for field[" + field.getName() + "]"); } - return datum; + return datum.build(); } - private static void buildTimeSeries(Field field, Datum datum, int num) + private static void buildTimeSeries(Field field, Datum.Builder datum, int num) { List dataPoints = new ArrayList<>(); for (int i = 0; i < 10; i++) { - TimeSeriesDataPoint dataPoint = new TimeSeriesDataPoint(); - Datum dataPointValue = new Datum(); + TimeSeriesDataPoint.Builder dataPoint = TimeSeriesDataPoint.builder(); + Datum.Builder dataPointValue = Datum.builder(); - dataPoint.setTime(startDate.plusDays(num).toString().replace('T', ' ')); + dataPoint.time(startDate.plusDays(num).toString().replace('T', ' ')); /** * Presently we only support TimeSeries as LIST> @@ -152,22 +140,22 @@ private static void buildTimeSeries(Field field, Datum datum, int num) switch 
(Types.getMinorTypeForArrowType(baseSeriesType.getType())) { case FLOAT8: - dataPointValue.setScalarValue(String.valueOf(RAND.nextFloat())); + dataPointValue.scalarValue(String.valueOf(RAND.nextFloat())); break; case BIT: - dataPointValue.setScalarValue(String.valueOf(RAND.nextBoolean())); + dataPointValue.scalarValue(String.valueOf(RAND.nextBoolean())); break; case INT: - dataPointValue.setScalarValue(String.valueOf(RAND.nextInt())); + dataPointValue.scalarValue(String.valueOf(RAND.nextInt())); break; case BIGINT: - dataPointValue.setScalarValue(String.valueOf(RAND.nextLong())); + dataPointValue.scalarValue(String.valueOf(RAND.nextLong())); break; } - dataPoint.setValue(dataPointValue); - dataPoints.add(dataPoint); + dataPoint.value(dataPointValue.build()); + dataPoints.add(dataPoint.build()); } - datum.setTimeSeriesValue(dataPoints); + datum.timeSeriesValue(dataPoints); } } diff --git a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamClientBuilderTest.java b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamClientBuilderTest.java index c3c4d4a486..de4f83b0bb 100644 --- a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamClientBuilderTest.java +++ b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamClientBuilderTest.java @@ -19,8 +19,9 @@ */ package com.amazonaws.athena.connectors.timestream; -import com.amazonaws.ClientConfiguration; import org.junit.Test; +import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; +import software.amazon.awssdk.core.client.config.SdkAdvancedClientOption; import static org.junit.Assert.assertEquals; @@ -29,7 +30,7 @@ public class TimestreamClientBuilderTest { @Test public void testUserAgentField() { - ClientConfiguration clientConfiguration = TimestreamClientBuilder.buildClientConfiguration("timestream"); - assertEquals("aws-athena-timestream-connector", 
clientConfiguration.getUserAgentPrefix()); + ClientOverrideConfiguration clientConfiguration = TimestreamClientBuilder.buildClientConfiguration("timestream"); + assertEquals("aws-athena-timestream-connector", clientConfiguration.advancedOption(SdkAdvancedClientOption.USER_AGENT_PREFIX).get()); } } diff --git a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandlerTest.java b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandlerTest.java index 0744aae186..9262c45f68 100644 --- a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandlerTest.java +++ b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamMetadataHandlerTest.java @@ -40,17 +40,6 @@ import com.amazonaws.athena.connector.lambda.metadata.MetadataResponse; import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; -import com.amazonaws.services.timestreamquery.AmazonTimestreamQuery; -import com.amazonaws.services.timestreamquery.model.Datum; -import com.amazonaws.services.timestreamquery.model.QueryRequest; -import com.amazonaws.services.timestreamquery.model.QueryResult; -import com.amazonaws.services.timestreamquery.model.Row; -import com.amazonaws.services.timestreamwrite.AmazonTimestreamWrite; -import com.amazonaws.services.timestreamwrite.model.Database; -import com.amazonaws.services.timestreamwrite.model.ListDatabasesRequest; -import com.amazonaws.services.timestreamwrite.model.ListDatabasesResult; -import com.amazonaws.services.timestreamwrite.model.ListTablesResult; -import com.amazonaws.services.timestreamwrite.model.Table; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.types.pojo.Schema; @@ -68,6 +57,16 @@ import software.amazon.awssdk.services.glue.model.Column; import 
software.amazon.awssdk.services.glue.model.StorageDescriptor; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.timestreamquery.TimestreamQueryClient; +import software.amazon.awssdk.services.timestreamquery.model.Datum; +import software.amazon.awssdk.services.timestreamquery.model.QueryRequest; +import software.amazon.awssdk.services.timestreamquery.model.QueryResponse; +import software.amazon.awssdk.services.timestreamquery.model.Row; +import software.amazon.awssdk.services.timestreamwrite.TimestreamWriteClient; +import software.amazon.awssdk.services.timestreamwrite.model.Database; +import software.amazon.awssdk.services.timestreamwrite.model.ListDatabasesRequest; +import software.amazon.awssdk.services.timestreamwrite.model.ListDatabasesResponse; +import software.amazon.awssdk.services.timestreamwrite.model.Table; import java.util.ArrayList; import java.util.Collections; @@ -77,9 +76,9 @@ import static com.amazonaws.athena.connector.lambda.domain.predicate.Constraints.DEFAULT_NO_LIMIT; import static com.amazonaws.athena.connector.lambda.handlers.GlueMetadataHandler.VIEW_METADATA_FIELD; import static com.amazonaws.athena.connector.lambda.metadata.ListTablesRequest.UNLIMITED_PAGE_SIZE_VALUE; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import static org.mockito.ArgumentMatchers.nullable; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -99,9 +98,9 @@ public class TimestreamMetadataHandlerTest @Mock protected AthenaClient mockAthena; @Mock - protected AmazonTimestreamQuery mockTsQuery; + protected TimestreamQueryClient mockTsQuery; @Mock - protected AmazonTimestreamWrite mockTsMeta; + protected TimestreamWriteClient mockTsMeta; @Mock protected GlueClient mockGlue; @@ -140,26 +139,26 @@ public void 
doListSchemaNames() String newNextToken = null; List databases = new ArrayList<>(); - if (request.getNextToken() == null) { + if (request.nextToken() == null) { for (int i = 0; i < 10; i++) { - databases.add(new Database().withDatabaseName("database_" + i)); + databases.add(Database.builder().databaseName("database_" + i).build()); } newNextToken = "1"; } - else if (request.getNextToken().equals("1")) { + else if (request.nextToken().equals("1")) { for (int i = 10; i < 100; i++) { - databases.add(new Database().withDatabaseName("database_" + i)); + databases.add(Database.builder().databaseName("database_" + i).build()); } newNextToken = "2"; } - else if (request.getNextToken().equals("2")) { + else if (request.nextToken().equals("2")) { for (int i = 100; i < 1000; i++) { - databases.add(new Database().withDatabaseName("database_" + i)); + databases.add(Database.builder().databaseName("database_" + i).build()); } newNextToken = null; } - return new ListDatabasesResult().withDatabases(databases).withNextToken(newNextToken); + return ListDatabasesResponse.builder().databases(databases).nextToken(newNextToken).build(); }); ListSchemasRequest req = new ListSchemasRequest(identity, "queryId", "default"); @@ -182,33 +181,33 @@ public void doListTables() { logger.info("doListTables - enter"); - when(mockTsMeta.listTables(nullable(com.amazonaws.services.timestreamwrite.model.ListTablesRequest.class))) + when(mockTsMeta.listTables(nullable(software.amazon.awssdk.services.timestreamwrite.model.ListTablesRequest.class))) .thenAnswer((InvocationOnMock invocation) -> { - com.amazonaws.services.timestreamwrite.model.ListTablesRequest request = - invocation.getArgument(0, com.amazonaws.services.timestreamwrite.model.ListTablesRequest.class); + software.amazon.awssdk.services.timestreamwrite.model.ListTablesRequest request = + invocation.getArgument(0, software.amazon.awssdk.services.timestreamwrite.model.ListTablesRequest.class); String newNextToken = null; List
tables = new ArrayList<>(); - if (request.getNextToken() == null) { + if (request.nextToken() == null) { for (int i = 0; i < 10; i++) { - tables.add(new Table().withDatabaseName(request.getDatabaseName()).withTableName("table_" + i)); + tables.add(Table.builder().databaseName(request.databaseName()).tableName("table_" + i).build()); } newNextToken = "1"; } - else if (request.getNextToken().equals("1")) { + else if (request.nextToken().equals("1")) { for (int i = 10; i < 100; i++) { - tables.add(new Table().withDatabaseName(request.getDatabaseName()).withTableName("table_" + i)); + tables.add(Table.builder().databaseName(request.databaseName()).tableName("table_" + i).build()); } newNextToken = "2"; } - else if (request.getNextToken().equals("2")) { + else if (request.nextToken().equals("2")) { for (int i = 100; i < 1000; i++) { - tables.add(new Table().withDatabaseName(request.getDatabaseName()).withTableName("table_" + i)); + tables.add(Table.builder().databaseName(request.databaseName()).tableName("table_" + i).build()); } newNextToken = null; } - return new ListTablesResult().withTables(tables).withNextToken(newNextToken); + return software.amazon.awssdk.services.timestreamwrite.model.ListTablesResponse.builder().tables(tables).nextToken(newNextToken).build(); }); ListTablesRequest req = new ListTablesRequest(identity, "queryId", "default", defaultSchema, @@ -218,7 +217,7 @@ else if (request.getNextToken().equals("2")) { assertEquals(1000, res.getTables().size()); verify(mockTsMeta, times(3)) - .listTables(nullable(com.amazonaws.services.timestreamwrite.model.ListTablesRequest.class)); + .listTables(nullable(software.amazon.awssdk.services.timestreamwrite.model.ListTablesRequest.class)); Iterator schemaItr = res.getTables().iterator(); for (int i = 0; i < 1000; i++) { @@ -241,24 +240,24 @@ public void doGetTable() when(mockTsQuery.query(nullable(QueryRequest.class))).thenAnswer((InvocationOnMock invocation) -> { QueryRequest request = invocation.getArgument(0, 
QueryRequest.class); - assertEquals("DESCRIBE \"default\".\"table1\"", request.getQueryString()); + assertEquals("DESCRIBE \"default\".\"table1\"", request.queryString()); List rows = new ArrayList<>(); //TODO: Add types here - rows.add(new Row().withData(new Datum().withScalarValue("availability_zone"), - new Datum().withScalarValue("varchar"), - new Datum().withScalarValue("dimension"))); - rows.add(new Row().withData(new Datum().withScalarValue("measure_value"), - new Datum().withScalarValue("double"), - new Datum().withScalarValue("measure_value"))); - rows.add(new Row().withData(new Datum().withScalarValue("measure_name"), - new Datum().withScalarValue("varchar"), - new Datum().withScalarValue("measure_name"))); - rows.add(new Row().withData(new Datum().withScalarValue("time"), - new Datum().withScalarValue("timestamp"), - new Datum().withScalarValue("timestamp"))); - - return new QueryResult().withRows(rows); + rows.add(Row.builder().data(Datum.builder().scalarValue("availability_zone").build(), + Datum.builder().scalarValue("varchar").build(), + Datum.builder().scalarValue("dimension").build()).build()); + rows.add(Row.builder().data(Datum.builder().scalarValue("measure_value").build(), + Datum.builder().scalarValue("double").build(), + Datum.builder().scalarValue("measure_value").build()).build()); + rows.add(Row.builder().data(Datum.builder().scalarValue("measure_name").build(), + Datum.builder().scalarValue("varchar").build(), + Datum.builder().scalarValue("measure_name").build()).build()); + rows.add(Row.builder().data(Datum.builder().scalarValue("time").build(), + Datum.builder().scalarValue("timestamp").build(), + Datum.builder().scalarValue("timestamp").build()).build()); + + return QueryResponse.builder().rows(rows).build(); }); GetTableRequest req = new GetTableRequest(identity, diff --git a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandlerTest.java 
b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandlerTest.java index d7ad28e816..f3daeaff80 100644 --- a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandlerTest.java +++ b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/TimestreamRecordHandlerTest.java @@ -40,9 +40,6 @@ import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory; import com.amazonaws.athena.connector.lambda.security.FederatedIdentity; import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory; -import com.amazonaws.services.timestreamquery.AmazonTimestreamQuery; -import com.amazonaws.services.timestreamquery.model.QueryRequest; -import com.amazonaws.services.timestreamquery.model.QueryResult; import com.google.common.io.ByteStreams; import org.apache.arrow.vector.complex.reader.FieldReader; import org.apache.arrow.vector.types.Types; @@ -68,6 +65,9 @@ import software.amazon.awssdk.services.s3.model.PutObjectRequest; import software.amazon.awssdk.services.s3.model.PutObjectResponse; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import software.amazon.awssdk.services.timestreamquery.TimestreamQueryClient; +import software.amazon.awssdk.services.timestreamquery.model.QueryRequest; +import software.amazon.awssdk.services.timestreamquery.model.QueryResponse; import java.io.ByteArrayInputStream; import java.io.IOException; @@ -115,7 +115,7 @@ public class TimestreamRecordHandlerTest public TestName testName = new TestName(); @Mock - private AmazonTimestreamQuery mockClient; + private TimestreamQueryClient mockClient; @Mock private SecretsManagerClient mockSecretsManager; @@ -198,11 +198,11 @@ public void doReadRecordsNoSpill() int numRowsGenerated = 1_000; String expectedQuery = "SELECT measure_name, measure_value::double, az, time, hostname, region FROM \"my_schema\".\"my_table\" WHERE (\"az\" IN ('us-east-1a','us-east-1b'))"; - 
QueryResult mockResult = makeMockQueryResult(schemaForRead, numRowsGenerated); + QueryResponse mockResult = makeMockQueryResult(schemaForRead, numRowsGenerated); when(mockClient.query(nullable(QueryRequest.class))) - .thenAnswer((Answer) invocationOnMock -> { + .thenAnswer((Answer) invocationOnMock -> { QueryRequest request = (QueryRequest) invocationOnMock.getArguments()[0]; - assertEquals(expectedQuery, request.getQueryString().replace("\n", "")); + assertEquals(expectedQuery, request.queryString().replace("\n", "")); return mockResult; } ); @@ -253,11 +253,11 @@ public void doReadRecordsSpill() { String expectedQuery = "SELECT measure_name, measure_value::double, az, time, hostname, region FROM \"my_schema\".\"my_table\" WHERE (\"az\" IN ('us-east-1a','us-east-1b'))"; - QueryResult mockResult = makeMockQueryResult(schemaForRead, 100_000); + QueryResponse mockResult = makeMockQueryResult(schemaForRead, 100_000); when(mockClient.query(nullable(QueryRequest.class))) - .thenAnswer((Answer) invocationOnMock -> { + .thenAnswer((Answer) invocationOnMock -> { QueryRequest request = (QueryRequest) invocationOnMock.getArguments()[0]; - assertEquals(expectedQuery, request.getQueryString().replace("\n", "")); + assertEquals(expectedQuery, request.queryString().replace("\n", "")); return mockResult; } ); @@ -327,11 +327,11 @@ public void readRecordsView() String expectedQuery = "WITH t1 AS ( select measure_name, az,sum(\"measure_value::double\") as value, count(*) as num_samples from \"my_schema\".\"my_table\" group by measure_name, az ) SELECT measure_name, az, value, num_samples FROM t1 WHERE (\"az\" IN ('us-east-1a','us-east-1b'))"; - QueryResult mockResult = makeMockQueryResult(schemaForReadView, 1_000); + QueryResponse mockResult = makeMockQueryResult(schemaForReadView, 1_000); when(mockClient.query(nullable(QueryRequest.class))) - .thenAnswer((Answer) invocationOnMock -> { + .thenAnswer((Answer) invocationOnMock -> { QueryRequest request = (QueryRequest) 
invocationOnMock.getArguments()[0]; - assertEquals(expectedQuery, request.getQueryString().replace("\n", "")); + assertEquals(expectedQuery, request.queryString().replace("\n", "")); return mockResult; } ); @@ -394,11 +394,11 @@ public void readRecordsTimeSeriesView() String expectedQuery = "WITH t1 AS ( select az, hostname, region, CREATE_TIME_SERIES(time, measure_value::double) as cpu_utilization from \"my_schema\".\"my_table\" WHERE measure_name = 'cpu_utilization' GROUP BY measure_name, az, hostname, region ) SELECT region, az, hostname, cpu_utilization FROM t1 WHERE (\"az\" IN ('us-east-1a','us-east-1b'))"; - QueryResult mockResult = makeMockQueryResult(schemaForReadView, 1_000); + QueryResponse mockResult = makeMockQueryResult(schemaForReadView, 1_000); when(mockClient.query(nullable(QueryRequest.class))) - .thenAnswer((Answer) invocationOnMock -> { + .thenAnswer((Answer) invocationOnMock -> { QueryRequest request = (QueryRequest) invocationOnMock.getArguments()[0]; - assertEquals("actual: " + request.getQueryString(), expectedQuery, request.getQueryString().replace("\n", "")); + assertEquals("actual: " + request.queryString(), expectedQuery, request.queryString().replace("\n", "")); return mockResult; } ); @@ -449,11 +449,11 @@ public void doReadRecordsNoSpillValidateTimeStamp() int numRows = 10; String expectedQuery = "SELECT measure_name, measure_value::double, az, time, hostname, region FROM \"my_schema\".\"my_table\" WHERE (\"az\" IN ('us-east-1a'))"; - QueryResult mockResult = makeMockQueryResult(schemaForRead, numRows, numRows, false); + QueryResponse mockResult = makeMockQueryResult(schemaForRead, numRows, numRows, false); when(mockClient.query(nullable(QueryRequest.class))) - .thenAnswer((Answer) invocationOnMock -> { + .thenAnswer((Answer) invocationOnMock -> { QueryRequest request = (QueryRequest) invocationOnMock.getArguments()[0]; - assertEquals(expectedQuery, request.getQueryString().replace("\n", "")); + assertEquals(expectedQuery, 
request.queryString().replace("\n", "")); return mockResult; } ); diff --git a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/integ/TimestreamIntegTest.java b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/integ/TimestreamIntegTest.java index 66dfd428ec..10f6575220 100644 --- a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/integ/TimestreamIntegTest.java +++ b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/integ/TimestreamIntegTest.java @@ -21,10 +21,6 @@ import com.amazonaws.athena.connector.integ.IntegrationTestBase; import com.amazonaws.athena.connectors.timestream.TimestreamClientBuilder; -import com.amazonaws.services.timestreamwrite.AmazonTimestreamWrite; -import com.amazonaws.services.timestreamwrite.model.CreateTableRequest; -import com.amazonaws.services.timestreamwrite.model.DeleteTableRequest; -import com.amazonaws.services.timestreamwrite.model.MeasureValueType; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.slf4j.Logger; @@ -38,6 +34,10 @@ import software.amazon.awscdk.services.iam.PolicyStatement; import software.amazon.awscdk.services.timestream.CfnDatabase; import software.amazon.awssdk.services.athena.model.Row; +import software.amazon.awssdk.services.timestreamwrite.TimestreamWriteClient; +import software.amazon.awssdk.services.timestreamwrite.model.CreateTableRequest; +import software.amazon.awssdk.services.timestreamwrite.model.DeleteTableRequest; +import software.amazon.awssdk.services.timestreamwrite.model.MeasureValueType; import java.util.ArrayList; import java.util.List; @@ -58,7 +58,7 @@ public class TimestreamIntegTest extends IntegrationTestBase private final String jokePunchline; private final String lambdaFunctionName; private final long[] timeStream; - private final AmazonTimestreamWrite timestreamWriteClient; + private final TimestreamWriteClient 
timestreamWriteClient; public TimestreamIntegTest() { @@ -123,9 +123,9 @@ private void createTimestreamTable() logger.info("Creating the Timestream table: {}", timestreamTableName); logger.info("----------------------------------------------------"); - timestreamWriteClient.createTable(new CreateTableRequest() - .withDatabaseName(timestreamDbName) - .withTableName(timestreamTableName)); + timestreamWriteClient.createTable(CreateTableRequest.builder() + .databaseName(timestreamDbName) + .tableName(timestreamTableName).build()); } /** @@ -138,16 +138,16 @@ private void deleteTimstreamTable() logger.info("----------------------------------------------------"); try { - timestreamWriteClient.deleteTable(new DeleteTableRequest() - .withDatabaseName(timestreamDbName) - .withTableName(timestreamTableName)); + timestreamWriteClient.deleteTable(DeleteTableRequest.builder() + .databaseName(timestreamDbName) + .tableName(timestreamTableName).build()); } catch (Exception e) { // Do not rethrow here. 
logger.error("Unable to delete Timestream table: " + e.getMessage(), e); } finally { - timestreamWriteClient.shutdown(); + timestreamWriteClient.close(); } } diff --git a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/integ/TimestreamWriteRecordRequestBuilder.java b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/integ/TimestreamWriteRecordRequestBuilder.java index 6c9b8acddc..73a9e63bd5 100644 --- a/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/integ/TimestreamWriteRecordRequestBuilder.java +++ b/athena-timestream/src/test/java/com/amazonaws/athena/connectors/timestream/integ/TimestreamWriteRecordRequestBuilder.java @@ -19,11 +19,11 @@ */ package com.amazonaws.athena.connectors.timestream.integ; -import com.amazonaws.services.timestreamwrite.model.Dimension; -import com.amazonaws.services.timestreamwrite.model.MeasureValueType; -import com.amazonaws.services.timestreamwrite.model.Record; -import com.amazonaws.services.timestreamwrite.model.TimeUnit; -import com.amazonaws.services.timestreamwrite.model.WriteRecordsRequest; +import software.amazon.awssdk.services.timestreamwrite.model.Dimension; +import software.amazon.awssdk.services.timestreamwrite.model.MeasureValueType; +import software.amazon.awssdk.services.timestreamwrite.model.Record; +import software.amazon.awssdk.services.timestreamwrite.model.TimeUnit; +import software.amazon.awssdk.services.timestreamwrite.model.WriteRecordsRequest; import java.util.ArrayList; import java.util.List; @@ -104,14 +104,14 @@ public TimestreamWriteRecordRequestBuilder withRecord(Map column long timeMillis) { List dimensions = new ArrayList<>(); - columns.forEach((k, v) -> dimensions.add(new Dimension().withName(k).withValue(v))); - records.add(new Record() - .withDimensions(dimensions) - .withMeasureName(measureName) - .withMeasureValue(measureValue) - .withMeasureValueType(measureValueType) - .withTime(String.valueOf(timeMillis)) - 
.withTimeUnit(TimeUnit.MILLISECONDS)); + columns.forEach((k, v) -> dimensions.add(Dimension.builder().name(k).value(v).build())); + records.add(Record.builder() + .dimensions(dimensions) + .measureName(measureName) + .measureValue(measureValue) + .measureValueType(measureValueType) + .time(String.valueOf(timeMillis)) + .timeUnit(TimeUnit.MILLISECONDS).build()); return this; } @@ -121,9 +121,9 @@ public TimestreamWriteRecordRequestBuilder withRecord(Map column */ public WriteRecordsRequest build() { - return new WriteRecordsRequest() - .withDatabaseName(databaseName) - .withTableName(tableName) - .withRecords(records); + return WriteRecordsRequest.builder() + .databaseName(databaseName) + .tableName(tableName) + .records(records).build(); } } diff --git a/pom.xml b/pom.xml index 1e9ce9a931..8567ef0a39 100644 --- a/pom.xml +++ b/pom.xml @@ -14,8 +14,8 @@ 11 3.13.0 + 1.12.772 2.25.56 - 1.12.770 1.2.2 1.6.0 1.204.0 @@ -433,7 +433,7 @@ org.apache.maven.plugins maven-gpg-plugin - 3.2.5 + 3.2.6 sign-artifacts diff --git a/tools/bump_versions/bump_connectors_version.py b/tools/bump_versions/bump_connectors_version.py index d478fd1c3e..d6cd78edd4 100755 --- a/tools/bump_versions/bump_connectors_version.py +++ b/tools/bump_versions/bump_connectors_version.py @@ -49,3 +49,7 @@ # Bump the versions in the yaml files yaml_files = glob.glob(f"{connector}/*.yaml") + glob.glob(f"{connector}/*.yml") common.update_yaml(yaml_files, new_version) + + # Bump the versions in the Dockerfiles + dockerfiles = glob.glob("Dockerfile") + common.update_dockerfile(dockerfiles, new_version) diff --git a/tools/bump_versions/common.py b/tools/bump_versions/common.py index 40ba70be79..bec31d3038 100755 --- a/tools/bump_versions/common.py +++ b/tools/bump_versions/common.py @@ -36,6 +36,13 @@ def update_yaml(yaml_files, new_version): for yml in yaml_files: subprocess.run(["sed", "-i", f"s/\(SemanticVersion:\s*\).*/\\1{new_version}/", yml]) subprocess.run(["sed", "-i", 
f"s/\(CodeUri:.*-\)[0-9]*\.[0-9]*\.[0-9]*\(-\?.*\.jar\)/\\1{new_version}\\2/", yml]) + subprocess.run(["sed", "-i", f"s/\(ImageUri:.*:\)[0-9]*\.[0-9]*\.[0-9]*\(\'\)/\\1{new_version}\\2/", yml]) + + +def update_dockerfile(dockerfiles, new_version): + for file in dockerfiles: + subprocess.run(["sed", "-i", f"s/\(COPY\s.*-\)[0-9]*\.[0-9]*\.[0-9]*\(\.jar.*\)/\\1{new_version}\\2/", file]) + subprocess.run(["sed", "-i", f"s/\(RUN\sjar\sxf.*-\)[0-9]*\.[0-9]*\.[0-9]*\(\.jar\)/\\1{new_version}\\2/", file]) def update_project_version(soup, new_version): From ed5719568bdf8b648001e1a217d0945bc268a715 Mon Sep 17 00:00:00 2001 From: Aimery Methena Date: Thu, 26 Sep 2024 10:55:49 -0400 Subject: [PATCH 44/87] remove unneeded Ref --- athena-datalakegen2/athena-datalakegen2-connection.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/athena-datalakegen2/athena-datalakegen2-connection.yaml b/athena-datalakegen2/athena-datalakegen2-connection.yaml index 1ec55b5af0..93a01cb338 100644 --- a/athena-datalakegen2/athena-datalakegen2-connection.yaml +++ b/athena-datalakegen2/athena-datalakegen2-connection.yaml @@ -65,8 +65,8 @@ Resources: ImageConfig: Command: [ "com.amazonaws.athena.connectors.datalakegen2.DataLakeGen2CompositeHandler" ] Description: "Enables Amazon Athena to communicate with DataLake Gen2 using JDBC" - Timeout: !Ref 900 - MemorySize: !Ref 3008 + Timeout: 900 + MemorySize: 3008 Role: !If [ NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRoleArn ] VpcConfig: SecurityGroupIds: !If [ HasSecurityGroups, !Ref SecurityGroupIds, !Ref "AWS::NoValue" ] From ce69cec20466447debe8994551561e45f9f18621 Mon Sep 17 00:00:00 2001 From: Trianz-Akshay <108925344+Trianz-Akshay@users.noreply.github.com> Date: Thu, 26 Sep 2024 20:59:10 +0530 Subject: [PATCH 45/87] SDK V2 ec2 changes (#2293) Co-authored-by: Aimery Methena <159072740+aimethed@users.noreply.github.com> --- athena-aws-cmdb/pom.xml | 6 +- .../aws/cmdb/TableProviderFactory.java | 7 +- 
.../aws/cmdb/tables/ec2/EbsTableProvider.java | 60 ++++---- .../aws/cmdb/tables/ec2/Ec2TableProvider.java | 126 ++++++++-------- .../cmdb/tables/ec2/ImagesTableProvider.java | 90 +++++------ .../cmdb/tables/ec2/RouteTableProvider.java | 74 ++++----- .../ec2/SecurityGroupsTableProvider.java | 62 ++++---- .../cmdb/tables/ec2/SubnetTableProvider.java | 43 +++--- .../aws/cmdb/tables/ec2/VpcTableProvider.java | 38 ++--- .../aws/cmdb/TableProviderFactoryTest.java | 6 +- .../cmdb/tables/ec2/EbsTableProviderTest.java | 56 ++++--- .../cmdb/tables/ec2/Ec2TableProviderTest.java | 142 +++++++++--------- .../tables/ec2/ImagesTableProviderTest.java | 81 +++++----- .../tables/ec2/RouteTableProviderTest.java | 69 ++++----- .../ec2/SecurityGroupsTableProviderTest.java | 54 +++---- .../tables/ec2/SubnetTableProviderTest.java | 42 +++--- .../cmdb/tables/ec2/VpcTableProviderTest.java | 37 +++-- 17 files changed, 484 insertions(+), 509 deletions(-) diff --git a/athena-aws-cmdb/pom.xml b/athena-aws-cmdb/pom.xml index 187d46137a..6cc732de9c 100644 --- a/athena-aws-cmdb/pom.xml +++ b/athena-aws-cmdb/pom.xml @@ -16,9 +16,9 @@ withdep - com.amazonaws - aws-java-sdk-ec2 - ${aws-sdk.version} + software.amazon.awssdk + ec2 + ${aws-sdk-v2.version} diff --git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactory.java b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactory.java index 41ee3350c3..cdd1743950 100644 --- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactory.java +++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactory.java @@ -32,9 +32,8 @@ import com.amazonaws.athena.connectors.aws.cmdb.tables.ec2.VpcTableProvider; import com.amazonaws.athena.connectors.aws.cmdb.tables.s3.S3BucketsTableProvider; import com.amazonaws.athena.connectors.aws.cmdb.tables.s3.S3ObjectsTableProvider; -import com.amazonaws.services.ec2.AmazonEC2; 
-import com.amazonaws.services.ec2.AmazonEC2ClientBuilder; import org.apache.arrow.util.VisibleForTesting; +import software.amazon.awssdk.services.ec2.Ec2Client; import software.amazon.awssdk.services.emr.EmrClient; import software.amazon.awssdk.services.rds.RdsClient; import software.amazon.awssdk.services.s3.S3Client; @@ -56,7 +55,7 @@ public class TableProviderFactory public TableProviderFactory(java.util.Map configOptions) { this( - AmazonEC2ClientBuilder.standard().build(), + Ec2Client.create(), EmrClient.create(), RdsClient.create(), S3Client.create(), @@ -64,7 +63,7 @@ public TableProviderFactory(java.util.Map configOptions) } @VisibleForTesting - protected TableProviderFactory(AmazonEC2 ec2, EmrClient emr, RdsClient rds, S3Client amazonS3, java.util.Map configOptions) + protected TableProviderFactory(Ec2Client ec2, EmrClient emr, RdsClient rds, S3Client amazonS3, java.util.Map configOptions) { addProvider(new Ec2TableProvider(ec2)); addProvider(new EbsTableProvider(ec2)); diff --git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/EbsTableProvider.java b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/EbsTableProvider.java index 48b6503757..7356a34ea7 100644 --- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/EbsTableProvider.java +++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/EbsTableProvider.java @@ -31,14 +31,14 @@ import com.amazonaws.athena.connector.lambda.metadata.GetTableResponse; import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest; import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider; -import com.amazonaws.services.ec2.AmazonEC2; -import com.amazonaws.services.ec2.model.DescribeVolumesRequest; -import com.amazonaws.services.ec2.model.DescribeVolumesResult; -import com.amazonaws.services.ec2.model.Volume; import org.apache.arrow.vector.types.Types; import 
org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.ec2.Ec2Client; +import software.amazon.awssdk.services.ec2.model.DescribeVolumesRequest; +import software.amazon.awssdk.services.ec2.model.DescribeVolumesResponse; +import software.amazon.awssdk.services.ec2.model.Volume; import java.util.Collections; import java.util.List; @@ -52,9 +52,9 @@ public class EbsTableProvider { private static final Logger logger = LoggerFactory.getLogger(EbsTableProvider.class); private static final Schema SCHEMA; - private AmazonEC2 ec2; + private Ec2Client ec2; - public EbsTableProvider(AmazonEC2 ec2) + public EbsTableProvider(Ec2Client ec2) { this.ec2 = ec2; } @@ -96,24 +96,24 @@ public GetTableResponse getTable(BlockAllocator blockAllocator, GetTableRequest public void readWithConstraint(BlockSpiller spiller, ReadRecordsRequest recordsRequest, QueryStatusChecker queryStatusChecker) { boolean done = false; - DescribeVolumesRequest request = new DescribeVolumesRequest(); + DescribeVolumesRequest.Builder request = DescribeVolumesRequest.builder(); ValueSet idConstraint = recordsRequest.getConstraints().getSummary().get("id"); if (idConstraint != null && idConstraint.isSingleValue()) { - request.setVolumeIds(Collections.singletonList(idConstraint.getSingleValue().toString())); + request.volumeIds(Collections.singletonList(idConstraint.getSingleValue().toString())); } while (!done) { - DescribeVolumesResult response = ec2.describeVolumes(request); + DescribeVolumesResponse response = ec2.describeVolumes(request.build()); - for (Volume volume : response.getVolumes()) { + for (Volume volume : response.volumes()) { logger.info("readWithConstraint: {}", response); instanceToRow(volume, spiller); } - request.setNextToken(response.getNextToken()); + request.nextToken(response.nextToken()); - if (response.getNextToken() == null || !queryStatusChecker.isQueryRunning()) { + if (response.nextToken() == null || 
!queryStatusChecker.isQueryRunning()) { done = true; } } @@ -133,26 +133,26 @@ private void instanceToRow(Volume volume, spiller.writeRows((Block block, int row) -> { boolean matched = true; - matched &= block.offerValue("id", row, volume.getVolumeId()); - matched &= block.offerValue("type", row, volume.getVolumeType()); - matched &= block.offerValue("availability_zone", row, volume.getAvailabilityZone()); - matched &= block.offerValue("created_time", row, volume.getCreateTime()); - matched &= block.offerValue("is_encrypted", row, volume.getEncrypted()); - matched &= block.offerValue("kms_key_id", row, volume.getKmsKeyId()); - matched &= block.offerValue("size", row, volume.getSize()); - matched &= block.offerValue("iops", row, volume.getIops()); - matched &= block.offerValue("snapshot_id", row, volume.getSnapshotId()); - matched &= block.offerValue("state", row, volume.getState()); - - if (volume.getAttachments().size() == 1) { - matched &= block.offerValue("target", row, volume.getAttachments().get(0).getInstanceId()); - matched &= block.offerValue("attached_device", row, volume.getAttachments().get(0).getDevice()); - matched &= block.offerValue("attachment_state", row, volume.getAttachments().get(0).getState()); - matched &= block.offerValue("attachment_time", row, volume.getAttachments().get(0).getAttachTime()); + matched &= block.offerValue("id", row, volume.volumeId()); + matched &= block.offerValue("type", row, volume.volumeTypeAsString()); + matched &= block.offerValue("availability_zone", row, volume.availabilityZone()); + matched &= block.offerValue("created_time", row, volume.createTime()); + matched &= block.offerValue("is_encrypted", row, volume.encrypted()); + matched &= block.offerValue("kms_key_id", row, volume.kmsKeyId()); + matched &= block.offerValue("size", row, volume.size()); + matched &= block.offerValue("iops", row, volume.iops()); + matched &= block.offerValue("snapshot_id", row, volume.snapshotId()); + matched &= block.offerValue("state", 
row, volume.stateAsString()); + + if (volume.attachments().size() == 1) { + matched &= block.offerValue("target", row, volume.attachments().get(0).instanceId()); + matched &= block.offerValue("attached_device", row, volume.attachments().get(0).device()); + matched &= block.offerValue("attachment_state", row, volume.attachments().get(0).stateAsString()); + matched &= block.offerValue("attachment_time", row, volume.attachments().get(0).attachTime()); } - List tags = volume.getTags().stream() - .map(next -> next.getKey() + ":" + next.getValue()).collect(Collectors.toList()); + List tags = volume.tags().stream() + .map(next -> next.key() + ":" + next.value()).collect(Collectors.toList()); matched &= block.offerComplexValue("tags", row, FieldResolver.DEFAULT, tags); return matched ? 1 : 0; diff --git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/Ec2TableProvider.java b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/Ec2TableProvider.java index dfa8876284..6bf9dbb58d 100644 --- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/Ec2TableProvider.java +++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/Ec2TableProvider.java @@ -32,19 +32,19 @@ import com.amazonaws.athena.connector.lambda.metadata.GetTableResponse; import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest; import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider; -import com.amazonaws.services.ec2.AmazonEC2; -import com.amazonaws.services.ec2.model.DescribeInstancesRequest; -import com.amazonaws.services.ec2.model.DescribeInstancesResult; -import com.amazonaws.services.ec2.model.Instance; -import com.amazonaws.services.ec2.model.InstanceNetworkInterface; -import com.amazonaws.services.ec2.model.InstanceState; -import com.amazonaws.services.ec2.model.Reservation; -import com.amazonaws.services.ec2.model.StateReason; -import 
com.amazonaws.services.ec2.model.Tag; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.ArrowType; import org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.types.pojo.Schema; +import software.amazon.awssdk.services.ec2.Ec2Client; +import software.amazon.awssdk.services.ec2.model.DescribeInstancesRequest; +import software.amazon.awssdk.services.ec2.model.DescribeInstancesResponse; +import software.amazon.awssdk.services.ec2.model.Instance; +import software.amazon.awssdk.services.ec2.model.InstanceNetworkInterface; +import software.amazon.awssdk.services.ec2.model.InstanceState; +import software.amazon.awssdk.services.ec2.model.Reservation; +import software.amazon.awssdk.services.ec2.model.StateReason; +import software.amazon.awssdk.services.ec2.model.Tag; import java.util.Collections; import java.util.List; @@ -57,9 +57,9 @@ public class Ec2TableProvider implements TableProvider { private static final Schema SCHEMA; - private AmazonEC2 ec2; + private Ec2Client ec2; - public Ec2TableProvider(AmazonEC2 ec2) + public Ec2TableProvider(Ec2Client ec2) { this.ec2 = ec2; } @@ -101,25 +101,25 @@ public GetTableResponse getTable(BlockAllocator blockAllocator, GetTableRequest public void readWithConstraint(BlockSpiller spiller, ReadRecordsRequest recordsRequest, QueryStatusChecker queryStatusChecker) { boolean done = false; - DescribeInstancesRequest request = new DescribeInstancesRequest(); + DescribeInstancesRequest.Builder request = DescribeInstancesRequest.builder(); ValueSet idConstraint = recordsRequest.getConstraints().getSummary().get("instance_id"); if (idConstraint != null && idConstraint.isSingleValue()) { - request.setInstanceIds(Collections.singletonList(idConstraint.getSingleValue().toString())); + request.instanceIds(Collections.singletonList(idConstraint.getSingleValue().toString())); } while (!done) { - DescribeInstancesResult response = ec2.describeInstances(request); + DescribeInstancesResponse 
response = ec2.describeInstances(request.build()); - for (Reservation reservation : response.getReservations()) { - for (Instance instance : reservation.getInstances()) { + for (Reservation reservation : response.reservations()) { + for (Instance instance : reservation.instances()) { instanceToRow(instance, spiller); } } - request.setNextToken(response.getNextToken()); + request.nextToken(response.nextToken()); - if (response.getNextToken() == null || !queryStatusChecker.isQueryRunning()) { + if (response.nextToken() == null || !queryStatusChecker.isQueryRunning()) { done = true; } } @@ -139,106 +139,106 @@ private void instanceToRow(Instance instance, spiller.writeRows((Block block, int row) -> { boolean matched = true; - matched &= block.offerValue("instance_id", row, instance.getInstanceId()); - matched &= block.offerValue("image_id", row, instance.getImageId()); - matched &= block.offerValue("instance_type", row, instance.getInstanceType()); - matched &= block.offerValue("platform", row, instance.getPlatform()); - matched &= block.offerValue("private_dns_name", row, instance.getPrivateDnsName()); - matched &= block.offerValue("private_ip_address", row, instance.getPrivateIpAddress()); - matched &= block.offerValue("public_dns_name", row, instance.getPublicDnsName()); - matched &= block.offerValue("public_ip_address", row, instance.getPublicIpAddress()); - matched &= block.offerValue("subnet_id", row, instance.getSubnetId()); - matched &= block.offerValue("vpc_id", row, instance.getVpcId()); - matched &= block.offerValue("architecture", row, instance.getArchitecture()); - matched &= block.offerValue("instance_lifecycle", row, instance.getInstanceLifecycle()); - matched &= block.offerValue("root_device_name", row, instance.getRootDeviceName()); - matched &= block.offerValue("root_device_type", row, instance.getRootDeviceType()); - matched &= block.offerValue("spot_instance_request_id", row, instance.getSpotInstanceRequestId()); - matched &= 
block.offerValue("virtualization_type", row, instance.getVirtualizationType()); - matched &= block.offerValue("key_name", row, instance.getKeyName()); - matched &= block.offerValue("kernel_id", row, instance.getKernelId()); - matched &= block.offerValue("capacity_reservation_id", row, instance.getCapacityReservationId()); - matched &= block.offerValue("launch_time", row, instance.getLaunchTime()); + matched &= block.offerValue("instance_id", row, instance.instanceId()); + matched &= block.offerValue("image_id", row, instance.imageId()); + matched &= block.offerValue("instance_type", row, instance.instanceTypeAsString()); + matched &= block.offerValue("platform", row, instance.platformAsString()); + matched &= block.offerValue("private_dns_name", row, instance.privateDnsName()); + matched &= block.offerValue("private_ip_address", row, instance.privateIpAddress()); + matched &= block.offerValue("public_dns_name", row, instance.publicDnsName()); + matched &= block.offerValue("public_ip_address", row, instance.publicIpAddress()); + matched &= block.offerValue("subnet_id", row, instance.subnetId()); + matched &= block.offerValue("vpc_id", row, instance.vpcId()); + matched &= block.offerValue("architecture", row, instance.architectureAsString()); + matched &= block.offerValue("instance_lifecycle", row, instance.instanceLifecycleAsString()); + matched &= block.offerValue("root_device_name", row, instance.rootDeviceName()); + matched &= block.offerValue("root_device_type", row, instance.rootDeviceTypeAsString()); + matched &= block.offerValue("spot_instance_request_id", row, instance.spotInstanceRequestId()); + matched &= block.offerValue("virtualization_type", row, instance.virtualizationTypeAsString()); + matched &= block.offerValue("key_name", row, instance.keyName()); + matched &= block.offerValue("kernel_id", row, instance.kernelId()); + matched &= block.offerValue("capacity_reservation_id", row, instance.capacityReservationId()); + matched &= 
block.offerValue("launch_time", row, instance.launchTime()); matched &= block.offerComplexValue("state", row, (Field field, Object val) -> { if (field.getName().equals("name")) { - return ((InstanceState) val).getName(); + return ((InstanceState) val).nameAsString(); } else if (field.getName().equals("code")) { - return ((InstanceState) val).getCode(); + return ((InstanceState) val).code(); } throw new RuntimeException("Unknown field " + field.getName()); - }, instance.getState()); + }, instance.state()); matched &= block.offerComplexValue("network_interfaces", row, (Field field, Object val) -> { if (field.getName().equals("status")) { - return ((InstanceNetworkInterface) val).getStatus(); + return ((InstanceNetworkInterface) val).statusAsString(); } else if (field.getName().equals("subnet")) { - return ((InstanceNetworkInterface) val).getSubnetId(); + return ((InstanceNetworkInterface) val).subnetId(); } else if (field.getName().equals("vpc")) { - return ((InstanceNetworkInterface) val).getVpcId(); + return ((InstanceNetworkInterface) val).vpcId(); } else if (field.getName().equals("mac")) { - return ((InstanceNetworkInterface) val).getMacAddress(); + return ((InstanceNetworkInterface) val).macAddress(); } else if (field.getName().equals("private_dns")) { - return ((InstanceNetworkInterface) val).getPrivateDnsName(); + return ((InstanceNetworkInterface) val).privateDnsName(); } else if (field.getName().equals("private_ip")) { - return ((InstanceNetworkInterface) val).getPrivateIpAddress(); + return ((InstanceNetworkInterface) val).privateIpAddress(); } else if (field.getName().equals("security_groups")) { - return ((InstanceNetworkInterface) val).getGroups().stream().map(next -> next.getGroupName() + ":" + next.getGroupId()).collect(Collectors.toList()); + return ((InstanceNetworkInterface) val).groups().stream().map(next -> next.groupName() + ":" + next.groupId()).collect(Collectors.toList()); } else if (field.getName().equals("interface_id")) { - return 
((InstanceNetworkInterface) val).getNetworkInterfaceId(); + return ((InstanceNetworkInterface) val).networkInterfaceId(); } throw new RuntimeException("Unknown field " + field.getName()); - }, instance.getNetworkInterfaces()); + }, instance.networkInterfaces()); matched &= block.offerComplexValue("state_reason", row, (Field field, Object val) -> { if (field.getName().equals("message")) { - return ((StateReason) val).getMessage(); + return ((StateReason) val).message(); } else if (field.getName().equals("code")) { - return ((StateReason) val).getCode(); + return ((StateReason) val).code(); } throw new RuntimeException("Unknown field " + field.getName()); - }, instance.getStateReason()); + }, instance.stateReason()); - matched &= block.offerValue("ebs_optimized", row, instance.getEbsOptimized()); + matched &= block.offerValue("ebs_optimized", row, instance.ebsOptimized()); - List securityGroups = instance.getSecurityGroups().stream() - .map(next -> next.getGroupId()).collect(Collectors.toList()); + List securityGroups = instance.securityGroups().stream() + .map(next -> next.groupId()).collect(Collectors.toList()); matched &= block.offerComplexValue("security_groups", row, FieldResolver.DEFAULT, securityGroups); - List securityGroupNames = instance.getSecurityGroups().stream() - .map(next -> next.getGroupName()).collect(Collectors.toList()); + List securityGroupNames = instance.securityGroups().stream() + .map(next -> next.groupName()).collect(Collectors.toList()); matched &= block.offerComplexValue("security_group_names", row, FieldResolver.DEFAULT, securityGroupNames); - List ebsVolumes = instance.getBlockDeviceMappings().stream() - .map(next -> next.getEbs().getVolumeId()).collect(Collectors.toList()); + List ebsVolumes = instance.blockDeviceMappings().stream() + .map(next -> next.ebs().volumeId()).collect(Collectors.toList()); matched &= block.offerComplexValue("ebs_volumes", row, FieldResolver.DEFAULT, ebsVolumes); matched &= block.offerComplexValue("tags", row, 
(Field field, Object val) -> { if (field.getName().equals("key")) { - return ((Tag) val).getKey(); + return ((Tag) val).key(); } else if (field.getName().equals("value")) { - return ((Tag) val).getValue(); + return ((Tag) val).value(); } throw new RuntimeException("Unexpected field " + field.getName()); }, - instance.getTags()); + instance.tags()); return matched ? 1 : 0; }); diff --git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/ImagesTableProvider.java b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/ImagesTableProvider.java index 3858946948..a80ad779bf 100644 --- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/ImagesTableProvider.java +++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/ImagesTableProvider.java @@ -31,17 +31,17 @@ import com.amazonaws.athena.connector.lambda.metadata.GetTableResponse; import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest; import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider; -import com.amazonaws.services.ec2.AmazonEC2; -import com.amazonaws.services.ec2.model.BlockDeviceMapping; -import com.amazonaws.services.ec2.model.DescribeImagesRequest; -import com.amazonaws.services.ec2.model.DescribeImagesResult; -import com.amazonaws.services.ec2.model.EbsBlockDevice; -import com.amazonaws.services.ec2.model.Image; -import com.amazonaws.services.ec2.model.Tag; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.ArrowType; import org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.types.pojo.Schema; +import software.amazon.awssdk.services.ec2.Ec2Client; +import software.amazon.awssdk.services.ec2.model.BlockDeviceMapping; +import software.amazon.awssdk.services.ec2.model.DescribeImagesRequest; +import software.amazon.awssdk.services.ec2.model.DescribeImagesResponse; +import 
software.amazon.awssdk.services.ec2.model.EbsBlockDevice; +import software.amazon.awssdk.services.ec2.model.Image; +import software.amazon.awssdk.services.ec2.model.Tag; import java.util.Collections; import java.util.List; @@ -58,9 +58,9 @@ public class ImagesTableProvider //query for a specific owner. private final String defaultOwner; private static final Schema SCHEMA; - private AmazonEC2 ec2; + private Ec2Client ec2; - public ImagesTableProvider(AmazonEC2 ec2, java.util.Map configOptions) + public ImagesTableProvider(Ec2Client ec2, java.util.Map configOptions) { this.ec2 = ec2; this.defaultOwner = configOptions.get(DEFAULT_OWNER_ENV); @@ -104,28 +104,28 @@ public GetTableResponse getTable(BlockAllocator blockAllocator, GetTableRequest @Override public void readWithConstraint(BlockSpiller spiller, ReadRecordsRequest recordsRequest, QueryStatusChecker queryStatusChecker) { - DescribeImagesRequest request = new DescribeImagesRequest(); + DescribeImagesRequest.Builder request = DescribeImagesRequest.builder(); ValueSet idConstraint = recordsRequest.getConstraints().getSummary().get("id"); ValueSet ownerConstraint = recordsRequest.getConstraints().getSummary().get("owner"); if (idConstraint != null && idConstraint.isSingleValue()) { - request.setImageIds(Collections.singletonList(idConstraint.getSingleValue().toString())); + request.imageIds(Collections.singletonList(idConstraint.getSingleValue().toString())); } else if (ownerConstraint != null && ownerConstraint.isSingleValue()) { - request.setOwners(Collections.singletonList(ownerConstraint.getSingleValue().toString())); + request.owners(Collections.singletonList(ownerConstraint.getSingleValue().toString())); } else if (defaultOwner != null) { - request.setOwners(Collections.singletonList(defaultOwner)); + request.owners(Collections.singletonList(defaultOwner)); } else { throw new RuntimeException("A default owner account must be set or the query must have owner" + "in the where clause with exactly 1 value 
otherwise results may be too big."); } - DescribeImagesResult response = ec2.describeImages(request); + DescribeImagesResponse response = ec2.describeImages(request.build()); int count = 0; - for (Image next : response.getImages()) { + for (Image next : response.images()) { if (count++ > MAX_IMAGES) { throw new RuntimeException("Too many images returned, add an owner or id filter."); } @@ -147,34 +147,34 @@ private void instanceToRow(Image image, spiller.writeRows((Block block, int row) -> { boolean matched = true; - matched &= block.offerValue("id", row, image.getImageId()); - matched &= block.offerValue("architecture", row, image.getArchitecture()); - matched &= block.offerValue("created", row, image.getCreationDate()); - matched &= block.offerValue("description", row, image.getDescription()); - matched &= block.offerValue("hypervisor", row, image.getHypervisor()); - matched &= block.offerValue("location", row, image.getImageLocation()); - matched &= block.offerValue("type", row, image.getImageType()); - matched &= block.offerValue("kernel", row, image.getKernelId()); - matched &= block.offerValue("name", row, image.getName()); - matched &= block.offerValue("owner", row, image.getOwnerId()); - matched &= block.offerValue("platform", row, image.getPlatform()); - matched &= block.offerValue("ramdisk", row, image.getRamdiskId()); - matched &= block.offerValue("root_device", row, image.getRootDeviceName()); - matched &= block.offerValue("root_type", row, image.getRootDeviceType()); - matched &= block.offerValue("srvio_net", row, image.getSriovNetSupport()); - matched &= block.offerValue("state", row, image.getState()); - matched &= block.offerValue("virt_type", row, image.getVirtualizationType()); - matched &= block.offerValue("is_public", row, image.getPublic()); + matched &= block.offerValue("id", row, image.imageId()); + matched &= block.offerValue("architecture", row, image.architectureAsString()); + matched &= block.offerValue("created", row, 
image.creationDate()); + matched &= block.offerValue("description", row, image.description()); + matched &= block.offerValue("hypervisor", row, image.hypervisorAsString()); + matched &= block.offerValue("location", row, image.imageLocation()); + matched &= block.offerValue("type", row, image.imageTypeAsString()); + matched &= block.offerValue("kernel", row, image.kernelId()); + matched &= block.offerValue("name", row, image.name()); + matched &= block.offerValue("owner", row, image.ownerId()); + matched &= block.offerValue("platform", row, image.platformAsString()); + matched &= block.offerValue("ramdisk", row, image.ramdiskId()); + matched &= block.offerValue("root_device", row, image.rootDeviceName()); + matched &= block.offerValue("root_type", row, image.rootDeviceTypeAsString()); + matched &= block.offerValue("srvio_net", row, image.sriovNetSupport()); + matched &= block.offerValue("state", row, image.stateAsString()); + matched &= block.offerValue("virt_type", row, image.virtualizationTypeAsString()); + matched &= block.offerValue("is_public", row, image.publicLaunchPermissions()); - List tags = image.getTags(); + List tags = image.tags(); matched &= block.offerComplexValue("tags", row, (Field field, Object val) -> { if (field.getName().equals("key")) { - return ((Tag) val).getKey(); + return ((Tag) val).key(); } else if (field.getName().equals("value")) { - return ((Tag) val).getValue(); + return ((Tag) val).value(); } throw new RuntimeException("Unexpected field " + field.getName()); @@ -185,33 +185,33 @@ else if (field.getName().equals("value")) { row, (Field field, Object val) -> { if (field.getName().equals("dev_name")) { - return ((BlockDeviceMapping) val).getDeviceName(); + return ((BlockDeviceMapping) val).deviceName(); } else if (field.getName().equals("no_device")) { - return ((BlockDeviceMapping) val).getNoDevice(); + return ((BlockDeviceMapping) val).noDevice(); } else if (field.getName().equals("virt_name")) { - return ((BlockDeviceMapping) 
val).getVirtualName(); + return ((BlockDeviceMapping) val).virtualName(); } else if (field.getName().equals("ebs")) { - return ((BlockDeviceMapping) val).getEbs(); + return ((BlockDeviceMapping) val).ebs(); } else if (field.getName().equals("ebs_size")) { - return ((EbsBlockDevice) val).getVolumeSize(); + return ((EbsBlockDevice) val).volumeSize(); } else if (field.getName().equals("ebs_iops")) { - return ((EbsBlockDevice) val).getIops(); + return ((EbsBlockDevice) val).iops(); } else if (field.getName().equals("ebs_type")) { - return ((EbsBlockDevice) val).getVolumeType(); + return ((EbsBlockDevice) val).volumeTypeAsString(); } else if (field.getName().equals("ebs_kms_key")) { - return ((EbsBlockDevice) val).getKmsKeyId(); + return ((EbsBlockDevice) val).kmsKeyId(); } throw new RuntimeException("Unexpected field " + field.getName()); }, - image.getBlockDeviceMappings()); + image.blockDeviceMappings()); return matched ? 1 : 0; }); diff --git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/RouteTableProvider.java b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/RouteTableProvider.java index 24583be45e..7c71183464 100644 --- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/RouteTableProvider.java +++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/RouteTableProvider.java @@ -31,13 +31,13 @@ import com.amazonaws.athena.connector.lambda.metadata.GetTableResponse; import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest; import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider; -import com.amazonaws.services.ec2.AmazonEC2; -import com.amazonaws.services.ec2.model.DescribeRouteTablesRequest; -import com.amazonaws.services.ec2.model.DescribeRouteTablesResult; -import com.amazonaws.services.ec2.model.Route; -import com.amazonaws.services.ec2.model.RouteTable; import org.apache.arrow.vector.types.Types; 
import org.apache.arrow.vector.types.pojo.Schema; +import software.amazon.awssdk.services.ec2.Ec2Client; +import software.amazon.awssdk.services.ec2.model.DescribeRouteTablesRequest; +import software.amazon.awssdk.services.ec2.model.DescribeRouteTablesResponse; +import software.amazon.awssdk.services.ec2.model.Route; +import software.amazon.awssdk.services.ec2.model.RouteTable; import java.util.Collections; import java.util.List; @@ -50,9 +50,9 @@ public class RouteTableProvider implements TableProvider { private static final Schema SCHEMA; - private AmazonEC2 ec2; + private Ec2Client ec2; - public RouteTableProvider(AmazonEC2 ec2) + public RouteTableProvider(Ec2Client ec2) { this.ec2 = ec2; } @@ -94,25 +94,25 @@ public GetTableResponse getTable(BlockAllocator blockAllocator, GetTableRequest public void readWithConstraint(BlockSpiller spiller, ReadRecordsRequest recordsRequest, QueryStatusChecker queryStatusChecker) { boolean done = false; - DescribeRouteTablesRequest request = new DescribeRouteTablesRequest(); + DescribeRouteTablesRequest.Builder request = DescribeRouteTablesRequest.builder(); ValueSet idConstraint = recordsRequest.getConstraints().getSummary().get("route_table_id"); if (idConstraint != null && idConstraint.isSingleValue()) { - request.setRouteTableIds(Collections.singletonList(idConstraint.getSingleValue().toString())); + request.routeTableIds(Collections.singletonList(idConstraint.getSingleValue().toString())); } while (!done) { - DescribeRouteTablesResult response = ec2.describeRouteTables(request); + DescribeRouteTablesResponse response = ec2.describeRouteTables(request.build()); - for (RouteTable nextRouteTable : response.getRouteTables()) { - for (Route route : nextRouteTable.getRoutes()) { + for (RouteTable nextRouteTable : response.routeTables()) { + for (Route route : nextRouteTable.routes()) { instanceToRow(nextRouteTable, route, spiller); } } - request.setNextToken(response.getNextToken()); + request.nextToken(response.nextToken()); - 
if (response.getNextToken() == null || !queryStatusChecker.isQueryRunning()) { + if (response.nextToken() == null || !queryStatusChecker.isQueryRunning()) { done = true; } } @@ -134,33 +134,33 @@ private void instanceToRow(RouteTable routeTable, spiller.writeRows((Block block, int row) -> { boolean matched = true; - matched &= block.offerValue("route_table_id", row, routeTable.getRouteTableId()); - matched &= block.offerValue("owner", row, routeTable.getOwnerId()); - matched &= block.offerValue("vpc", row, routeTable.getVpcId()); - matched &= block.offerValue("dst_cidr", row, route.getDestinationCidrBlock()); - matched &= block.offerValue("dst_cidr_v6", row, route.getDestinationIpv6CidrBlock()); - matched &= block.offerValue("dst_prefix_list", row, route.getDestinationPrefixListId()); - matched &= block.offerValue("egress_igw", row, route.getEgressOnlyInternetGatewayId()); - matched &= block.offerValue("gateway", row, route.getGatewayId()); - matched &= block.offerValue("instance_id", row, route.getInstanceId()); - matched &= block.offerValue("instance_owner", row, route.getInstanceOwnerId()); - matched &= block.offerValue("nat_gateway", row, route.getNatGatewayId()); - matched &= block.offerValue("interface", row, route.getNetworkInterfaceId()); - matched &= block.offerValue("origin", row, route.getOrigin()); - matched &= block.offerValue("state", row, route.getState()); - matched &= block.offerValue("transit_gateway", row, route.getTransitGatewayId()); - matched &= block.offerValue("vpc_peering_con", row, route.getVpcPeeringConnectionId()); - - List associations = routeTable.getAssociations().stream() - .map(next -> next.getSubnetId() + ":" + next.getRouteTableId()).collect(Collectors.toList()); + matched &= block.offerValue("route_table_id", row, routeTable.routeTableId()); + matched &= block.offerValue("owner", row, routeTable.ownerId()); + matched &= block.offerValue("vpc", row, routeTable.vpcId()); + matched &= block.offerValue("dst_cidr", row, 
route.destinationCidrBlock()); + matched &= block.offerValue("dst_cidr_v6", row, route.destinationIpv6CidrBlock()); + matched &= block.offerValue("dst_prefix_list", row, route.destinationPrefixListId()); + matched &= block.offerValue("egress_igw", row, route.egressOnlyInternetGatewayId()); + matched &= block.offerValue("gateway", row, route.gatewayId()); + matched &= block.offerValue("instance_id", row, route.instanceId()); + matched &= block.offerValue("instance_owner", row, route.instanceOwnerId()); + matched &= block.offerValue("nat_gateway", row, route.natGatewayId()); + matched &= block.offerValue("interface", row, route.networkInterfaceId()); + matched &= block.offerValue("origin", row, route.originAsString()); + matched &= block.offerValue("state", row, route.stateAsString()); + matched &= block.offerValue("transit_gateway", row, route.transitGatewayId()); + matched &= block.offerValue("vpc_peering_con", row, route.vpcPeeringConnectionId()); + + List associations = routeTable.associations().stream() + .map(next -> next.subnetId() + ":" + next.routeTableId()).collect(Collectors.toList()); matched &= block.offerComplexValue("associations", row, FieldResolver.DEFAULT, associations); - List tags = routeTable.getTags().stream() - .map(next -> next.getKey() + ":" + next.getValue()).collect(Collectors.toList()); + List tags = routeTable.tags().stream() + .map(next -> next.key() + ":" + next.value()).collect(Collectors.toList()); matched &= block.offerComplexValue("tags", row, FieldResolver.DEFAULT, tags); - List propagatingVgws = routeTable.getPropagatingVgws().stream() - .map(next -> next.getGatewayId()).collect(Collectors.toList()); + List propagatingVgws = routeTable.propagatingVgws().stream() + .map(next -> next.gatewayId()).collect(Collectors.toList()); matched &= block.offerComplexValue("propagating_vgws", row, FieldResolver.DEFAULT, propagatingVgws); return matched ? 
1 : 0; diff --git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/SecurityGroupsTableProvider.java b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/SecurityGroupsTableProvider.java index 8f4f6dd3c3..94afbdf687 100644 --- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/SecurityGroupsTableProvider.java +++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/SecurityGroupsTableProvider.java @@ -31,13 +31,13 @@ import com.amazonaws.athena.connector.lambda.metadata.GetTableResponse; import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest; import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider; -import com.amazonaws.services.ec2.AmazonEC2; -import com.amazonaws.services.ec2.model.DescribeSecurityGroupsRequest; -import com.amazonaws.services.ec2.model.DescribeSecurityGroupsResult; -import com.amazonaws.services.ec2.model.IpPermission; -import com.amazonaws.services.ec2.model.SecurityGroup; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Schema; +import software.amazon.awssdk.services.ec2.Ec2Client; +import software.amazon.awssdk.services.ec2.model.DescribeSecurityGroupsRequest; +import software.amazon.awssdk.services.ec2.model.DescribeSecurityGroupsResponse; +import software.amazon.awssdk.services.ec2.model.IpPermission; +import software.amazon.awssdk.services.ec2.model.SecurityGroup; import java.util.Collections; import java.util.List; @@ -53,9 +53,9 @@ public class SecurityGroupsTableProvider private static final String EGRESS = "egress"; private static final Schema SCHEMA; - private AmazonEC2 ec2; + private Ec2Client ec2; - public SecurityGroupsTableProvider(AmazonEC2 ec2) + public SecurityGroupsTableProvider(Ec2Client ec2) { this.ec2 = ec2; } @@ -97,34 +97,34 @@ public GetTableResponse getTable(BlockAllocator blockAllocator, GetTableRequest public void 
readWithConstraint(BlockSpiller spiller, ReadRecordsRequest recordsRequest, QueryStatusChecker queryStatusChecker) { boolean done = false; - DescribeSecurityGroupsRequest request = new DescribeSecurityGroupsRequest(); + DescribeSecurityGroupsRequest.Builder request = DescribeSecurityGroupsRequest.builder(); ValueSet idConstraint = recordsRequest.getConstraints().getSummary().get("id"); if (idConstraint != null && idConstraint.isSingleValue()) { - request.setGroupIds(Collections.singletonList(idConstraint.getSingleValue().toString())); + request.groupIds(Collections.singletonList(idConstraint.getSingleValue().toString())); } ValueSet nameConstraint = recordsRequest.getConstraints().getSummary().get("name"); if (nameConstraint != null && nameConstraint.isSingleValue()) { - request.setGroupNames(Collections.singletonList(nameConstraint.getSingleValue().toString())); + request.groupNames(Collections.singletonList(nameConstraint.getSingleValue().toString())); } while (!done) { - DescribeSecurityGroupsResult response = ec2.describeSecurityGroups(request); + DescribeSecurityGroupsResponse response = ec2.describeSecurityGroups(request.build()); //Each rule is mapped to a row in the response. SGs have INGRESS and EGRESS rules. 
- for (SecurityGroup next : response.getSecurityGroups()) { - for (IpPermission nextPerm : next.getIpPermissions()) { + for (SecurityGroup next : response.securityGroups()) { + for (IpPermission nextPerm : next.ipPermissions()) { instanceToRow(next, nextPerm, INGRESS, spiller); } - for (IpPermission nextPerm : next.getIpPermissionsEgress()) { + for (IpPermission nextPerm : next.ipPermissionsEgress()) { instanceToRow(next, nextPerm, EGRESS, spiller); } } - request.setNextToken(response.getNextToken()); - if (response.getNextToken() == null || !queryStatusChecker.isQueryRunning()) { + request.nextToken(response.nextToken()); + if (response.nextToken() == null || !queryStatusChecker.isQueryRunning()) { done = true; } } @@ -148,28 +148,28 @@ private void instanceToRow(SecurityGroup securityGroup, spiller.writeRows((Block block, int row) -> { boolean matched = true; - matched &= block.offerValue("id", row, securityGroup.getGroupId()); - matched &= block.offerValue("name", row, securityGroup.getGroupName()); - matched &= block.offerValue("description", row, securityGroup.getDescription()); - matched &= block.offerValue("from_port", row, permission.getFromPort()); - matched &= block.offerValue("to_port", row, permission.getFromPort()); - matched &= block.offerValue("protocol", row, permission.getIpProtocol()); - matched &= block.offerValue("direction", row, permission.getIpProtocol()); + matched &= block.offerValue("id", row, securityGroup.groupId()); + matched &= block.offerValue("name", row, securityGroup.groupName()); + matched &= block.offerValue("description", row, securityGroup.description()); + matched &= block.offerValue("from_port", row, permission.fromPort()); + matched &= block.offerValue("to_port", row, permission.toPort()); + matched &= block.offerValue("protocol", row, permission.ipProtocol()); + matched &= block.offerValue("direction", row, direction); - List ipv4Ranges = permission.getIpv4Ranges().stream() - .map(next -> next.getCidrIp() + ":" + 
next.getDescription()).collect(Collectors.toList()); + List ipv4Ranges = permission.ipRanges().stream() + .map(next -> next.cidrIp() + ":" + next.description()).collect(Collectors.toList()); matched &= block.offerComplexValue("ipv4_ranges", row, FieldResolver.DEFAULT, ipv4Ranges); - List ipv6Ranges = permission.getIpv6Ranges().stream() - .map(next -> next.getCidrIpv6() + ":" + next.getDescription()).collect(Collectors.toList()); + List ipv6Ranges = permission.ipv6Ranges().stream() + .map(next -> next.cidrIpv6() + ":" + next.description()).collect(Collectors.toList()); matched &= block.offerComplexValue("ipv6_ranges", row, FieldResolver.DEFAULT, ipv6Ranges); - List prefixLists = permission.getPrefixListIds().stream() - .map(next -> next.getPrefixListId() + ":" + next.getDescription()).collect(Collectors.toList()); + List prefixLists = permission.prefixListIds().stream() + .map(next -> next.prefixListId() + ":" + next.description()).collect(Collectors.toList()); matched &= block.offerComplexValue("prefix_lists", row, FieldResolver.DEFAULT, prefixLists); - List userIdGroups = permission.getUserIdGroupPairs().stream() - .map(next -> next.getUserId() + ":" + next.getGroupId()) + List userIdGroups = permission.userIdGroupPairs().stream() + .map(next -> next.userId() + ":" + next.groupId()) .collect(Collectors.toList()); matched &= block.offerComplexValue("user_id_groups", row, FieldResolver.DEFAULT, userIdGroups); diff --git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/SubnetTableProvider.java b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/SubnetTableProvider.java index f64bb9bd26..444fd39510 100644 --- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/SubnetTableProvider.java +++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/SubnetTableProvider.java @@ -31,12 +31,12 @@ import 
com.amazonaws.athena.connector.lambda.metadata.GetTableResponse; import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest; import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider; -import com.amazonaws.services.ec2.AmazonEC2; -import com.amazonaws.services.ec2.model.DescribeSubnetsRequest; -import com.amazonaws.services.ec2.model.DescribeSubnetsResult; -import com.amazonaws.services.ec2.model.Subnet; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Schema; +import software.amazon.awssdk.services.ec2.Ec2Client; +import software.amazon.awssdk.services.ec2.model.DescribeSubnetsRequest; +import software.amazon.awssdk.services.ec2.model.DescribeSubnetsResponse; +import software.amazon.awssdk.services.ec2.model.Subnet; import java.util.Collections; import java.util.List; @@ -49,9 +49,9 @@ public class SubnetTableProvider implements TableProvider { private static final Schema SCHEMA; - private AmazonEC2 ec2; + private Ec2Client ec2; - public SubnetTableProvider(AmazonEC2 ec2) + public SubnetTableProvider(Ec2Client ec2) { this.ec2 = ec2; } @@ -92,15 +92,15 @@ public GetTableResponse getTable(BlockAllocator blockAllocator, GetTableRequest @Override public void readWithConstraint(BlockSpiller spiller, ReadRecordsRequest recordsRequest, QueryStatusChecker queryStatusChecker) { - DescribeSubnetsRequest request = new DescribeSubnetsRequest(); + DescribeSubnetsRequest.Builder request = DescribeSubnetsRequest.builder(); ValueSet idConstraint = recordsRequest.getConstraints().getSummary().get("id"); if (idConstraint != null && idConstraint.isSingleValue()) { - request.setSubnetIds(Collections.singletonList(idConstraint.getSingleValue().toString())); + request.subnetIds(Collections.singletonList(idConstraint.getSingleValue().toString())); } - DescribeSubnetsResult response = ec2.describeSubnets(request); - for (Subnet subnet : response.getSubnets()) { + DescribeSubnetsResponse response = 
ec2.describeSubnets(request.build()); + for (Subnet subnet : response.subnets()) { instanceToRow(subnet, spiller); } } @@ -119,19 +119,18 @@ private void instanceToRow(Subnet subnet, spiller.writeRows((Block block, int row) -> { boolean matched = true; - matched &= block.offerValue("id", row, subnet.getSubnetId()); - matched &= block.offerValue("availability_zone", row, subnet.getAvailabilityZone()); - matched &= block.offerValue("available_ip_count", row, subnet.getAvailableIpAddressCount()); - matched &= block.offerValue("cidr_block", row, subnet.getCidrBlock()); - matched &= block.offerValue("default_for_az", row, subnet.getDefaultForAz()); - matched &= block.offerValue("map_public_ip", row, subnet.getMapPublicIpOnLaunch()); - matched &= block.offerValue("owner", row, subnet.getOwnerId()); - matched &= block.offerValue("state", row, subnet.getState()); - matched &= block.offerValue("vpc", row, subnet.getVpcId()); - matched &= block.offerValue("vpc", row, subnet.getVpcId()); + matched &= block.offerValue("id", row, subnet.subnetId()); + matched &= block.offerValue("availability_zone", row, subnet.availabilityZone()); + matched &= block.offerValue("available_ip_count", row, subnet.availableIpAddressCount()); + matched &= block.offerValue("cidr_block", row, subnet.cidrBlock()); + matched &= block.offerValue("default_for_az", row, subnet.defaultForAz()); + matched &= block.offerValue("map_public_ip", row, subnet.mapPublicIpOnLaunch()); + matched &= block.offerValue("owner", row, subnet.ownerId()); + matched &= block.offerValue("state", row, subnet.stateAsString()); + matched &= block.offerValue("vpc", row, subnet.vpcId()); - List tags = subnet.getTags().stream() - .map(next -> next.getKey() + ":" + next.getValue()).collect(Collectors.toList()); + List tags = subnet.tags().stream() + .map(next -> next.key() + ":" + next.value()).collect(Collectors.toList()); matched &= block.offerComplexValue("tags", row, FieldResolver.DEFAULT, tags); return matched ? 
1 : 0; diff --git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/VpcTableProvider.java b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/VpcTableProvider.java index 18087ba5e5..44adc6a846 100644 --- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/VpcTableProvider.java +++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/VpcTableProvider.java @@ -31,12 +31,12 @@ import com.amazonaws.athena.connector.lambda.metadata.GetTableResponse; import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest; import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider; -import com.amazonaws.services.ec2.AmazonEC2; -import com.amazonaws.services.ec2.model.DescribeVpcsRequest; -import com.amazonaws.services.ec2.model.DescribeVpcsResult; -import com.amazonaws.services.ec2.model.Vpc; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Schema; +import software.amazon.awssdk.services.ec2.Ec2Client; +import software.amazon.awssdk.services.ec2.model.DescribeVpcsRequest; +import software.amazon.awssdk.services.ec2.model.DescribeVpcsResponse; +import software.amazon.awssdk.services.ec2.model.Vpc; import java.util.Collections; import java.util.List; @@ -49,9 +49,9 @@ public class VpcTableProvider implements TableProvider { private static final Schema SCHEMA; - private AmazonEC2 ec2; + private Ec2Client ec2; - public VpcTableProvider(AmazonEC2 ec2) + public VpcTableProvider(Ec2Client ec2) { this.ec2 = ec2; } @@ -92,15 +92,15 @@ public GetTableResponse getTable(BlockAllocator blockAllocator, GetTableRequest @Override public void readWithConstraint(BlockSpiller spiller, ReadRecordsRequest recordsRequest, QueryStatusChecker queryStatusChecker) { - DescribeVpcsRequest request = new DescribeVpcsRequest(); + DescribeVpcsRequest.Builder request = DescribeVpcsRequest.builder(); ValueSet idConstraint = 
recordsRequest.getConstraints().getSummary().get("id"); if (idConstraint != null && idConstraint.isSingleValue()) { - request.setVpcIds(Collections.singletonList(idConstraint.getSingleValue().toString())); + request.vpcIds(Collections.singletonList(idConstraint.getSingleValue().toString())); } - DescribeVpcsResult response = ec2.describeVpcs(request); - for (Vpc vpc : response.getVpcs()) { + DescribeVpcsResponse response = ec2.describeVpcs(request.build()); + for (Vpc vpc : response.vpcs()) { instanceToRow(vpc, spiller); } } @@ -119,16 +119,16 @@ private void instanceToRow(Vpc vpc, spiller.writeRows((Block block, int row) -> { boolean matched = true; - matched &= block.offerValue("id", row, vpc.getVpcId()); - matched &= block.offerValue("cidr_block", row, vpc.getCidrBlock()); - matched &= block.offerValue("dhcp_opts", row, vpc.getDhcpOptionsId()); - matched &= block.offerValue("tenancy", row, vpc.getInstanceTenancy()); - matched &= block.offerValue("owner", row, vpc.getOwnerId()); - matched &= block.offerValue("state", row, vpc.getState()); - matched &= block.offerValue("is_default", row, vpc.getIsDefault()); + matched &= block.offerValue("id", row, vpc.vpcId()); + matched &= block.offerValue("cidr_block", row, vpc.cidrBlock()); + matched &= block.offerValue("dhcp_opts", row, vpc.dhcpOptionsId()); + matched &= block.offerValue("tenancy", row, vpc.instanceTenancyAsString()); + matched &= block.offerValue("owner", row, vpc.ownerId()); + matched &= block.offerValue("state", row, vpc.stateAsString()); + matched &= block.offerValue("is_default", row, vpc.isDefault()); - List tags = vpc.getTags().stream() - .map(next -> next.getKey() + ":" + next.getValue()).collect(Collectors.toList()); + List tags = vpc.tags().stream() + .map(next -> next.key() + ":" + next.value()).collect(Collectors.toList()); matched &= block.offerComplexValue("tags", row, FieldResolver.DEFAULT, tags); return matched ? 
1 : 0; diff --git a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactoryTest.java b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactoryTest.java index ce23513916..83e2f72c3b 100644 --- a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactoryTest.java +++ b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactoryTest.java @@ -21,11 +21,11 @@ import com.amazonaws.athena.connector.lambda.domain.TableName; import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider; -import com.amazonaws.services.ec2.AmazonEC2; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.services.ec2.Ec2Client; import software.amazon.awssdk.services.emr.EmrClient; import software.amazon.awssdk.services.rds.RdsClient; import software.amazon.awssdk.services.s3.S3Client; @@ -33,7 +33,7 @@ import java.util.List; import java.util.Map; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; @RunWith(MockitoJUnitRunner.class) public class TableProviderFactoryTest @@ -42,7 +42,7 @@ public class TableProviderFactoryTest private int expectedTables = 11; @Mock - private AmazonEC2 mockEc2; + private Ec2Client mockEc2; @Mock private EmrClient mockEmr; diff --git a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/EbsTableProviderTest.java b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/EbsTableProviderTest.java index 35ebc15812..2fdc295b4f 100644 --- a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/EbsTableProviderTest.java +++ b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/EbsTableProviderTest.java @@ -23,12 +23,6 @@ import com.amazonaws.athena.connector.lambda.data.BlockUtils; import 
com.amazonaws.athena.connectors.aws.cmdb.tables.AbstractTableProviderTest; import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider; -import com.amazonaws.services.ec2.AmazonEC2; -import com.amazonaws.services.ec2.model.DescribeVolumesRequest; -import com.amazonaws.services.ec2.model.DescribeVolumesResult; -import com.amazonaws.services.ec2.model.Tag; -import com.amazonaws.services.ec2.model.Volume; -import com.amazonaws.services.ec2.model.VolumeAttachment; import org.apache.arrow.vector.complex.reader.FieldReader; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Field; @@ -38,6 +32,12 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.ec2.Ec2Client; +import software.amazon.awssdk.services.ec2.model.DescribeVolumesRequest; +import software.amazon.awssdk.services.ec2.model.DescribeVolumesResponse; +import software.amazon.awssdk.services.ec2.model.Tag; +import software.amazon.awssdk.services.ec2.model.Volume; +import software.amazon.awssdk.services.ec2.model.VolumeAttachment; import java.util.ArrayList; import java.util.Date; @@ -47,7 +47,6 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.mockito.ArgumentMatchers.nullable; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @RunWith(MockitoJUnitRunner.class) @@ -57,7 +56,7 @@ public class EbsTableProviderTest private static final Logger logger = LoggerFactory.getLogger(EbsTableProviderTest.class); @Mock - private AmazonEC2 mockEc2; + private Ec2Client mockEc2; protected String getIdField() { @@ -95,14 +94,13 @@ protected void setUpRead() when(mockEc2.describeVolumes(nullable(DescribeVolumesRequest.class))).thenAnswer((InvocationOnMock invocation) -> { DescribeVolumesRequest request = (DescribeVolumesRequest) invocation.getArguments()[0]; - assertEquals(getIdValue(), 
request.getVolumeIds().get(0)); - DescribeVolumesResult mockResult = mock(DescribeVolumesResult.class); + assertEquals(getIdValue(), request.volumeIds().get(0)); + List values = new ArrayList<>(); values.add(makeVolume(getIdValue())); values.add(makeVolume(getIdValue())); values.add(makeVolume("fake-id")); - when(mockResult.getVolumes()).thenReturn(values); - return mockResult; + return DescribeVolumesResponse.builder().volumes(values).build(); }); } @@ -158,23 +156,23 @@ private void validate(FieldReader fieldReader) private Volume makeVolume(String id) { - Volume volume = new Volume(); - volume.withVolumeId(id) - .withVolumeType("type") - .withAttachments(new VolumeAttachment() - .withInstanceId("target") - .withDevice("attached_device") - .withState("attachment_state") - .withAttachTime(new Date(100_000))) - .withAvailabilityZone("availability_zone") - .withCreateTime(new Date(100_000)) - .withEncrypted(true) - .withKmsKeyId("kms_key_id") - .withSize(100) - .withIops(100) - .withSnapshotId("snapshot_id") - .withState("state") - .withTags(new Tag("key", "value")); + Volume volume = Volume.builder() + .volumeId(id) + .volumeType("type") + .attachments(VolumeAttachment.builder() + .instanceId("target") + .device("attached_device") + .state("attachment_state") + .attachTime(new Date(100_000).toInstant()).build()) + .availabilityZone("availability_zone") + .createTime(new Date(100_000).toInstant()) + .encrypted(true) + .kmsKeyId("kms_key_id") + .size(100) + .iops(100) + .snapshotId("snapshot_id") + .state("state") + .tags(Tag.builder().key("key").value("value").build()).build(); return volume; } diff --git a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/Ec2TableProviderTest.java b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/Ec2TableProviderTest.java index 9712796cf6..2dd9bcfe2a 100644 --- 
a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/Ec2TableProviderTest.java +++ b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/Ec2TableProviderTest.java @@ -23,19 +23,6 @@ import com.amazonaws.athena.connector.lambda.data.BlockUtils; import com.amazonaws.athena.connectors.aws.cmdb.tables.AbstractTableProviderTest; import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider; -import com.amazonaws.services.ec2.AmazonEC2; -import com.amazonaws.services.ec2.model.DescribeInstancesRequest; -import com.amazonaws.services.ec2.model.DescribeInstancesResult; -import com.amazonaws.services.ec2.model.EbsInstanceBlockDevice; -import com.amazonaws.services.ec2.model.GroupIdentifier; -import com.amazonaws.services.ec2.model.Instance; -import com.amazonaws.services.ec2.model.InstanceBlockDeviceMapping; -import com.amazonaws.services.ec2.model.InstanceNetworkInterface; -import com.amazonaws.services.ec2.model.InstanceState; -import com.amazonaws.services.ec2.model.Reservation; -import com.amazonaws.services.ec2.model.StateReason; -import com.amazonaws.services.ec2.model.Tag; - import org.apache.arrow.vector.complex.reader.FieldReader; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Field; @@ -45,14 +32,27 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.ec2.Ec2Client; +import software.amazon.awssdk.services.ec2.model.DescribeInstancesRequest; +import software.amazon.awssdk.services.ec2.model.DescribeInstancesResponse; +import software.amazon.awssdk.services.ec2.model.EbsInstanceBlockDevice; +import software.amazon.awssdk.services.ec2.model.GroupIdentifier; +import software.amazon.awssdk.services.ec2.model.Instance; +import software.amazon.awssdk.services.ec2.model.InstanceBlockDeviceMapping; +import software.amazon.awssdk.services.ec2.model.InstanceNetworkInterface; 
+import software.amazon.awssdk.services.ec2.model.InstanceState; +import software.amazon.awssdk.services.ec2.model.Reservation; +import software.amazon.awssdk.services.ec2.model.StateReason; +import software.amazon.awssdk.services.ec2.model.Tag; import java.util.ArrayList; import java.util.Date; import java.util.List; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; import static org.mockito.ArgumentMatchers.nullable; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @RunWith(MockitoJUnitRunner.class) @@ -62,7 +62,7 @@ public class Ec2TableProviderTest private static final Logger logger = LoggerFactory.getLogger(Ec2TableProviderTest.class); @Mock - private AmazonEC2 mockEc2; + private Ec2Client mockEc2; protected String getIdField() { @@ -100,13 +100,11 @@ protected void setUpRead() when(mockEc2.describeInstances(nullable(DescribeInstancesRequest.class))).thenAnswer((InvocationOnMock invocation) -> { DescribeInstancesRequest request = (DescribeInstancesRequest) invocation.getArguments()[0]; - assertEquals(getIdValue(), request.getInstanceIds().get(0)); - DescribeInstancesResult mockResult = mock(DescribeInstancesResult.class); + assertEquals(getIdValue(), request.instanceIds().get(0)); List reservations = new ArrayList<>(); reservations.add(makeReservation()); reservations.add(makeReservation()); - when(mockResult.getReservations()).thenReturn(reservations); - return mockResult; + return DescribeInstancesResponse.builder().reservations(reservations).build(); }); } @@ -162,68 +160,66 @@ private void validate(FieldReader fieldReader) private Reservation makeReservation() { - Reservation reservation = mock(Reservation.class); List instances = new ArrayList<>(); instances.add(makeInstance(getIdValue())); instances.add(makeInstance(getIdValue())); instances.add(makeInstance("non-matching-id")); - 
when(reservation.getInstances()).thenReturn(instances); - return reservation; + return Reservation.builder().instances(instances).build(); } private Instance makeInstance(String id) { - Instance instance = new Instance(); - instance.withInstanceId(id) - .withImageId("image_id") - .withInstanceType("instance_type") - .withPlatform("platform") - .withPrivateDnsName("private_dns_name") - .withPrivateIpAddress("private_ip_address") - .withPublicDnsName("public_dns_name") - .withPublicIpAddress("public_ip_address") - .withSubnetId("subnet_id") - .withVpcId("vpc_id") - .withArchitecture("architecture") - .withInstanceLifecycle("instance_lifecycle") - .withRootDeviceName("root_device_name") - .withRootDeviceType("root_device_type") - .withSpotInstanceRequestId("spot_instance_request_id") - .withVirtualizationType("virtualization_type") - .withKeyName("key_name") - .withKernelId("kernel_id") - .withCapacityReservationId("capacity_reservation_id") - .withLaunchTime(new Date(100_000)) - .withState(new InstanceState().withCode(100).withName("name")) - .withStateReason(new StateReason().withCode("code").withMessage("message")) - .withEbsOptimized(true) - .withTags(new Tag("key","value")); + Instance.Builder instance = Instance.builder() + .instanceId(id) + .imageId("image_id") + .instanceType("instance_type") + .platform("platform") + .privateDnsName("private_dns_name") + .privateIpAddress("private_ip_address") + .publicDnsName("public_dns_name") + .publicIpAddress("public_ip_address") + .subnetId("subnet_id") + .vpcId("vpc_id") + .architecture("architecture") + .instanceLifecycle("instance_lifecycle") + .rootDeviceName("root_device_name") + .rootDeviceType("root_device_type") + .spotInstanceRequestId("spot_instance_request_id") + .virtualizationType("virtualization_type") + .keyName("key_name") + .kernelId("kernel_id") + .capacityReservationId("capacity_reservation_id") + .launchTime(new Date(100_000).toInstant()) + 
.state(InstanceState.builder().code(100).name("name").build()) + .stateReason(StateReason.builder().code("code").message("message").build()) + .ebsOptimized(true) + .tags(Tag.builder().key("key").value("value").build()); List interfaces = new ArrayList<>(); - interfaces.add(new InstanceNetworkInterface() - .withStatus("status") - .withSubnetId("subnet") - .withVpcId("vpc") - .withMacAddress("mac_address") - .withPrivateDnsName("private_dns") - .withPrivateIpAddress("private_ip") - .withNetworkInterfaceId("interface_id") - .withGroups(new GroupIdentifier().withGroupId("group_id").withGroupName("group_name"))); - - interfaces.add(new InstanceNetworkInterface() - .withStatus("status") - .withSubnetId("subnet") - .withVpcId("vpc") - .withMacAddress("mac") - .withPrivateDnsName("private_dns") - .withPrivateIpAddress("private_ip") - .withNetworkInterfaceId("interface_id") - .withGroups(new GroupIdentifier().withGroupId("group_id").withGroupName("group_name"))); - - instance.withNetworkInterfaces(interfaces) - .withSecurityGroups(new GroupIdentifier().withGroupId("group_id").withGroupName("group_name")) - .withBlockDeviceMappings(new InstanceBlockDeviceMapping().withDeviceName("device_name").withEbs(new EbsInstanceBlockDevice().withVolumeId("volume_id"))); - - return instance; + interfaces.add(InstanceNetworkInterface.builder() + .status("status") + .subnetId("subnet") + .vpcId("vpc") + .macAddress("mac_address") + .privateDnsName("private_dns") + .privateIpAddress("private_ip") + .networkInterfaceId("interface_id") + .groups(GroupIdentifier.builder().groupId("group_id").groupName("group_name").build()).build()); + + interfaces.add(InstanceNetworkInterface.builder() + .status("status") + .subnetId("subnet") + .vpcId("vpc") + .macAddress("mac") + .privateDnsName("private_dns") + .privateIpAddress("private_ip") + .networkInterfaceId("interface_id") + .groups(GroupIdentifier.builder().groupId("group_id").groupName("group_name").build()).build()); + + 
instance.networkInterfaces(interfaces) + .securityGroups(GroupIdentifier.builder().groupId("group_id").groupName("group_name").build()) + .blockDeviceMappings(InstanceBlockDeviceMapping.builder().deviceName("device_name").ebs(EbsInstanceBlockDevice.builder().volumeId("volume_id").build()).build()); + + return instance.build(); } } diff --git a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/ImagesTableProviderTest.java b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/ImagesTableProviderTest.java index c1ab238c86..d981c618be 100644 --- a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/ImagesTableProviderTest.java +++ b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/ImagesTableProviderTest.java @@ -23,13 +23,6 @@ import com.amazonaws.athena.connector.lambda.data.BlockUtils; import com.amazonaws.athena.connectors.aws.cmdb.tables.AbstractTableProviderTest; import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider; -import com.amazonaws.services.ec2.AmazonEC2; -import com.amazonaws.services.ec2.model.BlockDeviceMapping; -import com.amazonaws.services.ec2.model.DescribeImagesRequest; -import com.amazonaws.services.ec2.model.DescribeImagesResult; -import com.amazonaws.services.ec2.model.EbsBlockDevice; -import com.amazonaws.services.ec2.model.Image; -import com.amazonaws.services.ec2.model.Tag; import org.apache.arrow.vector.complex.reader.FieldReader; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Field; @@ -39,6 +32,13 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.ec2.Ec2Client; +import software.amazon.awssdk.services.ec2.model.BlockDeviceMapping; +import software.amazon.awssdk.services.ec2.model.DescribeImagesRequest; +import 
software.amazon.awssdk.services.ec2.model.DescribeImagesResponse; +import software.amazon.awssdk.services.ec2.model.EbsBlockDevice; +import software.amazon.awssdk.services.ec2.model.Image; +import software.amazon.awssdk.services.ec2.model.Tag; import java.util.ArrayList; import java.util.List; @@ -47,7 +47,6 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.mockito.ArgumentMatchers.nullable; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @RunWith(MockitoJUnitRunner.class) @@ -57,7 +56,7 @@ public class ImagesTableProviderTest private static final Logger logger = LoggerFactory.getLogger(ImagesTableProviderTest.class); @Mock - private AmazonEC2 mockEc2; + private Ec2Client mockEc2; protected String getIdField() { @@ -95,14 +94,12 @@ protected void setUpRead() when(mockEc2.describeImages(nullable(DescribeImagesRequest.class))).thenAnswer((InvocationOnMock invocation) -> { DescribeImagesRequest request = (DescribeImagesRequest) invocation.getArguments()[0]; - assertEquals(getIdValue(), request.getImageIds().get(0)); - DescribeImagesResult mockResult = mock(DescribeImagesResult.class); + assertEquals(getIdValue(), request.imageIds().get(0)); List values = new ArrayList<>(); values.add(makeImage(getIdValue())); values.add(makeImage(getIdValue())); values.add(makeImage("fake-id")); - when(mockResult.getImages()).thenReturn(values); - return mockResult; + return DescribeImagesResponse.builder().images(values).build(); }); } @@ -158,35 +155,35 @@ private void validate(FieldReader fieldReader) private Image makeImage(String id) { - Image image = new Image(); - image.withImageId(id) - .withArchitecture("architecture") - .withCreationDate("created") - .withDescription("description") - .withHypervisor("hypervisor") - .withImageLocation("location") - .withImageType("type") - .withKernelId("kernel") - .withName("name") - .withOwnerId("owner") - .withPlatform("platform") - 
.withRamdiskId("ramdisk") - .withRootDeviceName("root_device") - .withRootDeviceType("root_type") - .withSriovNetSupport("srvio_net") - .withState("state") - .withVirtualizationType("virt_type") - .withPublic(true) - .withTags(new Tag("key", "value")) - .withBlockDeviceMappings(new BlockDeviceMapping() - .withDeviceName("dev_name") - .withNoDevice("no_device") - .withVirtualName("virt_name") - .withEbs(new EbsBlockDevice() - .withIops(100) - .withKmsKeyId("ebs_kms_key") - .withVolumeType("ebs_type") - .withVolumeSize(100))); + Image image = Image.builder() + .imageId(id) + .architecture("architecture") + .creationDate("created") + .description("description") + .hypervisor("hypervisor") + .imageLocation("location") + .imageType("type") + .kernelId("kernel") + .name("name") + .ownerId("owner") + .platform("platform") + .ramdiskId("ramdisk") + .rootDeviceName("root_device") + .rootDeviceType("root_type") + .sriovNetSupport("srvio_net") + .state("state") + .virtualizationType("virt_type") + .publicLaunchPermissions(true) + .tags(Tag.builder().key("key").value("value").build()) + .blockDeviceMappings(BlockDeviceMapping.builder() + .deviceName("dev_name") + .noDevice("no_device") + .virtualName("virt_name") + .ebs(EbsBlockDevice.builder() + .iops(100) + .kmsKeyId("ebs_kms_key") + .volumeType("ebs_type") + .volumeSize(100).build()).build()).build(); return image; } diff --git a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/RouteTableProviderTest.java b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/RouteTableProviderTest.java index cf293d33e2..6151bea84d 100644 --- a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/RouteTableProviderTest.java +++ b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/RouteTableProviderTest.java @@ -23,14 +23,6 @@ import com.amazonaws.athena.connector.lambda.data.BlockUtils; import 
com.amazonaws.athena.connectors.aws.cmdb.tables.AbstractTableProviderTest; import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider; -import com.amazonaws.services.ec2.AmazonEC2; -import com.amazonaws.services.ec2.model.DescribeRouteTablesRequest; -import com.amazonaws.services.ec2.model.DescribeRouteTablesResult; -import com.amazonaws.services.ec2.model.PropagatingVgw; -import com.amazonaws.services.ec2.model.Route; -import com.amazonaws.services.ec2.model.RouteTable; -import com.amazonaws.services.ec2.model.RouteTableAssociation; -import com.amazonaws.services.ec2.model.Tag; import org.apache.arrow.vector.complex.reader.FieldReader; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Field; @@ -40,6 +32,14 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.ec2.Ec2Client; +import software.amazon.awssdk.services.ec2.model.DescribeRouteTablesRequest; +import software.amazon.awssdk.services.ec2.model.DescribeRouteTablesResponse; +import software.amazon.awssdk.services.ec2.model.PropagatingVgw; +import software.amazon.awssdk.services.ec2.model.Route; +import software.amazon.awssdk.services.ec2.model.RouteTable; +import software.amazon.awssdk.services.ec2.model.RouteTableAssociation; +import software.amazon.awssdk.services.ec2.model.Tag; import java.util.ArrayList; import java.util.List; @@ -48,7 +48,6 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.mockito.ArgumentMatchers.nullable; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @RunWith(MockitoJUnitRunner.class) @@ -58,7 +57,7 @@ public class RouteTableProviderTest private static final Logger logger = LoggerFactory.getLogger(RouteTableProviderTest.class); @Mock - private AmazonEC2 mockEc2; + private Ec2Client mockEc2; protected String getIdField() { @@ -96,14 +95,12 @@ protected void 
setUpRead() when(mockEc2.describeRouteTables(nullable(DescribeRouteTablesRequest.class))).thenAnswer((InvocationOnMock invocation) -> { DescribeRouteTablesRequest request = (DescribeRouteTablesRequest) invocation.getArguments()[0]; - assertEquals(getIdValue(), request.getRouteTableIds().get(0)); - DescribeRouteTablesResult mockResult = mock(DescribeRouteTablesResult.class); + assertEquals(getIdValue(), request.routeTableIds().get(0)); List values = new ArrayList<>(); values.add(makeRouteTable(getIdValue())); values.add(makeRouteTable(getIdValue())); values.add(makeRouteTable("fake-id")); - when(mockResult.getRouteTables()).thenReturn(values); - return mockResult; + return DescribeRouteTablesResponse.builder().routeTables(values).build(); }); } @@ -159,28 +156,28 @@ private void validate(FieldReader fieldReader) private RouteTable makeRouteTable(String id) { - RouteTable routeTable = new RouteTable(); - routeTable.withRouteTableId(id) - .withOwnerId("owner") - .withVpcId("vpc") - .withAssociations(new RouteTableAssociation().withSubnetId("subnet").withRouteTableId("route_table_id")) - .withTags(new Tag("key", "value")) - .withPropagatingVgws(new PropagatingVgw().withGatewayId("gateway_id")) - .withRoutes(new Route() - .withDestinationCidrBlock("dst_cidr") - .withDestinationIpv6CidrBlock("dst_cidr_v6") - .withDestinationPrefixListId("dst_prefix_list") - .withEgressOnlyInternetGatewayId("egress_igw") - .withGatewayId("gateway") - .withInstanceId("instance_id") - .withInstanceOwnerId("instance_owner") - .withNatGatewayId("nat_gateway") - .withNetworkInterfaceId("interface") - .withOrigin("origin") - .withState("state") - .withTransitGatewayId("transit_gateway") - .withVpcPeeringConnectionId("vpc_peering_con") - ); + RouteTable routeTable = RouteTable.builder() + .routeTableId(id) + .ownerId("owner") + .vpcId("vpc") + .associations(RouteTableAssociation.builder().subnetId("subnet").routeTableId("route_table_id").build()) + 
.tags(Tag.builder().key("key").value("value").build()) + .propagatingVgws(PropagatingVgw.builder().gatewayId("gateway_id").build()) + .routes(Route.builder() + .destinationCidrBlock("dst_cidr") + .destinationIpv6CidrBlock("dst_cidr_v6") + .destinationPrefixListId("dst_prefix_list") + .egressOnlyInternetGatewayId("egress_igw") + .gatewayId("gateway") + .instanceId("instance_id") + .instanceOwnerId("instance_owner") + .natGatewayId("nat_gateway") + .networkInterfaceId("interface") + .origin("origin") + .state("state") + .transitGatewayId("transit_gateway") + .vpcPeeringConnectionId("vpc_peering_con").build() + ).build(); return routeTable; } diff --git a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/SecurityGroupsTableProviderTest.java b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/SecurityGroupsTableProviderTest.java index 471a54af25..ea89933190 100644 --- a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/SecurityGroupsTableProviderTest.java +++ b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/SecurityGroupsTableProviderTest.java @@ -23,15 +23,6 @@ import com.amazonaws.athena.connector.lambda.data.BlockUtils; import com.amazonaws.athena.connectors.aws.cmdb.tables.AbstractTableProviderTest; import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider; -import com.amazonaws.services.ec2.AmazonEC2; -import com.amazonaws.services.ec2.model.DescribeSecurityGroupsRequest; -import com.amazonaws.services.ec2.model.DescribeSecurityGroupsResult; -import com.amazonaws.services.ec2.model.IpPermission; -import com.amazonaws.services.ec2.model.IpRange; -import com.amazonaws.services.ec2.model.Ipv6Range; -import com.amazonaws.services.ec2.model.PrefixListId; -import com.amazonaws.services.ec2.model.SecurityGroup; -import com.amazonaws.services.ec2.model.UserIdGroupPair; import org.apache.arrow.vector.complex.reader.FieldReader; 
import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Field; @@ -41,6 +32,15 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.ec2.Ec2Client; +import software.amazon.awssdk.services.ec2.model.DescribeSecurityGroupsRequest; +import software.amazon.awssdk.services.ec2.model.DescribeSecurityGroupsResponse; +import software.amazon.awssdk.services.ec2.model.IpPermission; +import software.amazon.awssdk.services.ec2.model.IpRange; +import software.amazon.awssdk.services.ec2.model.Ipv6Range; +import software.amazon.awssdk.services.ec2.model.PrefixListId; +import software.amazon.awssdk.services.ec2.model.SecurityGroup; +import software.amazon.awssdk.services.ec2.model.UserIdGroupPair; import java.util.ArrayList; import java.util.List; @@ -49,7 +49,6 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.mockito.ArgumentMatchers.nullable; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @RunWith(MockitoJUnitRunner.class) @@ -59,7 +58,7 @@ public class SecurityGroupsTableProviderTest private static final Logger logger = LoggerFactory.getLogger(SecurityGroupsTableProviderTest.class); @Mock - private AmazonEC2 mockEc2; + private Ec2Client mockEc2; protected String getIdField() { @@ -98,14 +97,12 @@ protected void setUpRead() .thenAnswer((InvocationOnMock invocation) -> { DescribeSecurityGroupsRequest request = (DescribeSecurityGroupsRequest) invocation.getArguments()[0]; - assertEquals(getIdValue(), request.getGroupIds().get(0)); - DescribeSecurityGroupsResult mockResult = mock(DescribeSecurityGroupsResult.class); + assertEquals(getIdValue(), request.groupIds().get(0)); List values = new ArrayList<>(); values.add(makeSecurityGroup(getIdValue())); values.add(makeSecurityGroup(getIdValue())); values.add(makeSecurityGroup("fake-id")); - 
when(mockResult.getSecurityGroups()).thenReturn(values); - return mockResult; + return DescribeSecurityGroupsResponse.builder().securityGroups(values).build(); }); } @@ -161,19 +158,18 @@ private void validate(FieldReader fieldReader) private SecurityGroup makeSecurityGroup(String id) { - return new SecurityGroup() - .withGroupId(id) - .withGroupName("name") - .withDescription("description") - .withIpPermissions(new IpPermission() - .withIpProtocol("protocol") - .withFromPort(100) - .withToPort(100) - .withIpv4Ranges(new IpRange().withCidrIp("cidr").withDescription("description")) - - .withIpv6Ranges(new Ipv6Range().withCidrIpv6("cidr").withDescription("description")) - .withPrefixListIds(new PrefixListId().withPrefixListId("prefix").withDescription("description")) - .withUserIdGroupPairs(new UserIdGroupPair().withGroupId("group_id").withUserId("user_id")) - ); + return SecurityGroup.builder() + .groupId(id) + .groupName("name") + .description("description") + .ipPermissions(IpPermission.builder() + .ipProtocol("protocol") + .fromPort(100) + .toPort(100) + .ipRanges(IpRange.builder().cidrIp("cidr").description("description").build()) + .ipv6Ranges(Ipv6Range.builder().cidrIpv6("cidr").description("description").build()) + .prefixListIds(PrefixListId.builder().prefixListId("prefix").description("description").build()) + .userIdGroupPairs(UserIdGroupPair.builder().groupId("group_id").userId("user_id").build()).build() + ).build(); } } diff --git a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/SubnetTableProviderTest.java b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/SubnetTableProviderTest.java index 04437e13f9..4afd3e4e5e 100644 --- a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/SubnetTableProviderTest.java +++ b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/SubnetTableProviderTest.java @@ -23,11 +23,6 @@ import 
com.amazonaws.athena.connector.lambda.data.BlockUtils; import com.amazonaws.athena.connectors.aws.cmdb.tables.AbstractTableProviderTest; import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider; -import com.amazonaws.services.ec2.AmazonEC2; -import com.amazonaws.services.ec2.model.DescribeSubnetsRequest; -import com.amazonaws.services.ec2.model.DescribeSubnetsResult; -import com.amazonaws.services.ec2.model.Subnet; -import com.amazonaws.services.ec2.model.Tag; import org.apache.arrow.vector.complex.reader.FieldReader; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Field; @@ -37,6 +32,11 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.ec2.Ec2Client; +import software.amazon.awssdk.services.ec2.model.DescribeSubnetsRequest; +import software.amazon.awssdk.services.ec2.model.DescribeSubnetsResponse; +import software.amazon.awssdk.services.ec2.model.Subnet; +import software.amazon.awssdk.services.ec2.model.Tag; import java.util.ArrayList; import java.util.List; @@ -45,7 +45,6 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.mockito.ArgumentMatchers.nullable; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @RunWith(MockitoJUnitRunner.class) @@ -55,7 +54,7 @@ public class SubnetTableProviderTest private static final Logger logger = LoggerFactory.getLogger(SubnetTableProviderTest.class); @Mock - private AmazonEC2 mockEc2; + private Ec2Client mockEc2; protected String getIdField() { @@ -93,15 +92,12 @@ protected void setUpRead() when(mockEc2.describeSubnets(nullable(DescribeSubnetsRequest.class))).thenAnswer((InvocationOnMock invocation) -> { DescribeSubnetsRequest request = (DescribeSubnetsRequest) invocation.getArguments()[0]; - assertEquals(getIdValue(), request.getSubnetIds().get(0)); - DescribeSubnetsResult mockResult = 
mock(DescribeSubnetsResult.class); + assertEquals(getIdValue(), request.subnetIds().get(0)); List values = new ArrayList<>(); values.add(makeSubnet(getIdValue())); values.add(makeSubnet(getIdValue())); values.add(makeSubnet("fake-id")); - when(mockResult.getSubnets()).thenReturn(values); - - return mockResult; + return DescribeSubnetsResponse.builder().subnets(values).build(); }); } @@ -157,16 +153,16 @@ private void validate(FieldReader fieldReader) private Subnet makeSubnet(String id) { - return new Subnet() - .withSubnetId(id) - .withAvailabilityZone("availability_zone") - .withCidrBlock("cidr_block") - .withAvailableIpAddressCount(100) - .withDefaultForAz(true) - .withMapPublicIpOnLaunch(true) - .withOwnerId("owner") - .withState("state") - .withTags(new Tag().withKey("key").withValue("value")) - .withVpcId("vpc"); + return Subnet.builder() + .subnetId(id) + .availabilityZone("availability_zone") + .cidrBlock("cidr_block") + .availableIpAddressCount(100) + .defaultForAz(true) + .mapPublicIpOnLaunch(true) + .ownerId("owner") + .state("state") + .tags(Tag.builder().key("key").value("value").build()) + .vpcId("vpc").build(); } } diff --git a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/VpcTableProviderTest.java b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/VpcTableProviderTest.java index 4abb29ccd1..900fdf67ca 100644 --- a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/VpcTableProviderTest.java +++ b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/VpcTableProviderTest.java @@ -23,11 +23,6 @@ import com.amazonaws.athena.connector.lambda.data.BlockUtils; import com.amazonaws.athena.connectors.aws.cmdb.tables.AbstractTableProviderTest; import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider; -import com.amazonaws.services.ec2.AmazonEC2; -import com.amazonaws.services.ec2.model.DescribeVpcsRequest; -import 
com.amazonaws.services.ec2.model.DescribeVpcsResult; -import com.amazonaws.services.ec2.model.Tag; -import com.amazonaws.services.ec2.model.Vpc; import org.apache.arrow.vector.complex.reader.FieldReader; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Field; @@ -37,6 +32,11 @@ import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.ec2.Ec2Client; +import software.amazon.awssdk.services.ec2.model.DescribeVpcsRequest; +import software.amazon.awssdk.services.ec2.model.DescribeVpcsResponse; +import software.amazon.awssdk.services.ec2.model.Tag; +import software.amazon.awssdk.services.ec2.model.Vpc; import java.util.ArrayList; import java.util.List; @@ -45,7 +45,6 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.mockito.ArgumentMatchers.nullable; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @RunWith(MockitoJUnitRunner.class) @@ -55,7 +54,7 @@ public class VpcTableProviderTest private static final Logger logger = LoggerFactory.getLogger(VpcTableProviderTest.class); @Mock - private AmazonEC2 mockEc2; + private Ec2Client mockEc2; protected String getIdField() { @@ -93,14 +92,12 @@ protected void setUpRead() when(mockEc2.describeVpcs(nullable(DescribeVpcsRequest.class))).thenAnswer((InvocationOnMock invocation) -> { DescribeVpcsRequest request = (DescribeVpcsRequest) invocation.getArguments()[0]; - assertEquals(getIdValue(), request.getVpcIds().get(0)); - DescribeVpcsResult mockResult = mock(DescribeVpcsResult.class); + assertEquals(getIdValue(), request.vpcIds().get(0)); List values = new ArrayList<>(); values.add(makeVpc(getIdValue())); values.add(makeVpc(getIdValue())); values.add(makeVpc("fake-id")); - when(mockResult.getVpcs()).thenReturn(values); - return mockResult; + return DescribeVpcsResponse.builder().vpcs(values).build(); }); } @@ -156,15 
+153,15 @@ private void validate(FieldReader fieldReader) private Vpc makeVpc(String id) { - Vpc vpc = new Vpc(); - vpc.withVpcId(id) - .withCidrBlock("cidr_block") - .withDhcpOptionsId("dhcp_opts") - .withInstanceTenancy("tenancy") - .withOwnerId("owner") - .withState("state") - .withIsDefault(true) - .withTags(new Tag("key", "valye")); + Vpc vpc = Vpc.builder() + .vpcId(id) + .cidrBlock("cidr_block") + .dhcpOptionsId("dhcp_opts") + .instanceTenancy("tenancy") + .ownerId("owner") + .state("state") + .isDefault(true) + .tags(Tag.builder().key("key").value("valye").build()).build(); return vpc; } From a843fcd19eef70a942ec3d4347239cc5b1c39576 Mon Sep 17 00:00:00 2001 From: Aimery Methena <159072740+aimethed@users.noreply.github.com> Date: Thu, 26 Sep 2024 16:15:10 -0400 Subject: [PATCH 46/87] Fix teradata (#2294) --- athena-teradata/athena-teradata.yaml | 5 ----- athena-teradata/pom.xml | 5 +++++ 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/athena-teradata/athena-teradata.yaml b/athena-teradata/athena-teradata.yaml index 3226e6081d..01ee09c1db 100644 --- a/athena-teradata/athena-teradata.yaml +++ b/athena-teradata/athena-teradata.yaml @@ -30,9 +30,6 @@ Parameters: Description: 'The prefix within SpillBucket where this function can spill data.' Type: String Default: athena-spill - LambdaJDBCLayername: - Description: 'Lambda JDBC layer Name. Must be ARN of layer' - Type: String LambdaTimeout: Description: 'Maximum Lambda invocation runtime in seconds. 
(min 1 - 900 max)' Default: 900 @@ -77,8 +74,6 @@ Resources: default: !Ref DefaultConnectionString partitioncount: !Ref PartitionCount FunctionName: !Ref LambdaFunctionName - Layers: - - !Ref LambdaJDBCLayername PackageType: "Image" ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-teradata:2022.47.1' Description: "Enables Amazon Athena to communicate with Teradata using JDBC" diff --git a/athena-teradata/pom.xml b/athena-teradata/pom.xml index 25f7489180..c44fd9da86 100644 --- a/athena-teradata/pom.xml +++ b/athena-teradata/pom.xml @@ -59,6 +59,11 @@ ${mockito.version} test + + com.teradata.jdbc + terajdbc + 20.00.00.34 + From 0f4beb2ac4d0961b6c32cab70e4e599a1c482c91 Mon Sep 17 00:00:00 2001 From: ejeffrli Date: Fri, 27 Sep 2024 16:19:39 -0400 Subject: [PATCH 47/87] Use prod --- .../connector/lambda/connection/EnvironmentProperties.java | 1 - 1 file changed, 1 deletion(-) diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java index 3aa27b449f..0423134288 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java @@ -67,7 +67,6 @@ public Connection getGlueConnection(String glueConnectionName) throws RuntimeExc { try { GlueClient awsGlue = GlueClient.builder() - .endpointOverride(new URI("https://glue-gamma.us-west-2.amazonaws.com")) .httpClientBuilder(ApacheHttpClient .builder() .connectionTimeout(Duration.ofMillis(CONNECT_TIMEOUT))) From ee408a2aa9778c94f6f6be8ae40c9de907eb4e1e Mon Sep 17 00:00:00 2001 From: Aimery Methena <159072740+aimethed@users.noreply.github.com> Date: Fri, 27 Sep 2024 16:39:45 -0400 Subject: [PATCH 48/87] V2 final changes (#2297) 
Co-authored-by: Trianz-Akshay <108925344+Trianz-Akshay@users.noreply.github.com> --- .../metrics/MetricsMetadataHandler.java | 2 +- .../dynamodb/DynamoDBMetadataHandler.java | 11 +- .../dynamodb/DynamoDBRecordHandler.java | 8 +- .../AWSRequestSigningApacheInterceptor.java | 106 ++++++++++-------- .../elasticsearch/AwsRestHighLevelClient.java | 24 ++-- .../AwsRestHighLevelClientFactory.java | 4 +- athena-federation-sdk/pom.xml | 27 ----- .../CrossAccountCredentialsProvider.java | 16 +-- .../lambda/data/DateTimeFormatterUtil.java | 6 +- .../lambda/QueryStatusCheckerTest.java | 4 +- athena-jdbc/pom.xml | 6 +- .../connectors/kafka/KafkaUtilsTest.java | 19 ++-- athena-msk/pom.xml | 12 +- .../vertica/VerticaMetadataHandler.java | 2 +- pom.xml | 3 +- 15 files changed, 121 insertions(+), 129 deletions(-) diff --git a/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsMetadataHandler.java b/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsMetadataHandler.java index 1efc065bf9..2b64e7c129 100644 --- a/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsMetadataHandler.java +++ b/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsMetadataHandler.java @@ -42,7 +42,6 @@ import com.amazonaws.athena.connectors.cloudwatch.metrics.tables.MetricSamplesTable; import com.amazonaws.athena.connectors.cloudwatch.metrics.tables.MetricsTable; import com.amazonaws.athena.connectors.cloudwatch.metrics.tables.Table; -import com.amazonaws.util.CollectionUtils; import com.google.common.collect.Lists; import org.apache.arrow.util.VisibleForTesting; import org.slf4j.Logger; @@ -54,6 +53,7 @@ import software.amazon.awssdk.services.cloudwatch.model.Metric; import software.amazon.awssdk.services.cloudwatch.model.MetricStat; import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient; +import 
software.amazon.awssdk.utils.CollectionUtils; import java.util.ArrayList; import java.util.Collections; diff --git a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBMetadataHandler.java b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBMetadataHandler.java index 29f772f5fd..4bdcda50fc 100644 --- a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBMetadataHandler.java +++ b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBMetadataHandler.java @@ -56,7 +56,7 @@ import com.amazonaws.athena.connectors.dynamodb.util.DDBTableUtils; import com.amazonaws.athena.connectors.dynamodb.util.DDBTypeUtils; import com.amazonaws.athena.connectors.dynamodb.util.IncrementingValueNameProducer; -import com.amazonaws.util.json.Jackson; +import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableMap; import org.apache.arrow.vector.complex.reader.FieldReader; @@ -446,7 +446,14 @@ private void precomputeAdditionalMetadata(Set columnsToIgnore, Map expressionAttributeValues = new HashMap<>(); if (rangeKeyFilter != null || nonKeyFilter != null) { try { - expressionAttributeNames.putAll(Jackson.getObjectMapper().readValue(split.getProperty(EXPRESSION_NAMES_METADATA), STRING_MAP_TYPE_REFERENCE)); + ObjectMapper objectMapper = new ObjectMapper(); + expressionAttributeNames.putAll(objectMapper.readValue(split.getProperty(EXPRESSION_NAMES_METADATA), STRING_MAP_TYPE_REFERENCE)); expressionAttributeValues.putAll(EnhancedDocument.fromJson(split.getProperty(EXPRESSION_VALUES_METADATA)).toMap()); } catch (IOException e) { @@ -388,7 +389,8 @@ private ScanRequest buildScanRequest(Split split, String tableName, Schema schem Map expressionAttributeValues = new HashMap<>(); if (rangeKeyFilter != null || nonKeyFilter != null) { try { - 
expressionAttributeNames.putAll(Jackson.getObjectMapper().readValue(split.getProperty(EXPRESSION_NAMES_METADATA), STRING_MAP_TYPE_REFERENCE)); + ObjectMapper objectMapper = new ObjectMapper(); + expressionAttributeNames.putAll(objectMapper.readValue(split.getProperty(EXPRESSION_NAMES_METADATA), STRING_MAP_TYPE_REFERENCE)); expressionAttributeValues.putAll(EnhancedDocument.fromJson(split.getProperty(EXPRESSION_VALUES_METADATA)).toMap()); } catch (IOException e) { diff --git a/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/AWSRequestSigningApacheInterceptor.java b/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/AWSRequestSigningApacheInterceptor.java index 4108d663b8..2c3f58e215 100644 --- a/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/AWSRequestSigningApacheInterceptor.java +++ b/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/AWSRequestSigningApacheInterceptor.java @@ -19,10 +19,6 @@ */ package com.amazonaws.athena.connectors.elasticsearch; -import com.amazonaws.DefaultRequest; -import com.amazonaws.auth.AWSCredentialsProvider; -import com.amazonaws.auth.Signer; -import com.amazonaws.http.HttpMethodName; import org.apache.http.Header; import org.apache.http.HttpEntityEnclosingRequest; import org.apache.http.HttpException; @@ -34,9 +30,15 @@ import org.apache.http.entity.BasicHttpEntity; import org.apache.http.message.BasicHeader; import org.apache.http.protocol.HttpContext; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.auth.aws.signer.AwsV4HttpSigner; +import software.amazon.awssdk.http.auth.spi.signer.SignedRequest; import java.io.ByteArrayInputStream; import java.io.IOException; +import java.io.InputStream; import java.net.URI; import java.net.URISyntaxException; 
import java.util.ArrayList; @@ -47,8 +49,8 @@ import static org.apache.http.protocol.HttpCoreContext.HTTP_TARGET_HOST; /** - * An {@link HttpRequestInterceptor} that signs requests using any AWS {@link Signer} - * and {@link AWSCredentialsProvider}. + * An {@link HttpRequestInterceptor} that signs requests using any AWS {@link AwsV4HttpSigner} + * and {@link AwsCredentialsProvider}. */ public class AWSRequestSigningApacheInterceptor implements HttpRequestInterceptor { @@ -61,34 +63,35 @@ public class AWSRequestSigningApacheInterceptor implements HttpRequestIntercepto /** * The particular signer implementation. */ - private final Signer signer; + private final AwsV4HttpSigner signer; /** * The source of AWS credentials for signing. */ - private final AWSCredentialsProvider awsCredentialsProvider; + private final AwsCredentialsProvider awsCredentialsProvider; + private final String region; /** - * - * @param service service that we're connecting to - * @param signer particular signer implementation + * @param service service that we're connecting to + * @param signer particular signer implementation * @param awsCredentialsProvider source of AWS credentials for signing */ public AWSRequestSigningApacheInterceptor(final String service, - final Signer signer, - final AWSCredentialsProvider awsCredentialsProvider) + final AwsV4HttpSigner signer, + final AwsCredentialsProvider awsCredentialsProvider, + final String region) { this.service = service; this.signer = signer; this.awsCredentialsProvider = awsCredentialsProvider; + this.region = region; } /** * {@inheritDoc} */ @Override - public void process(final HttpRequest request, final HttpContext context) - throws HttpException, IOException + public void process(final HttpRequest request, final HttpContext context) throws HttpException, IOException { URIBuilder uriBuilder; try { @@ -98,55 +101,61 @@ public void process(final HttpRequest request, final HttpContext context) throw new IOException("Invalid URI", e); } - // 
Copy Apache HttpRequest to AWS DefaultRequest - DefaultRequest signableRequest = new DefaultRequest<>(service); - - HttpHost host = (HttpHost) context.getAttribute(HTTP_TARGET_HOST); - if (host != null) { - signableRequest.setEndpoint(URI.create(host.toURI())); - } - final HttpMethodName httpMethod = - HttpMethodName.fromValue(request.getRequestLine().getMethod()); - signableRequest.setHttpMethod(httpMethod); + // Build the SdkHttpFullRequest + SdkHttpFullRequest.Builder signableRequest = null; try { - signableRequest.setResourcePath(uriBuilder.build().getRawPath()); + signableRequest = SdkHttpFullRequest.builder() + .method(SdkHttpMethod.fromValue(request.getRequestLine().getMethod())) // Set HTTP Method + .encodedPath(uriBuilder.build().getRawPath()) // Set Resource Path + .rawQueryParameters(nvpToMapParams(uriBuilder.getQueryParams())) // Set Query Parameters + .headers(headerArrayToMap(request.getAllHeaders())); } catch (URISyntaxException e) { throw new IOException("Invalid URI", e); } + // Set the endpoint (host) if present in the context + HttpHost host = (HttpHost) context.getAttribute(HTTP_TARGET_HOST); + if (host != null) { + signableRequest.uri(URI.create(host.toURI())); // Set the base endpoint URL + } + + // Handle content/body if it's an HttpEntityEnclosingRequest if (request instanceof HttpEntityEnclosingRequest) { - HttpEntityEnclosingRequest httpEntityEnclosingRequest = - (HttpEntityEnclosingRequest) request; + HttpEntityEnclosingRequest httpEntityEnclosingRequest = (HttpEntityEnclosingRequest) request; if (httpEntityEnclosingRequest.getEntity() != null) { - signableRequest.setContent(httpEntityEnclosingRequest.getEntity().getContent()); + InputStream contentStream = httpEntityEnclosingRequest.getEntity().getContent(); + signableRequest.contentStreamProvider(() -> contentStream); // Set content provider } else { - // This is a workaround from here: https://github.com/aws/aws-sdk-java/issues/2078 - signableRequest.setContent(new 
ByteArrayInputStream(new byte[0])); + // Workaround: provide an empty stream if no entity is present + signableRequest.contentStreamProvider(() -> new ByteArrayInputStream(new byte[0])); } } - signableRequest.setParameters(nvpToMapParams(uriBuilder.getQueryParams())); - signableRequest.setHeaders(headerArrayToMap(request.getAllHeaders())); - - // Sign it - signer.sign(signableRequest, awsCredentialsProvider.getCredentials()); - // Now copy everything back - request.setHeaders(mapToHeaderArray(signableRequest.getHeaders())); + // Sign the request + SdkHttpFullRequest.Builder finalSignableRequest = signableRequest; + SignedRequest signedRequest = + signer.sign(r -> r.identity(awsCredentialsProvider.resolveCredentials()) + .request(finalSignableRequest.build()) + .payload(finalSignableRequest.contentStreamProvider()) + .putProperty(AwsV4HttpSigner.SERVICE_SIGNING_NAME, service) + .putProperty(AwsV4HttpSigner.REGION_NAME, region)); // Required for S3 only + // Now copy everything back to the original request (including signed headers) + request.setHeaders(mapToHeaderArray(signedRequest.request().headers())); + + // If the request has an entity (body), copy it back to the original request if (request instanceof HttpEntityEnclosingRequest) { - HttpEntityEnclosingRequest httpEntityEnclosingRequest = - (HttpEntityEnclosingRequest) request; + HttpEntityEnclosingRequest httpEntityEnclosingRequest = (HttpEntityEnclosingRequest) request; if (httpEntityEnclosingRequest.getEntity() != null) { BasicHttpEntity basicHttpEntity = new BasicHttpEntity(); - basicHttpEntity.setContent(signableRequest.getContent()); + basicHttpEntity.setContent(signableRequest.contentStreamProvider().newStream()); httpEntityEnclosingRequest.setEntity(basicHttpEntity); } } } /** - * * @param params list of HTTP query params as NameValuePairs * @return a multimap of HTTP query params */ @@ -165,12 +174,13 @@ private static Map> nvpToMapParams(final List headerArrayToMap(final Header[] headers) + private 
static Map> headerArrayToMap(final Header[] headers) { - Map headersMap = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); + Map> headersMap = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); for (Header header : headers) { if (!skipHeader(header)) { - headersMap.put(header.getName(), header.getValue()); + // If the header name already exists, add the new value to the list + headersMap.computeIfAbsent(header.getName(), k -> new ArrayList<>()).add(header.getValue()); } } return headersMap; @@ -191,12 +201,12 @@ private static boolean skipHeader(final Header header) * @param mapHeaders Map of header entries * @return modeled Header objects */ - private static Header[] mapToHeaderArray(final Map mapHeaders) + private static Header[] mapToHeaderArray(final Map> mapHeaders) { Header[] headers = new Header[mapHeaders.size()]; int i = 0; - for (Map.Entry headerEntry : mapHeaders.entrySet()) { - headers[i++] = new BasicHeader(headerEntry.getKey(), headerEntry.getValue()); + for (Map.Entry> headerEntry : mapHeaders.entrySet()) { + headers[i++] = new BasicHeader(headerEntry.getKey(), headerEntry.getValue().get(0)); } return headers; } diff --git a/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/AwsRestHighLevelClient.java b/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/AwsRestHighLevelClient.java index 3142dfda33..39c4ddd258 100644 --- a/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/AwsRestHighLevelClient.java +++ b/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/AwsRestHighLevelClient.java @@ -19,8 +19,6 @@ */ package com.amazonaws.athena.connectors.elasticsearch; -import com.amazonaws.auth.AWS4Signer; -import com.amazonaws.auth.AWSCredentialsProvider; import com.google.common.base.Splitter; import org.apache.http.HttpHost; import org.apache.http.HttpRequestInterceptor; @@ -46,6 +44,9 @@ import org.elasticsearch.search.SearchHit; import 
org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.http.auth.aws.signer.AwsV4HttpSigner; +import software.amazon.awssdk.services.elasticsearch.ElasticsearchClient; import java.io.IOException; import java.util.LinkedHashMap; @@ -196,7 +197,7 @@ public static class Builder { private final String endpoint; private final RestClientBuilder clientBuilder; - private final AWS4Signer signer; + private final AwsV4HttpSigner signer; private final Splitter domainSplitter; /** @@ -207,7 +208,7 @@ public Builder(String endpoint) { this.endpoint = endpoint; this.clientBuilder = RestClient.builder(HttpHost.create(this.endpoint)); - this.signer = new AWS4Signer(); + this.signer = AwsV4HttpSigner.create(); this.domainSplitter = Splitter.on("."); } @@ -216,7 +217,7 @@ public Builder(String endpoint) * @param credentialsProvider is the AWS credentials provider. * @return self. */ - public Builder withCredentials(AWSCredentialsProvider credentialsProvider) + public Builder withCredentials(AwsCredentialsProvider credentialsProvider) { /** * endpoint: @@ -231,16 +232,13 @@ public Builder withCredentials(AWSCredentialsProvider credentialsProvider) */ List domainSplits = domainSplitter.splitToList(endpoint); + HttpRequestInterceptor interceptor; if (domainSplits.size() > 1) { - signer.setRegionName(domainSplits.get(1)); - signer.setServiceName("es"); - } - - HttpRequestInterceptor interceptor = - new AWSRequestSigningApacheInterceptor(signer.getServiceName(), signer, credentialsProvider); + interceptor = new AWSRequestSigningApacheInterceptor(ElasticsearchClient.SERVICE_NAME, signer, credentialsProvider, domainSplits.get(1)); - clientBuilder.setHttpClientConfigCallback(httpClientBuilder -> httpClientBuilder - .addInterceptorLast(interceptor)); + clientBuilder.setHttpClientConfigCallback(httpClientBuilder -> httpClientBuilder + .addInterceptorLast(interceptor)); + } return this; } diff 
--git a/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/AwsRestHighLevelClientFactory.java b/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/AwsRestHighLevelClientFactory.java index 422c3884dc..6286d64eda 100644 --- a/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/AwsRestHighLevelClientFactory.java +++ b/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/AwsRestHighLevelClientFactory.java @@ -19,9 +19,9 @@ */ package com.amazonaws.athena.connectors.elasticsearch; -import com.amazonaws.auth.DefaultAWSCredentialsProviderChain; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -100,7 +100,7 @@ private AwsRestHighLevelClient createClient(String endpoint) { if (useAwsCredentials) { return new AwsRestHighLevelClient.Builder(endpoint) - .withCredentials(new DefaultAWSCredentialsProviderChain()).build(); + .withCredentials(DefaultCredentialsProvider.create()).build(); } else { Matcher credentials = credentialsPattern.matcher(endpoint); diff --git a/athena-federation-sdk/pom.xml b/athena-federation-sdk/pom.xml index 273530c5f2..75269ede6b 100644 --- a/athena-federation-sdk/pom.xml +++ b/athena-federation-sdk/pom.xml @@ -26,33 +26,6 @@ - - com.amazonaws - aws-java-sdk-core - ${aws-sdk.version} - - - com.fasterxml.jackson.datatype - jackson-datatype-jsr310 - - - com.fasterxml.jackson.dataformat - jackson-dataformat-cbor - - - com.fasterxml.jackson.core - jackson-core - - - com.fasterxml.jackson.core - jackson-databind - - - com.fasterxml.jackson.core - jackson-annotations - - - software.amazon.awssdk apache-client diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/credentials/CrossAccountCredentialsProvider.java 
b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/credentials/CrossAccountCredentialsProvider.java index d71d1b1657..309082d1d5 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/credentials/CrossAccountCredentialsProvider.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/credentials/CrossAccountCredentialsProvider.java @@ -19,12 +19,12 @@ */ package com.amazonaws.athena.connector.credentials; -import com.amazonaws.auth.AWSCredentialsProvider; -import com.amazonaws.auth.AWSStaticCredentialsProvider; -import com.amazonaws.auth.BasicSessionCredentials; -import com.amazonaws.auth.DefaultAWSCredentialsProviderChain; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; +import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; import software.amazon.awssdk.services.sts.StsClient; import software.amazon.awssdk.services.sts.model.AssumeRoleRequest; import software.amazon.awssdk.services.sts.model.AssumeRoleResponse; @@ -39,7 +39,7 @@ public class CrossAccountCredentialsProvider private CrossAccountCredentialsProvider() {} - public static AWSCredentialsProvider getCrossAccountCredentialsIfPresent(Map configOptions, String roleSessionName) + public static AwsCredentialsProvider getCrossAccountCredentialsIfPresent(Map configOptions, String roleSessionName) { if (configOptions.containsKey(CROSS_ACCOUNT_ROLE_ARN_CONFIG)) { logger.debug("Found cross-account role arn to assume."); @@ -50,9 +50,9 @@ public static AWSCredentialsProvider getCrossAccountCredentialsIfPresent(Map - com.amazonaws - aws-java-sdk-redshift - ${aws-sdk.version} + software.amazon.awssdk + redshift + ${aws-sdk-v2.version} test diff --git 
a/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaUtilsTest.java b/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaUtilsTest.java index 7baacf6180..02eb8d8fc8 100644 --- a/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaUtilsTest.java +++ b/athena-kafka/src/test/java/com/amazonaws/athena/connectors/kafka/KafkaUtilsTest.java @@ -21,9 +21,6 @@ import com.amazonaws.athena.connectors.kafka.dto.SplitParameters; import com.amazonaws.athena.connectors.kafka.dto.TopicResultSet; -import com.amazonaws.auth.AWSStaticCredentialsProvider; -import com.amazonaws.auth.BasicAWSCredentials; -import com.amazonaws.auth.DefaultAWSCredentialsProviderChain; import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.arrow.vector.types.Types; @@ -42,6 +39,10 @@ import org.mockito.junit.MockitoJUnitRunner; import org.mockito.MockedConstruction; import org.mockito.MockedStatic; + +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; import software.amazon.awssdk.core.ResponseInputStream; import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.model.GetObjectRequest; @@ -81,13 +82,13 @@ public class KafkaUtilsTest { GetSecretValueResponse secretValueResponse; @Mock - DefaultAWSCredentialsProviderChain chain; + DefaultCredentialsProvider chain; @Mock - AWSStaticCredentialsProvider credentialsProvider; + StaticCredentialsProvider credentialsProvider; @Mock - BasicAWSCredentials credentials; + AwsBasicCredentials credentials; @Mock S3Client amazonS3Client; @@ -100,7 +101,7 @@ public class KafkaUtilsTest { "certificates_s3_reference", "s3://kafka-connector-test-bucket/kafkafiles/", "secrets_manager_secret", "Kafka_afq"); - private MockedConstruction 
mockedDefaultCredentials; + private MockedConstruction mockedDefaultCredentials; private MockedStatic mockedS3ClientBuilder; private MockedStatic mockedSecretsManagerClient; @@ -126,9 +127,9 @@ public void init() throws Exception { Mockito.when(secretValueResponse.secretString()).thenReturn(creds); Mockito.when(awsSecretsManager.getSecretValue(Mockito.isA(GetSecretValueRequest.class))).thenReturn(secretValueResponse); - mockedDefaultCredentials = Mockito.mockConstruction(DefaultAWSCredentialsProviderChain.class, + mockedDefaultCredentials = Mockito.mockConstruction(DefaultCredentialsProvider.class, (mock, context) -> { - Mockito.when(mock.getCredentials()).thenReturn(credentials); + Mockito.when(mock.resolveCredentials()).thenReturn(credentials); }); mockedS3ClientBuilder = Mockito.mockStatic(S3Client.class); mockedS3ClientBuilder.when(()-> S3Client.create()).thenReturn(amazonS3Client); diff --git a/athena-msk/pom.xml b/athena-msk/pom.xml index eb2ef81f98..42aa8974da 100644 --- a/athena-msk/pom.xml +++ b/athena-msk/pom.xml @@ -15,6 +15,13 @@ + + software.amazon.awssdk + bom + ${aws-sdk-v2.version} + pom + import + org.jetbrains.kotlin kotlin-stdlib @@ -58,11 +65,6 @@ 5.1.0 runtime - - com.amazonaws - aws-java-sdk-sts - 1.12.772 - software.amazon.msk aws-msk-iam-auth diff --git a/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaMetadataHandler.java b/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaMetadataHandler.java index 4e691900aa..da52327eaf 100644 --- a/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaMetadataHandler.java +++ b/athena-vertica/src/main/java/com/amazonaws/athena/connectors/vertica/VerticaMetadataHandler.java @@ -20,7 +20,6 @@ package com.amazonaws.athena.connectors.vertica; -import com.amazonaws.SdkClientException; import com.amazonaws.athena.connector.lambda.QueryStatusChecker; import com.amazonaws.athena.connector.lambda.data.Block; import 
com.amazonaws.athena.connector.lambda.data.BlockAllocator; @@ -57,6 +56,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.stringtemplate.v4.ST; +import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.regions.providers.DefaultAwsRegionProviderChain; import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.model.ListObjectsRequest; diff --git a/pom.xml b/pom.xml index 8567ef0a39..b7096fad04 100644 --- a/pom.xml +++ b/pom.xml @@ -14,8 +14,7 @@ 11 3.13.0 - 1.12.772 - 2.25.56 + 2.28.9 1.2.2 1.6.0 1.204.0 From 63a6075836ffde1e2e4278f2b2f1cc56a874deb5 Mon Sep 17 00:00:00 2001 From: Aimery Methena Date: Fri, 27 Sep 2024 16:58:32 -0400 Subject: [PATCH 49/87] fix checkstyle --- .../connector/lambda/connection/EnvironmentProperties.java | 1 - 1 file changed, 1 deletion(-) diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java index 0423134288..a8b004d403 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java @@ -29,7 +29,6 @@ import software.amazon.awssdk.services.glue.model.GetConnectionRequest; import software.amazon.awssdk.services.glue.model.GetConnectionResponse; -import java.net.URI; import java.time.Duration; import java.util.HashMap; import java.util.Map; From a78742667dde9af5a888b2fa1be08567d8eac280 Mon Sep 17 00:00:00 2001 From: ejeffrli <144148373+ejeffrli@users.noreply.github.com> Date: Wed, 2 Oct 2024 11:27:29 -0400 Subject: [PATCH 50/87] Fix lambda issues with glue (#2308) --- .../connection/EnvironmentProperties.java | 8 ++- .../DatabaseConnectionConfigBuilder.java | 60 +++++++------------ 
.../DatabaseConnectionConfigBuilderTest.java | 35 ----------- .../SnowflakeEnvironmentProperties.java | 26 ++++++++ 4 files changed, 54 insertions(+), 75 deletions(-) diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java index a8b004d403..569972e1b4 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java @@ -30,6 +30,7 @@ import software.amazon.awssdk.services.glue.model.GetConnectionResponse; import java.time.Duration; +import java.util.Arrays; import java.util.HashMap; import java.util.Map; @@ -51,7 +52,7 @@ public Map createEnvironment() throws RuntimeException HashMap connectionEnvironment = new HashMap<>(); if (StringUtils.isNotBlank(glueConnectionName)) { Connection connection = getGlueConnection(glueConnectionName); - Map connectionProperties = connection.connectionPropertiesAsStrings(); + Map connectionProperties = new HashMap<>(connection.connectionPropertiesAsStrings()); connectionProperties.putAll(authenticationConfigurationToMap(connection.authenticationConfiguration())); connectionEnvironment.putAll(connectionPropertiesToEnvironment(connectionProperties)); @@ -86,7 +87,10 @@ private Map authenticationConfigurationToMap(AuthenticationConfi if (StringUtils.isNotBlank(auth.secretArn())) { String[] splitArn = auth.secretArn().split(":"); - authMap.put(SECRET_NAME, splitArn[splitArn.length - 1]); + String[] secretNameWithRandom = splitArn[splitArn.length - 1].split("-"); // 6 random characters at end. 
at least length of 2 + String[] secretNameArray = Arrays.copyOfRange(secretNameWithRandom, 0, secretNameWithRandom.length - 1); + String secretName = String.join("-", secretNameArray); // add back the dashes + authMap.put(SECRET_NAME, secretName); } return authMap; } diff --git a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/connection/DatabaseConnectionConfigBuilder.java b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/connection/DatabaseConnectionConfigBuilder.java index 5594e14e5e..323472c25f 100644 --- a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/connection/DatabaseConnectionConfigBuilder.java +++ b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/connection/DatabaseConnectionConfigBuilder.java @@ -43,10 +43,6 @@ public class DatabaseConnectionConfigBuilder private static final String SECRET_PATTERN_STRING = "\\$\\{(([a-z-]+!)?[a-zA-Z0-9:/_+=.@-]+)}"; public static final Pattern SECRET_PATTERN = Pattern.compile(SECRET_PATTERN_STRING); - // Config variables used when glue connection supplements connection properties - public static final String DEFAULT_JDBC_CONNECTION_URL_PROPERTY = "default_connection_string"; - public static final String DEFAULT_SECRET_PROPERTY = "secret_name"; - public static final String DEFAULT_GLUE_CONNECTION = "glue_connection"; private Map properties; @@ -93,37 +89,33 @@ public DatabaseConnectionConfigBuilder properties(final Map prop public List build() { Validate.notEmpty(this.properties, "properties must not be empty"); - Validate.isTrue(properties.containsKey(DEFAULT_CONNECTION_STRING_PROPERTY) || properties.containsKey(DEFAULT_JDBC_CONNECTION_URL_PROPERTY), "Default connection string must be present"); + Validate.isTrue(properties.containsKey(DEFAULT_CONNECTION_STRING_PROPERTY), "Default connection string must be present"); List databaseConnectionConfigs = new ArrayList<>(); int numberOfCatalogs = 0; - if 
(!StringUtils.isBlank(properties.get(DEFAULT_GLUE_CONNECTION))) { - databaseConnectionConfigs.add(extractDatabaseGlueConnectionConfig(DEFAULT_CONNECTION_STRING_PROPERTY)); - numberOfCatalogs++; - } - else { - for (Map.Entry property : this.properties.entrySet()) { - final String key = property.getKey(); - final String value = property.getValue(); - - String catalogName; - if (DEFAULT_CONNECTION_STRING_PROPERTY.equals(key.toLowerCase())) { - catalogName = key.toLowerCase(); - } - else if (key.endsWith(CONNECTION_STRING_PROPERTY_SUFFIX)) { - catalogName = key.replace(CONNECTION_STRING_PROPERTY_SUFFIX, ""); - } - else { - // unknown property ignore - continue; - } - databaseConnectionConfigs.add(extractDatabaseConnectionConfig(catalogName, value)); + for (Map.Entry property : this.properties.entrySet()) { + final String key = property.getKey(); + final String value = property.getValue(); - numberOfCatalogs++; - if (numberOfCatalogs > MUX_CATALOG_LIMIT) { - throw new RuntimeException("Too many database instances in mux. Max supported is " + MUX_CATALOG_LIMIT); - } + String catalogName; + if (DEFAULT_CONNECTION_STRING_PROPERTY.equals(key.toLowerCase())) { + catalogName = key.toLowerCase(); + } + else if (key.endsWith(CONNECTION_STRING_PROPERTY_SUFFIX)) { + catalogName = key.replace(CONNECTION_STRING_PROPERTY_SUFFIX, ""); + } + else { + // unknown property ignore + continue; + } + databaseConnectionConfigs.add(extractDatabaseConnectionConfig(catalogName, value)); + + if (StringUtils.isBlank(properties.get(DEFAULT_GLUE_CONNECTION))) { + numberOfCatalogs++; // Mux is not supported with glue. Do not count + } + if (numberOfCatalogs > MUX_CATALOG_LIMIT) { + throw new RuntimeException("Too many database instances in mux. 
Max supported is " + MUX_CATALOG_LIMIT); } } @@ -153,14 +145,6 @@ private DatabaseConnectionConfig extractDatabaseConnectionConfig(final String ca .orElseGet(() -> new DatabaseConnectionConfig(catalogName, this.engine, jdbcConnectionString)); } - private DatabaseConnectionConfig extractDatabaseGlueConnectionConfig(final String catalogName) - { - final String jdbcConnectionString = properties.get(DEFAULT_JDBC_CONNECTION_URL_PROPERTY); - final String secretName = properties.get(DEFAULT_SECRET_PROPERTY); - Validate.notBlank(jdbcConnectionString, "JDBC Connection string must not be blank."); - return StringUtils.isBlank(secretName) ? new DatabaseConnectionConfig(catalogName, this.engine, jdbcConnectionString) : new DatabaseConnectionConfig(catalogName, this.engine, jdbcConnectionString, secretName); - } - private Optional extractSecretName(final String jdbcConnectionString) { Matcher secretMatcher = SECRET_PATTERN.matcher(jdbcConnectionString); diff --git a/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/connection/DatabaseConnectionConfigBuilderTest.java b/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/connection/DatabaseConnectionConfigBuilderTest.java index b17a88770b..5104571b56 100644 --- a/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/connection/DatabaseConnectionConfigBuilderTest.java +++ b/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/connection/DatabaseConnectionConfigBuilderTest.java @@ -125,40 +125,5 @@ public void validSecretsSyntaxTest() Assert.assertEquals(secrets[i], databaseConnectionConfigs.get(i).getSecret()); } } - - @Test - public void buildUsingGlueConnectionWithSecret() - { - DatabaseConnectionConfig glueSupplementedConnection = new DatabaseConnectionConfig("default", "postgres", - "jdbc:postgresql://hostname/test", "testSecret"); - - List databaseConnectionConfigs = new DatabaseConnectionConfigBuilder() - .engine("postgres") - .properties(ImmutableMap.of( - "default", 
CONNECTION_STRING2, - "default_connection_string", CONNECTION_STRING5, - "secret_name", CONNECTION_STRING5_SECRET, - "glue_connection", MOCK_GLUE_CONNECTION_NAME)) - .build(); - - Assert.assertEquals(Arrays.asList(glueSupplementedConnection), databaseConnectionConfigs); - } - - @Test - public void buildUsingGlueConnectionNoSecret() - { - DatabaseConnectionConfig glueSupplementedConnection = new DatabaseConnectionConfig("default", "postgres", - "jdbc:postgresql://hostname/test"); - - List databaseConnectionConfigs = new DatabaseConnectionConfigBuilder() - .engine("postgres") - .properties(ImmutableMap.of( - "default", CONNECTION_STRING2, - "default_connection_string", CONNECTION_STRING5, - "glue_connection", MOCK_GLUE_CONNECTION_NAME)) - .build(); - - Assert.assertEquals(Arrays.asList(glueSupplementedConnection), databaseConnectionConfigs); - } } diff --git a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeEnvironmentProperties.java b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeEnvironmentProperties.java index d90502132d..0528378c48 100644 --- a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeEnvironmentProperties.java +++ b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeEnvironmentProperties.java @@ -21,14 +21,34 @@ import com.amazonaws.athena.connectors.jdbc.JdbcEnvironmentProperties; +import java.util.HashMap; import java.util.Map; import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.DATABASE; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.DEFAULT; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.HOST; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.PORT; import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.SCHEMA; import static 
com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.WAREHOUSE; public class SnowflakeEnvironmentProperties extends JdbcEnvironmentProperties { + @Override + public Map connectionPropertiesToEnvironment(Map connectionProperties) + { + HashMap environment = new HashMap<>(); + + // now construct jdbc string + String connectionString = getConnectionStringPrefix(connectionProperties) + connectionProperties.get(HOST); + if (connectionProperties.containsKey(PORT)) { + connectionString = connectionString + ":" + connectionProperties.get(PORT); + } + connectionString = connectionString + getDatabase(connectionProperties) + getJdbcParameters(connectionProperties); + + environment.put(DEFAULT, connectionString); + return environment; + } + @Override protected String getConnectionStringPrefix(Map connectionProperties) { @@ -47,4 +67,10 @@ protected String getDatabase(Map connectionProperties) + "&schema=" + connectionProperties.get(SCHEMA); return databaseString; } + + @Override + protected String getJdbcParametersSeparator() + { + return "&"; + } } From d42e759214df22e3b29551ac353685a92477ec5a Mon Sep 17 00:00:00 2001 From: VenkatasivareddyTR <110587813+VenkatasivareddyTR@users.noreply.github.com> Date: Fri, 4 Oct 2024 18:58:48 +0530 Subject: [PATCH 51/87] reverting PR #2273 for gbq connector as it's not required for cdk. 
(#2311) --- athena-google-bigquery/pom.xml | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/athena-google-bigquery/pom.xml b/athena-google-bigquery/pom.xml index 45980e6263..f1a22bfb3f 100644 --- a/athena-google-bigquery/pom.xml +++ b/athena-google-bigquery/pom.xml @@ -27,16 +27,10 @@ - software.amazon.awssdk + software.amazon.awscdk rds - ${aws-sdk-v2.version} + ${aws-cdk.version} test - - - software.amazon.awssdk - netty-nio-client - - From 12644d09f77d237f4453df2aace2ed0e1f36c680 Mon Sep 17 00:00:00 2001 From: Aimery Methena Date: Wed, 16 Oct 2024 12:36:50 -0400 Subject: [PATCH 52/87] Update DDB new errors to v2 --- .../dynamodb/qpt/DDBQueryPassthrough.java | 8 ++++---- .../dynamodb/resolver/DynamoDBFieldResolver.java | 6 +++--- .../dynamodb/resolver/DynamoDBTableResolver.java | 6 +++--- .../dynamodb/util/DDBPredicateUtils.java | 8 ++++---- .../connectors/dynamodb/util/DDBTableUtils.java | 6 +++--- .../connectors/dynamodb/util/DDBTypeUtils.java | 16 ++++++++-------- 6 files changed, 25 insertions(+), 25 deletions(-) diff --git a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/qpt/DDBQueryPassthrough.java b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/qpt/DDBQueryPassthrough.java index 09a250a1fd..68a6d70403 100644 --- a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/qpt/DDBQueryPassthrough.java +++ b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/qpt/DDBQueryPassthrough.java @@ -21,11 +21,11 @@ import com.amazonaws.athena.connector.lambda.exceptions.AthenaConnectorException; import com.amazonaws.athena.connector.lambda.metadata.optimizations.querypassthrough.QueryPassthroughSignature; -import com.amazonaws.services.glue.model.ErrorDetails; -import com.amazonaws.services.glue.model.FederationSourceErrorCode; import com.google.common.collect.ImmutableSet; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import 
software.amazon.awssdk.services.glue.model.ErrorDetails; +import software.amazon.awssdk.services.glue.model.FederationSourceErrorCode; import java.util.Arrays; import java.util.List; @@ -80,7 +80,7 @@ public void customConnectorVerifications(Map engineQptArguments) // Immediately check if the statement starts with "SELECT" if (!upperCaseStatement.startsWith("SELECT")) { - throw new AthenaConnectorException("Statement does not start with SELECT.", new ErrorDetails().withErrorCode(FederationSourceErrorCode.OperationNotSupportedException.toString())); + throw new AthenaConnectorException("Statement does not start with SELECT.", ErrorDetails.builder().errorCode(FederationSourceErrorCode.OPERATION_NOT_SUPPORTED_EXCEPTION.toString()).build()); } // List of disallowed keywords @@ -89,7 +89,7 @@ public void customConnectorVerifications(Map engineQptArguments) // Check if the statement contains any disallowed keywords for (String keyword : disallowedKeywords) { if (upperCaseStatement.contains(keyword)) { - throw new AthenaConnectorException("Unaccepted operation; only SELECT statements are allowed. Found: " + keyword, new ErrorDetails().withErrorCode(FederationSourceErrorCode.OperationNotSupportedException.toString())); + throw new AthenaConnectorException("Unaccepted operation; only SELECT statements are allowed. 
Found: " + keyword, ErrorDetails.builder().errorCode(FederationSourceErrorCode.OPERATION_NOT_SUPPORTED_EXCEPTION.toString()).build()); } } } diff --git a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/resolver/DynamoDBFieldResolver.java b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/resolver/DynamoDBFieldResolver.java index cebb175715..0a186d7763 100644 --- a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/resolver/DynamoDBFieldResolver.java +++ b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/resolver/DynamoDBFieldResolver.java @@ -23,12 +23,12 @@ import com.amazonaws.athena.connector.lambda.exceptions.AthenaConnectorException; import com.amazonaws.athena.connectors.dynamodb.util.DDBRecordMetadata; import com.amazonaws.athena.connectors.dynamodb.util.DDBTypeUtils; -import com.amazonaws.services.glue.model.ErrorDetails; -import com.amazonaws.services.glue.model.FederationSourceErrorCode; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.Field; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.glue.model.ErrorDetails; +import software.amazon.awssdk.services.glue.model.FederationSourceErrorCode; import java.util.Map; @@ -90,7 +90,7 @@ public Object getFieldValue(Field field, Object originalValue) } throw new AthenaConnectorException("Invalid field value encountered in DB record for field: " + field + - ",value: " + fieldValue, new ErrorDetails().withErrorCode(FederationSourceErrorCode.InvalidInputException.toString())); + ",value: " + fieldValue, ErrorDetails.builder().errorCode(FederationSourceErrorCode.INVALID_INPUT_EXCEPTION.toString()).build()); } // Return the field value of a map key diff --git a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/resolver/DynamoDBTableResolver.java 
b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/resolver/DynamoDBTableResolver.java index 290359507b..7ae1fd436e 100644 --- a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/resolver/DynamoDBTableResolver.java +++ b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/resolver/DynamoDBTableResolver.java @@ -24,8 +24,6 @@ import com.amazonaws.athena.connectors.dynamodb.model.DynamoDBPaginatedTables; import com.amazonaws.athena.connectors.dynamodb.model.DynamoDBTable; import com.amazonaws.athena.connectors.dynamodb.util.DDBTableUtils; -import com.amazonaws.services.glue.model.ErrorDetails; -import com.amazonaws.services.glue.model.FederationSourceErrorCode; import com.google.common.collect.ArrayListMultimap; import com.google.common.collect.Multimap; import org.apache.arrow.vector.types.pojo.Schema; @@ -35,6 +33,8 @@ import software.amazon.awssdk.services.dynamodb.model.ListTablesRequest; import software.amazon.awssdk.services.dynamodb.model.ListTablesResponse; import software.amazon.awssdk.services.dynamodb.model.ResourceNotFoundException; +import software.amazon.awssdk.services.glue.model.ErrorDetails; +import software.amazon.awssdk.services.glue.model.FederationSourceErrorCode; import java.util.ArrayList; import java.util.Collection; @@ -121,7 +121,7 @@ public Schema getTableSchema(String tableName) return DDBTableUtils.peekTableForSchema(caseInsensitiveMatch.get(), invoker, ddbClient); } else { - throw new AthenaConnectorException(e.getMessage(), new ErrorDetails().withErrorCode(FederationSourceErrorCode.EntityNotFoundException.toString())); + throw new AthenaConnectorException(e.getMessage(), ErrorDetails.builder().errorCode(FederationSourceErrorCode.ENTITY_NOT_FOUND_EXCEPTION.toString()).build()); } } } diff --git a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/util/DDBPredicateUtils.java 
b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/util/DDBPredicateUtils.java index 3c38e4dec7..bf7aa0854f 100644 --- a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/util/DDBPredicateUtils.java +++ b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/util/DDBPredicateUtils.java @@ -27,12 +27,12 @@ import com.amazonaws.athena.connector.lambda.exceptions.AthenaConnectorException; import com.amazonaws.athena.connectors.dynamodb.model.DynamoDBIndex; import com.amazonaws.athena.connectors.dynamodb.model.DynamoDBTable; -import com.amazonaws.services.glue.model.ErrorDetails; -import com.amazonaws.services.glue.model.FederationSourceErrorCode; import com.google.common.base.Joiner; import com.google.common.collect.ImmutableList; import software.amazon.awssdk.services.dynamodb.model.AttributeValue; import software.amazon.awssdk.services.dynamodb.model.ProjectionType; +import software.amazon.awssdk.services.glue.model.ErrorDetails; +import software.amazon.awssdk.services.glue.model.FederationSourceErrorCode; import java.util.ArrayList; import java.util.HashSet; @@ -192,7 +192,7 @@ private static void validateColumnRange(Range range) case EXACTLY: break; case BELOW: - throw new AthenaConnectorException("Low marker should never use BELOW bound", new ErrorDetails().withErrorCode(FederationSourceErrorCode.InvalidInputException.toString())); + throw new AthenaConnectorException("Low marker should never use BELOW bound", ErrorDetails.builder().errorCode(FederationSourceErrorCode.INVALID_INPUT_EXCEPTION.toString()).build()); default: throw new AssertionError("Unhandled lower bound: " + range.getLow().getBound()); } @@ -200,7 +200,7 @@ private static void validateColumnRange(Range range) if (!range.getHigh().isUpperUnbounded()) { switch (range.getHigh().getBound()) { case ABOVE: - throw new AthenaConnectorException("High marker should never use ABOVE bound", new 
ErrorDetails().withErrorCode(FederationSourceErrorCode.InvalidInputException.toString())); + throw new AthenaConnectorException("High marker should never use ABOVE bound", ErrorDetails.builder().errorCode(FederationSourceErrorCode.INVALID_INPUT_EXCEPTION.toString()).build()); case EXACTLY: break; case BELOW: diff --git a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/util/DDBTableUtils.java b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/util/DDBTableUtils.java index 923d03ec48..98332f78c1 100644 --- a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/util/DDBTableUtils.java +++ b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/util/DDBTableUtils.java @@ -24,8 +24,6 @@ import com.amazonaws.athena.connector.lambda.exceptions.AthenaConnectorException; import com.amazonaws.athena.connectors.dynamodb.model.DynamoDBIndex; import com.amazonaws.athena.connectors.dynamodb.model.DynamoDBTable; -import com.amazonaws.services.glue.model.ErrorDetails; -import com.amazonaws.services.glue.model.FederationSourceErrorCode; import com.google.common.collect.ImmutableList; import org.apache.arrow.vector.types.pojo.Schema; import org.slf4j.Logger; @@ -44,6 +42,8 @@ import software.amazon.awssdk.services.dynamodb.model.ScanRequest; import software.amazon.awssdk.services.dynamodb.model.ScanResponse; import software.amazon.awssdk.services.dynamodb.model.TableDescription; +import software.amazon.awssdk.services.glue.model.ErrorDetails; +import software.amazon.awssdk.services.glue.model.FederationSourceErrorCode; import java.util.List; import java.util.Map; @@ -170,7 +170,7 @@ public static Schema peekTableForSchema(String tableName, ThrottlingInvoker invo logger.warn("Failed to retrieve table schema due to KMS issue, empty schema for table: {}. 
Error Message: {}", tableName, runtimeException.getMessage()); } else { - throw new AthenaConnectorException(runtimeException.getMessage(), new ErrorDetails().withErrorCode(FederationSourceErrorCode.OperationTimeoutException.toString())); + throw new AthenaConnectorException(runtimeException.getMessage(), ErrorDetails.builder().errorCode(FederationSourceErrorCode.OPERATION_TIMEOUT_EXCEPTION.toString()).build()); } } return schemaBuilder.build(); diff --git a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/util/DDBTypeUtils.java b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/util/DDBTypeUtils.java index b5f27a434a..d1abcdefaa 100644 --- a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/util/DDBTypeUtils.java +++ b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/util/DDBTypeUtils.java @@ -32,8 +32,6 @@ import com.amazonaws.athena.connector.lambda.domain.predicate.ConstraintProjector; import com.amazonaws.athena.connector.lambda.exceptions.AthenaConnectorException; import com.amazonaws.athena.connectors.dynamodb.resolver.DynamoDBFieldResolver; -import com.amazonaws.services.glue.model.ErrorDetails; -import com.amazonaws.services.glue.model.FederationSourceErrorCode; import org.apache.arrow.vector.FieldVector; import org.apache.arrow.vector.holders.NullableBitHolder; import org.apache.arrow.vector.types.Types; @@ -52,6 +50,8 @@ import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue; import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.StringAttributeConverter; import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.glue.model.ErrorDetails; +import software.amazon.awssdk.services.glue.model.FederationSourceErrorCode; import software.amazon.awssdk.utils.ImmutableMap; import java.math.BigDecimal; @@ -191,7 +191,7 @@ else if (enhancedAttributeValue.isMap()) { 
} String attributeTypeName = (value == null || value.getClass() == null) ? "null" : enhancedAttributeValue.type().name(); - throw new AthenaConnectorException("Unknown Attribute Value Type[" + attributeTypeName + "] for field[" + key + "]", new ErrorDetails().withErrorCode(FederationSourceErrorCode.InvalidInputException.toString())); + throw new AthenaConnectorException("Unknown Attribute Value Type[" + attributeTypeName + "] for field[" + key + "]", ErrorDetails.builder().errorCode(FederationSourceErrorCode.INVALID_INPUT_EXCEPTION.toString()).build()); } /** @@ -265,7 +265,7 @@ public static Field getArrowFieldFromDDBType(String attributeName, String attrib case MAP: return new Field(attributeName, FieldType.nullable(Types.MinorType.STRUCT.getType()), null); default: - throw new AthenaConnectorException("Unknown type[" + attributeType + "] for field[" + attributeName + "]", new ErrorDetails().withErrorCode(FederationSourceErrorCode.InvalidInputException.toString())); + throw new AthenaConnectorException("Unknown type[" + attributeType + "] for field[" + attributeName + "]", ErrorDetails.builder().errorCode(FederationSourceErrorCode.INVALID_INPUT_EXCEPTION.toString()).build()); } } @@ -385,7 +385,7 @@ public static List coerceListToExpectedType(Object value, Field field, D if (!(value instanceof Collection)) { if (value instanceof Map) { - throw new AthenaConnectorException("Unexpected type (Map) encountered for: " + childField.getName(), new ErrorDetails().withErrorCode(FederationSourceErrorCode.InvalidInputException.toString())); + throw new AthenaConnectorException("Unexpected type (Map) encountered for: " + childField.getName(), ErrorDetails.builder().errorCode(FederationSourceErrorCode.INVALID_INPUT_EXCEPTION.toString()).build()); } return Collections.singletonList(coerceValueToExpectedType(value, childField, fieldType, recordMetadata)); } @@ -621,7 +621,7 @@ else if (value instanceof Map) { return handleMapType((Map) value); } else { - throw new 
AthenaConnectorException("Unsupported value type: " + value.getClass(), new ErrorDetails().withErrorCode(FederationSourceErrorCode.InvalidInputException.toString())); + throw new AthenaConnectorException("Unsupported value type: " + value.getClass(), ErrorDetails.builder().errorCode(FederationSourceErrorCode.INVALID_INPUT_EXCEPTION.toString()).build()); } } @@ -635,7 +635,7 @@ public static AttributeValue jsonToAttributeValue(String jsonString, String key) { EnhancedDocument enhancedDocument = EnhancedDocument.fromJson(jsonString); if (!enhancedDocument.isPresent(key)) { - throw new AthenaConnectorException("Unknown attribute Key", new ErrorDetails().withErrorCode(FederationSourceErrorCode.InvalidInputException.toString())); + throw new AthenaConnectorException("Unknown attribute Key", ErrorDetails.builder().errorCode(FederationSourceErrorCode.INVALID_INPUT_EXCEPTION.toString()).build()); } return enhancedDocument.toMap().get(key); } @@ -658,7 +658,7 @@ else if (firstElement instanceof Number) { } // Add other types if needed // Fallback for unsupported set types - throw new AthenaConnectorException("Unsupported Set element type: " + firstElement.getClass(), new ErrorDetails().withErrorCode(FederationSourceErrorCode.InvalidInputException.toString())); + throw new AthenaConnectorException("Unsupported Set element type: " + firstElement.getClass(), ErrorDetails.builder().errorCode(FederationSourceErrorCode.INVALID_INPUT_EXCEPTION.toString()).build()); } private static AttributeValue handleListType(List value) From 6b99732a4a49766272b5ba4b73821d12b2b23abd Mon Sep 17 00:00:00 2001 From: Aimery Methena Date: Wed, 23 Oct 2024 09:55:15 -0400 Subject: [PATCH 53/87] update connections yaml files with new image uri --- athena-aws-cmdb/athena-aws-cmdb-connection.yaml | 6 +++++- athena-cloudera-hive/athena-cloudera-hive-connection.yaml | 6 +++++- .../athena-cloudera-impala-connection.yaml | 6 +++++- .../athena-cloudwatch-metrics-connection.yaml | 6 +++++- 
athena-cloudwatch/athena-cloudwatch-connection.yaml | 6 +++++- athena-datalakegen2/athena-datalakegen2-connection.yaml | 6 +++++- athena-db2-as400/athena-db2-as400-connection.yaml | 6 +++++- athena-db2/athena-db2-connection.yaml | 6 +++++- athena-docdb/athena-docdb-connection.yaml | 6 +++++- athena-dynamodb/athena-dynamodb-connection.yaml | 6 +++++- athena-elasticsearch/athena-elasticsearch-connection.yaml | 6 +++++- athena-gcs/athena-gcs-connection.yaml | 6 +++++- .../athena-google-bigquery-connection.yaml | 6 +++++- athena-hbase/athena-hbase-connection.yaml | 6 +++++- .../athena-hortonworks-hive-connection.yaml | 6 +++++- athena-msk/athena-msk-connection.yaml | 6 +++++- athena-mysql/athena-mysql-connection.yaml | 6 +++++- athena-neptune/athena-neptune-connection.yaml | 6 +++++- athena-oracle/athena-oracle-connection.yaml | 6 +++++- athena-postgresql/athena-postgresql-connection.yaml | 6 +++++- athena-redis/athena-redis-connection.yaml | 6 +++++- athena-redshift/athena-redshift-connection.yaml | 6 +++++- athena-saphana/athena-saphana-connection.yaml | 6 +++++- athena-snowflake/athena-snowflake-connection.yaml | 6 +++++- athena-sqlserver/athena-sqlserver-connection.yaml | 6 +++++- athena-synapse/athena-synapse-connection.yaml | 6 +++++- athena-teradata/athena-teradata-connection.yaml | 6 +++++- athena-timestream/athena-timestream-connection.yaml | 6 +++++- athena-tpcds/athena-tpcds-connection.yaml | 6 +++++- athena-vertica/athena-vertica-connection.yaml | 6 +++++- 30 files changed, 150 insertions(+), 30 deletions(-) diff --git a/athena-aws-cmdb/athena-aws-cmdb-connection.yaml b/athena-aws-cmdb/athena-aws-cmdb-connection.yaml index e4ec86f8bc..bea006e174 100644 --- a/athena-aws-cmdb/athena-aws-cmdb-connection.yaml +++ b/athena-aws-cmdb/athena-aws-cmdb-connection.yaml @@ -36,6 +36,8 @@ Conditions: HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition 
NotHasLambdaRole ] + IsRegionBAH: !Equals [!Ref "AWS::Region", "me-south-1"] + IsRegionHKG: !Equals [!Ref "AWS::Region", "ap-east-1"] Resources: ConnectorConfig: @@ -46,7 +48,9 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-aws-cmdb:2022.47.1' + ImageUri: !Sub + - '${Account}.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-aws-cmdb:2022.47.1' + - Account: !If [IsRegionBAH, 084828588479, !If [IsRegionHKG, 183295418215, 292517598671]] Description: "Enables Amazon Athena to communicate with various AWS Services, making your resource inventories accessible via SQL." Timeout: 900 MemorySize: 3008 diff --git a/athena-cloudera-hive/athena-cloudera-hive-connection.yaml b/athena-cloudera-hive/athena-cloudera-hive-connection.yaml index f958e4718c..29fa4f5d2e 100644 --- a/athena-cloudera-hive/athena-cloudera-hive-connection.yaml +++ b/athena-cloudera-hive/athena-cloudera-hive-connection.yaml @@ -44,6 +44,8 @@ Conditions: NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] CreateKmsPolicy: !And [!Condition NotHasLambdaRole, !Condition HasKmsKeyId] + IsRegionBAH: !Equals [!Ref "AWS::Region", "me-south-1"] + IsRegionHKG: !Equals [!Ref "AWS::Region", "ap-east-1"] Resources: JdbcConnectorConfig: Type: 'AWS::Serverless::Function' @@ -53,7 +55,9 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-cloudera-hive:2022.47.1' + ImageUri: !Sub + - '${Account}.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-cloudera-hive:2022.47.1' + - Account: !If [IsRegionBAH, 084828588479, !If [IsRegionHKG, 183295418215, 292517598671]] ImageConfig: Command: [ 
"com.amazonaws.athena.connectors.cloudera.HiveCompositeHandler" ] Description: "Enables Amazon Athena to communicate with Cloudera Hive using JDBC" diff --git a/athena-cloudera-impala/athena-cloudera-impala-connection.yaml b/athena-cloudera-impala/athena-cloudera-impala-connection.yaml index d3d5c568df..6c40c72087 100644 --- a/athena-cloudera-impala/athena-cloudera-impala-connection.yaml +++ b/athena-cloudera-impala/athena-cloudera-impala-connection.yaml @@ -44,6 +44,8 @@ Conditions: HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] CreateKmsPolicy: !And [!Condition NotHasLambdaRole, !Condition HasKmsKeyId] + IsRegionBAH: !Equals [!Ref "AWS::Region", "me-south-1"] + IsRegionHKG: !Equals [!Ref "AWS::Region", "ap-east-1"] Resources: JdbcConnectorConfig: Type: 'AWS::Serverless::Function' @@ -53,7 +55,9 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-cloudera-impala:2022.47.1' + ImageUri: !Sub + - '${Account}.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-cloudera-impala:2022.47.1' + - Account: !If [IsRegionBAH, 084828588479, !If [IsRegionHKG, 183295418215, 292517598671]] ImageConfig: Command: [ "com.amazonaws.athena.connectors.cloudera.ImpalaCompositeHandler" ] Description: "Enables Amazon Athena to communicate with Cloudera Impala using JDBC" diff --git a/athena-cloudwatch-metrics/athena-cloudwatch-metrics-connection.yaml b/athena-cloudwatch-metrics/athena-cloudwatch-metrics-connection.yaml index c5bae85d66..1cf557a0fa 100644 --- a/athena-cloudwatch-metrics/athena-cloudwatch-metrics-connection.yaml +++ b/athena-cloudwatch-metrics/athena-cloudwatch-metrics-connection.yaml @@ -36,6 +36,8 @@ Conditions: HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] CreateKmsPolicy: !And [ !Condition 
HasKmsKeyId, !Condition NotHasLambdaRole ] + IsRegionBAH: !Equals [!Ref "AWS::Region", "me-south-1"] + IsRegionHKG: !Equals [!Ref "AWS::Region", "ap-east-1"] Resources: ConnectorConfig: @@ -46,7 +48,9 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-cloudwatch-metrics:2022.47.1' + ImageUri: !Sub + - '${Account}.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-cloudwatch-metrics:2022.47.1' + - Account: !If [IsRegionBAH, 084828588479, !If [IsRegionHKG, 183295418215, 292517598671]] Description: "Enables Amazon Athena to communicate with Cloudwatch Metrics, making your metrics data accessible via SQL" Timeout: 900 MemorySize: 3008 diff --git a/athena-cloudwatch/athena-cloudwatch-connection.yaml b/athena-cloudwatch/athena-cloudwatch-connection.yaml index 6be26a2409..b2bab240ef 100644 --- a/athena-cloudwatch/athena-cloudwatch-connection.yaml +++ b/athena-cloudwatch/athena-cloudwatch-connection.yaml @@ -36,6 +36,8 @@ Conditions: HasKmsKeyId: !Not [!Equals [!Ref KmsKeyId, ""]] NotHasLambdaRole: !Equals [!Ref LambdaRole, ""] CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + IsRegionBAH: !Equals [!Ref "AWS::Region", "me-south-1"] + IsRegionHKG: !Equals [!Ref "AWS::Region", "ap-east-1"] Resources: ConnectorConfig: @@ -46,7 +48,9 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-cloudwatch:2022.47.1' + ImageUri: !Sub + - '${Account}.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-cloudwatch:2022.47.1' + - Account: !If [IsRegionBAH, 084828588479, !If [IsRegionHKG, 183295418215, 292517598671]] Description: "Enables Amazon Athena to communicate with Cloudwatch, making your log accessible via SQL" Timeout: 
900 MemorySize: 3008 diff --git a/athena-datalakegen2/athena-datalakegen2-connection.yaml b/athena-datalakegen2/athena-datalakegen2-connection.yaml index 93a01cb338..875093bae6 100644 --- a/athena-datalakegen2/athena-datalakegen2-connection.yaml +++ b/athena-datalakegen2/athena-datalakegen2-connection.yaml @@ -51,6 +51,8 @@ Conditions: HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + IsRegionBAH: !Equals [!Ref "AWS::Region", "me-south-1"] + IsRegionHKG: !Equals [!Ref "AWS::Region", "ap-east-1"] Resources: JdbcConnectorConfig: @@ -61,7 +63,9 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-datalakegen2:2022.47.1' + ImageUri: !Sub + - '${Account}.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-datalakegen2:2022.47.1' + - Account: !If [IsRegionBAH, 084828588479, !If [IsRegionHKG, 183295418215, 292517598671]] ImageConfig: Command: [ "com.amazonaws.athena.connectors.datalakegen2.DataLakeGen2CompositeHandler" ] Description: "Enables Amazon Athena to communicate with DataLake Gen2 using JDBC" diff --git a/athena-db2-as400/athena-db2-as400-connection.yaml b/athena-db2-as400/athena-db2-as400-connection.yaml index a55586143c..1f245c8491 100644 --- a/athena-db2-as400/athena-db2-as400-connection.yaml +++ b/athena-db2-as400/athena-db2-as400-connection.yaml @@ -52,6 +52,8 @@ Conditions: HasKmsKeyId: !Not [!Equals [!Ref KmsKeyId, ""]] NotHasLambdaRole: !Equals [!Ref LambdaRoleArn, ""] CreateKmsPolicy: !And [!Condition HasKmsKeyId, !Condition NotHasLambdaRole] + IsRegionBAH: !Equals [!Ref "AWS::Region", "me-south-1"] + IsRegionHKG: !Equals [!Ref "AWS::Region", "ap-east-1"] Resources: JdbcConnectorConfig: @@ -62,7 +64,9 @@ Resources: glue_connection: !Ref GlueConnection 
FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-db2-as400:2022.47.1' + ImageUri: !Sub + - '${Account}.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-db2-as400:2022.47.1' + - Account: !If [IsRegionBAH, 084828588479, !If [IsRegionHKG, 183295418215, 292517598671]] ImageConfig: Command: [ "com.amazonaws.athena.connectors.db2as400.Db2As400CompositeHandler" ] Description: "Enables Amazon Athena to communicate with DB2 on iSeries (AS400) using JDBC" diff --git a/athena-db2/athena-db2-connection.yaml b/athena-db2/athena-db2-connection.yaml index 5d4fa52892..34ad745ff1 100644 --- a/athena-db2/athena-db2-connection.yaml +++ b/athena-db2/athena-db2-connection.yaml @@ -52,6 +52,8 @@ Conditions: HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + IsRegionBAH: !Equals [!Ref "AWS::Region", "me-south-1"] + IsRegionHKG: !Equals [!Ref "AWS::Region", "ap-east-1"] Resources: JdbcConnectorConfig: @@ -62,7 +64,9 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-db2:2022.47.1' + ImageUri: !Sub + - '${Account}.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-db2:2022.47.1' + - Account: !If [IsRegionBAH, 084828588479, !If [IsRegionHKG, 183295418215, 292517598671]] ImageConfig: Command: [ "com.amazonaws.athena.connectors.db2.Db2CompositeHandler" ] Description: "Enables Amazon Athena to communicate with DB2 using JDBC" diff --git a/athena-docdb/athena-docdb-connection.yaml b/athena-docdb/athena-docdb-connection.yaml index 6caa42b15e..1f1d6e0841 100644 --- a/athena-docdb/athena-docdb-connection.yaml +++ b/athena-docdb/athena-docdb-connection.yaml @@ -45,6 +45,8 @@ 
Conditions: HasKmsKeyId: !Not [!Equals [!Ref KmsKeyId, ""]] NotHasLambdaRole: !Equals [!Ref LambdaRoleArn, ""] CreateKmsPolicy: !And [!Condition HasKmsKeyId, !Condition NotHasLambdaRole] + IsRegionBAH: !Equals [!Ref "AWS::Region", "me-south-1"] + IsRegionHKG: !Equals [!Ref "AWS::Region", "ap-east-1"] Resources: ConnectorConfig: @@ -55,7 +57,9 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-docdb:2022.47.1' + ImageUri: !Sub + - '${Account}.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-docdb:2022.47.1' + - Account: !If [IsRegionBAH, 084828588479, !If [IsRegionHKG, 183295418215, 292517598671]] Description: "Enables Amazon Athena to communicate with DocumentDB, making your DocumentDB data accessible via SQL." Timeout: 900 MemorySize: 3008 diff --git a/athena-dynamodb/athena-dynamodb-connection.yaml b/athena-dynamodb/athena-dynamodb-connection.yaml index 505d1b1e7f..600bb49697 100644 --- a/athena-dynamodb/athena-dynamodb-connection.yaml +++ b/athena-dynamodb/athena-dynamodb-connection.yaml @@ -36,6 +36,8 @@ Conditions: HasKmsKeyId: !Not [!Equals [!Ref KmsKeyId, ""]] NotHasLambdaRole: !Equals [!Ref LambdaRoleArn, ""] CreateKmsPolicy: !And [!Condition HasKmsKeyId, !Condition NotHasLambdaRole] + IsRegionBAH: !Equals [!Ref "AWS::Region", "me-south-1"] + IsRegionHKG: !Equals [!Ref "AWS::Region", "ap-east-1"] Resources: ConnectorConfig: @@ -46,7 +48,9 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-dynamodb:2022.47.1' + ImageUri: !Sub + - '${Account}.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-dynamodb:2022.47.1' + - Account: !If [IsRegionBAH, 084828588479, !If [IsRegionHKG, 183295418215, 292517598671]] Description: 
"Enables Amazon Athena to communicate with DynamoDB, making your tables accessible via SQL" Timeout: 900 MemorySize: 3008 diff --git a/athena-elasticsearch/athena-elasticsearch-connection.yaml b/athena-elasticsearch/athena-elasticsearch-connection.yaml index 6e58f5e8f1..e2c051882f 100644 --- a/athena-elasticsearch/athena-elasticsearch-connection.yaml +++ b/athena-elasticsearch/athena-elasticsearch-connection.yaml @@ -53,6 +53,8 @@ Conditions: HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + IsRegionBAH: !Equals [!Ref "AWS::Region", "me-south-1"] + IsRegionHKG: !Equals [!Ref "AWS::Region", "ap-east-1"] Resources: ConnectorConfig: @@ -63,7 +65,9 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-elasticsearch:2022.47.1' + ImageUri: !Sub + - '${Account}.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-elasticsearch:2022.47.1' + - Account: !If [IsRegionBAH, 084828588479, !If [IsRegionHKG, 183295418215, 292517598671]] Description: "The Elasticsearch Lambda Connector provides Athena users the ability to query data stored on Elasticsearch clusters." 
Timeout: 900 MemorySize: 3008 diff --git a/athena-gcs/athena-gcs-connection.yaml b/athena-gcs/athena-gcs-connection.yaml index e514c170c0..304e998c88 100644 --- a/athena-gcs/athena-gcs-connection.yaml +++ b/athena-gcs/athena-gcs-connection.yaml @@ -40,6 +40,8 @@ Conditions: HasKmsKeyId: !Not [!Equals [!Ref KmsKeyId, ""]] NotHasLambdaRole: !Equals [!Ref LambdaRoleArn, ""] CreateKmsPolicy: !And [!Condition HasKmsKeyId, !Condition NotHasLambdaRole] + IsRegionBAH: !Equals [!Ref "AWS::Region", "me-south-1"] + IsRegionHKG: !Equals [!Ref "AWS::Region", "ap-east-1"] Resources: AthenaGCSConnector: @@ -50,7 +52,9 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-gcs:2022.47.1' + ImageUri: !Sub + - '${Account}.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-gcs:2022.47.1' + - Account: !If [IsRegionBAH, 084828588479, !If [IsRegionHKG, 183295418215, 292517598671]] Description: "Amazon Athena GCS Connector" Timeout: 900 MemorySize: 3008 diff --git a/athena-google-bigquery/athena-google-bigquery-connection.yaml b/athena-google-bigquery/athena-google-bigquery-connection.yaml index b06057c3d5..6ed37cde58 100644 --- a/athena-google-bigquery/athena-google-bigquery-connection.yaml +++ b/athena-google-bigquery/athena-google-bigquery-connection.yaml @@ -52,6 +52,8 @@ Conditions: HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + IsRegionBAH: !Equals [!Ref "AWS::Region", "me-south-1"] + IsRegionHKG: !Equals [!Ref "AWS::Region", "ap-east-1"] Resources: AthenaBigQueryConnector: @@ -63,7 +65,9 @@ Resources: GOOGLE_APPLICATION_CREDENTIALS: '/tmp/service-account.json' FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub 
'292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-google-bigquery:2022.47.1' + ImageUri: !Sub + - '${Account}.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-google-bigquery:2022.47.1' + - Account: !If [IsRegionBAH, 084828588479, !If [IsRegionHKG, 183295418215, 292517598671]] Description: "Enables Amazon Athena to communicate with BigQuery using Google SDK" Timeout: 900 MemorySize: 3008 diff --git a/athena-hbase/athena-hbase-connection.yaml b/athena-hbase/athena-hbase-connection.yaml index e8b9e8bd52..e950f7e16d 100644 --- a/athena-hbase/athena-hbase-connection.yaml +++ b/athena-hbase/athena-hbase-connection.yaml @@ -45,6 +45,8 @@ Conditions: HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + IsRegionBAH: !Equals [!Ref "AWS::Region", "me-south-1"] + IsRegionHKG: !Equals [!Ref "AWS::Region", "ap-east-1"] Resources: ConnectorConfig: @@ -55,7 +57,9 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-hbase:2022.47.1' + ImageUri: !Sub + - '${Account}.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-hbase:2022.47.1' + - Account: !If [IsRegionBAH, 084828588479, !If [IsRegionHKG, 183295418215, 292517598671]] Description: "Enables Amazon Athena to communicate with HBase, making your HBase data accessible via SQL" Timeout: 900 MemorySize: 3008 diff --git a/athena-hortonworks-hive/athena-hortonworks-hive-connection.yaml b/athena-hortonworks-hive/athena-hortonworks-hive-connection.yaml index d75c03443b..9c92230e5a 100644 --- a/athena-hortonworks-hive/athena-hortonworks-hive-connection.yaml +++ b/athena-hortonworks-hive/athena-hortonworks-hive-connection.yaml @@ -49,6 +49,8 @@ Conditions: HasKmsKeyId: !Not [ !Equals [ !Ref 
KmsKeyId, "" ] ] NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + IsRegionBAH: !Equals [!Ref "AWS::Region", "me-south-1"] + IsRegionHKG: !Equals [!Ref "AWS::Region", "ap-east-1"] Resources: JdbcConnectorConfig: @@ -59,7 +61,9 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-hortonworks-hive:2022.47.1' + ImageUri: !Sub + - '${Account}.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-hortonworks-hive:2022.47.1' + - Account: !If [IsRegionBAH, 084828588479, !If [IsRegionHKG, 183295418215, 292517598671]] ImageConfig: Command: [ "com.amazonaws.athena.connectors.hortonworks.HiveCompositeHandler" ] Description: "Enables Amazon Athena to communicate with Hortonworks Hive using JDBC" diff --git a/athena-msk/athena-msk-connection.yaml b/athena-msk/athena-msk-connection.yaml index 491ff8985f..d8649830a0 100644 --- a/athena-msk/athena-msk-connection.yaml +++ b/athena-msk/athena-msk-connection.yaml @@ -51,6 +51,8 @@ Conditions: HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + IsRegionBAH: !Equals [!Ref "AWS::Region", "me-south-1"] + IsRegionHKG: !Equals [!Ref "AWS::Region", "ap-east-1"] Resources: AthenaMSKConnector: @@ -61,7 +63,9 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-msk:2022.47.1' + ImageUri: !Sub + - '${Account}.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-msk:2022.47.1' + - Account: !If [IsRegionBAH, 084828588479, !If [IsRegionHKG, 183295418215, 292517598671]] Description: "Enables Amazon Athena to 
communicate with MSK clusters" Timeout: 900 MemorySize: 3008 diff --git a/athena-mysql/athena-mysql-connection.yaml b/athena-mysql/athena-mysql-connection.yaml index b5165260d8..68cb803e91 100644 --- a/athena-mysql/athena-mysql-connection.yaml +++ b/athena-mysql/athena-mysql-connection.yaml @@ -45,6 +45,8 @@ Conditions: HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + IsRegionBAH: !Equals [!Ref "AWS::Region", "me-south-1"] + IsRegionHKG: !Equals [!Ref "AWS::Region", "ap-east-1"] Resources: JdbcConnectorConfig: @@ -55,7 +57,9 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-mysql:2022.47.1' + ImageUri: !Sub + - '${Account}.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-mysql:2022.47.1' + - Account: !If [IsRegionBAH, 084828588479, !If [IsRegionHKG, 183295418215, 292517598671]] ImageConfig: Command: [ "com.amazonaws.athena.connectors.mysql.MySqlCompositeHandler" ] Description: "Enables Amazon Athena to communicate with MySQL using JDBC" diff --git a/athena-neptune/athena-neptune-connection.yaml b/athena-neptune/athena-neptune-connection.yaml index 46705c4938..f0d7e098e8 100644 --- a/athena-neptune/athena-neptune-connection.yaml +++ b/athena-neptune/athena-neptune-connection.yaml @@ -47,6 +47,8 @@ Conditions: HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + IsRegionBAH: !Equals [!Ref "AWS::Region", "me-south-1"] + IsRegionHKG: !Equals [!Ref "AWS::Region", "ap-east-1"] Resources: ConnectorConfig: @@ -58,7 +60,9 @@ Resources: SERVICE_REGION: !Ref AWS::Region FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: 
!Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-neptune:2022.47.1' + ImageUri: !Sub + - '${Account}.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-neptune:2022.47.1' + - Account: !If [IsRegionBAH, 084828588479, !If [IsRegionHKG, 183295418215, 292517598671]] Description: "Enables Amazon Athena to communicate with Neptune, making your Neptune graph data accessible via SQL." Timeout: 900 MemorySize: 3008 diff --git a/athena-oracle/athena-oracle-connection.yaml b/athena-oracle/athena-oracle-connection.yaml index abd8648fce..0deb4b184c 100644 --- a/athena-oracle/athena-oracle-connection.yaml +++ b/athena-oracle/athena-oracle-connection.yaml @@ -47,6 +47,8 @@ Conditions: HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + IsRegionBAH: !Equals [!Ref "AWS::Region", "me-south-1"] + IsRegionHKG: !Equals [!Ref "AWS::Region", "ap-east-1"] Resources: JdbcConnectorConfig: Type: 'AWS::Serverless::Function' @@ -56,7 +58,9 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-oracle:2022.47.1' + ImageUri: !Sub + - '${Account}.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-oracle:2022.47.1' + - Account: !If [IsRegionBAH, 084828588479, !If [IsRegionHKG, 183295418215, 292517598671]] ImageConfig: Command: [ "com.amazonaws.athena.connectors.oracle.OracleCompositeHandler" ] Description: "Enables Amazon Athena to communicate with ORACLE using JDBC" diff --git a/athena-postgresql/athena-postgresql-connection.yaml b/athena-postgresql/athena-postgresql-connection.yaml index 26e1c096bb..cebadb96ec 100644 --- a/athena-postgresql/athena-postgresql-connection.yaml +++ b/athena-postgresql/athena-postgresql-connection.yaml @@ -54,6 +54,8 @@ 
Conditions: HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + IsRegionBAH: !Equals [!Ref "AWS::Region", "me-south-1"] + IsRegionHKG: !Equals [!Ref "AWS::Region", "ap-east-1"] Resources: JdbcConnectorConfig: @@ -64,7 +66,9 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-postgresql:2022.47.1' + ImageUri: !Sub + - '${Account}.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-postgresql:2022.47.1' + - Account: !If [IsRegionBAH, 084828588479, !If [IsRegionHKG, 183295418215, 292517598671]] Description: "Enables Amazon Athena to communicate with PostgreSQL using JDBC" Timeout: 900 MemorySize: 3008 diff --git a/athena-redis/athena-redis-connection.yaml b/athena-redis/athena-redis-connection.yaml index b901cfb39b..adf5552f00 100644 --- a/athena-redis/athena-redis-connection.yaml +++ b/athena-redis/athena-redis-connection.yaml @@ -44,6 +44,8 @@ Conditions: HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + IsRegionBAH: !Equals [!Ref "AWS::Region", "me-south-1"] + IsRegionHKG: !Equals [!Ref "AWS::Region", "ap-east-1"] Resources: ConnectorConfig: Type: 'AWS::Serverless::Function' @@ -53,7 +55,9 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-redis:2022.47.1' + ImageUri: !Sub + - '${Account}.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-redis:2022.47.1' + - Account: !If [IsRegionBAH, 084828588479, !If [IsRegionHKG, 183295418215, 292517598671]] Description: "Enables 
Amazon Athena to communicate with Redis, making your Redis data accessible via SQL" Timeout: 900 MemorySize: 3008 diff --git a/athena-redshift/athena-redshift-connection.yaml b/athena-redshift/athena-redshift-connection.yaml index 05a4728372..9c63a6ffde 100644 --- a/athena-redshift/athena-redshift-connection.yaml +++ b/athena-redshift/athena-redshift-connection.yaml @@ -44,6 +44,8 @@ Conditions: HasKmsKeyId: !Not [!Equals [!Ref KmsKeyId, ""]] NotHasLambdaRole: !Equals [!Ref LambdaRoleArn, ""] CreateKmsPolicy: !And [!Condition NotHasLambdaRole, !Condition HasKmsKeyId] + IsRegionBAH: !Equals [!Ref "AWS::Region", "me-south-1"] + IsRegionHKG: !Equals [!Ref "AWS::Region", "ap-east-1"] Resources: JdbcConnectorConfig: Type: 'AWS::Serverless::Function' @@ -53,7 +55,9 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-redshift:2022.47.1' + ImageUri: !Sub + - '${Account}.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-redshift:2022.47.1' + - Account: !If [IsRegionBAH, 084828588479, !If [IsRegionHKG, 183295418215, 292517598671]] ImageConfig: Command: [ "com.amazonaws.athena.connectors.redshift.RedshiftCompositeHandler" ] Description: "Enables Amazon Athena to communicate with Redshift using JDBC" diff --git a/athena-saphana/athena-saphana-connection.yaml b/athena-saphana/athena-saphana-connection.yaml index 656049b169..7a58a1a271 100644 --- a/athena-saphana/athena-saphana-connection.yaml +++ b/athena-saphana/athena-saphana-connection.yaml @@ -49,6 +49,8 @@ Conditions: HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + IsRegionBAH: !Equals [!Ref "AWS::Region", "me-south-1"] + IsRegionHKG: !Equals [!Ref "AWS::Region", "ap-east-1"] Resources: JdbcConnectorConfig: @@ -59,7 
+61,9 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-saphana:2022.47.1' + ImageUri: !Sub + - '${Account}.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-saphana:2022.47.1' + - Account: !If [IsRegionBAH, 084828588479, !If [IsRegionHKG, 183295418215, 292517598671]] ImageConfig: Command: [ "com.amazonaws.athena.connectors.saphana.SaphanaCompositeHandler" ] Description: "Enables Amazon Athena to communicate with Teradata using JDBC" diff --git a/athena-snowflake/athena-snowflake-connection.yaml b/athena-snowflake/athena-snowflake-connection.yaml index b1cd847087..af2d35782e 100644 --- a/athena-snowflake/athena-snowflake-connection.yaml +++ b/athena-snowflake/athena-snowflake-connection.yaml @@ -49,6 +49,8 @@ Conditions: HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + IsRegionBAH: !Equals [!Ref "AWS::Region", "me-south-1"] + IsRegionHKG: !Equals [!Ref "AWS::Region", "ap-east-1"] Resources: JdbcConnectorConfig: @@ -59,7 +61,9 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-snowflake:2022.47.1' + ImageUri: !Sub + - '${Account}.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-snowflake:2022.47.1' + - Account: !If [IsRegionBAH, 084828588479, !If [IsRegionHKG, 183295418215, 292517598671]] ImageConfig: Command: [ "com.amazonaws.athena.connectors.snowflake.SnowflakeCompositeHandler" ] Description: "Enables Amazon Athena to communicate with Snowflake using JDBC" diff --git a/athena-sqlserver/athena-sqlserver-connection.yaml b/athena-sqlserver/athena-sqlserver-connection.yaml index 
59d952b43a..54ba72b3a8 100644 --- a/athena-sqlserver/athena-sqlserver-connection.yaml +++ b/athena-sqlserver/athena-sqlserver-connection.yaml @@ -50,6 +50,8 @@ Conditions: HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + IsRegionBAH: !Equals [!Ref "AWS::Region", "me-south-1"] + IsRegionHKG: !Equals [!Ref "AWS::Region", "ap-east-1"] Resources: JdbcConnectorConfig: Type: 'AWS::Serverless::Function' @@ -59,7 +61,9 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-sqlserver:2022.47.1' + ImageUri: !Sub + - '${Account}.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-sqlserver:2022.47.1' + - Account: !If [IsRegionBAH, 084828588479, !If [IsRegionHKG, 183295418215, 292517598671]] ImageConfig: Command: [ "com.amazonaws.athena.connectors.sqlserver.SqlServerCompositeHandler" ] Description: "Enables Amazon Athena to communicate with SQLSERVER using JDBC" diff --git a/athena-synapse/athena-synapse-connection.yaml b/athena-synapse/athena-synapse-connection.yaml index bf9d3fd9fe..56645fd176 100644 --- a/athena-synapse/athena-synapse-connection.yaml +++ b/athena-synapse/athena-synapse-connection.yaml @@ -51,6 +51,8 @@ Conditions: HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + IsRegionBAH: !Equals [!Ref "AWS::Region", "me-south-1"] + IsRegionHKG: !Equals [!Ref "AWS::Region", "ap-east-1"] Resources: JdbcConnectorConfig: @@ -61,7 +63,9 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub 
'292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-synapse:2022.47.1' + ImageUri: !Sub + - '${Account}.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-synapse:2022.47.1' + - Account: !If [IsRegionBAH, 084828588479, !If [IsRegionHKG, 183295418215, 292517598671]] ImageConfig: Command: [ "com.amazonaws.athena.connectors.synapse.SynapseCompositeHandler" ] Description: "Enables Amazon Athena to communicate with SYNPASE using JDBC" diff --git a/athena-teradata/athena-teradata-connection.yaml b/athena-teradata/athena-teradata-connection.yaml index 51f1074674..dcdc786e9c 100644 --- a/athena-teradata/athena-teradata-connection.yaml +++ b/athena-teradata/athena-teradata-connection.yaml @@ -48,6 +48,8 @@ Conditions: HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + IsRegionBAH: !Equals [!Ref "AWS::Region", "me-south-1"] + IsRegionHKG: !Equals [!Ref "AWS::Region", "ap-east-1"] Resources: JdbcConnectorConfig: Type: 'AWS::Serverless::Function' @@ -57,7 +59,9 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-teradata:2022.47.1' + ImageUri: !Sub + - '${Account}.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-teradata:2022.47.1' + - Account: !If [IsRegionBAH, 084828588479, !If [IsRegionHKG, 183295418215, 292517598671]] ImageConfig: Command: [ "com.amazonaws.athena.connectors.teradata.TeradataCompositeHandler" ] Description: "Enables Amazon Athena to communicate with Teradata using JDBC" diff --git a/athena-timestream/athena-timestream-connection.yaml b/athena-timestream/athena-timestream-connection.yaml index 5d73156b3c..288cbfea47 100644 --- a/athena-timestream/athena-timestream-connection.yaml +++ 
b/athena-timestream/athena-timestream-connection.yaml @@ -35,6 +35,8 @@ Conditions: HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + IsRegionBAH: !Equals [!Ref "AWS::Region", "me-south-1"] + IsRegionHKG: !Equals [!Ref "AWS::Region", "ap-east-1"] Resources: ConnectorConfig: Type: 'AWS::Serverless::Function' @@ -44,7 +46,9 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-timestream:2022.47.1' + ImageUri: !Sub + - '${Account}.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-timestream:2022.47.1' + - Account: !If [IsRegionBAH, 084828588479, !If [IsRegionHKG, 183295418215, 292517598671]] Description: "Enables Amazon Athena to communicate with Amazon Timestream, making your time series data accessible from Athena." 
Timeout: 900 MemorySize: 3008 diff --git a/athena-tpcds/athena-tpcds-connection.yaml b/athena-tpcds/athena-tpcds-connection.yaml index db51aa5664..e5b77b3312 100644 --- a/athena-tpcds/athena-tpcds-connection.yaml +++ b/athena-tpcds/athena-tpcds-connection.yaml @@ -36,6 +36,8 @@ Conditions: HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + IsRegionBAH: !Equals [!Ref "AWS::Region", "me-south-1"] + IsRegionHKG: !Equals [!Ref "AWS::Region", "ap-east-1"] Resources: ConnectorConfig: @@ -46,7 +48,9 @@ Resources: glue_connection: Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-tpcds:2022.47.1' + ImageUri: !Sub + - '${Account}.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-tpcds:2022.47.1' + - Account: !If [IsRegionBAH, 084828588479, !If [IsRegionHKG, 183295418215, 292517598671]] Description: "This connector enables Amazon Athena to communicate with a randomly generated TPC-DS data source." 
Timeout: 900 MemorySize: 3008 diff --git a/athena-vertica/athena-vertica-connection.yaml b/athena-vertica/athena-vertica-connection.yaml index 778cc7511c..502c3ac451 100644 --- a/athena-vertica/athena-vertica-connection.yaml +++ b/athena-vertica/athena-vertica-connection.yaml @@ -52,6 +52,8 @@ Conditions: HasKmsKeyId: !Not [ !Equals [ !Ref KmsKeyId, "" ] ] NotHasLambdaRole: !Equals [ !Ref LambdaRoleArn, "" ] CreateKmsPolicy: !And [ !Condition HasKmsKeyId, !Condition NotHasLambdaRole ] + IsRegionBAH: !Equals [!Ref "AWS::Region", "me-south-1"] + IsRegionHKG: !Equals [!Ref "AWS::Region", "ap-east-1"] Resources: ConnectorConfig: @@ -62,7 +64,9 @@ Resources: glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" - ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-vertica:2022.47.1' + ImageUri: !Sub + - '${Account}.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-vertica:2022.47.1' + - Account: !If [IsRegionBAH, 084828588479, !If [IsRegionHKG, 183295418215, 292517598671]] Description: "Amazon Athena Vertica Connector" Timeout: 900 MemorySize: 3008 From aa3efc153764183e5c6d75ff16a81b85d6a541ef Mon Sep 17 00:00:00 2001 From: ejeffrli Date: Thu, 31 Oct 2024 11:42:53 -0400 Subject: [PATCH 54/87] Use gamma glue endpoint if environment variable specified --- .../lambda/connection/EnvironmentProperties.java | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java index 569972e1b4..3b479328a4 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java @@ -29,6 +29,7 @@ 
import software.amazon.awssdk.services.glue.model.GetConnectionRequest; import software.amazon.awssdk.services.glue.model.GetConnectionResponse; +import java.net.URI; import java.time.Duration; import java.util.Arrays; import java.util.HashMap; @@ -66,11 +67,20 @@ public Map createEnvironment() throws RuntimeException public Connection getGlueConnection(String glueConnectionName) throws RuntimeException { try { + HashMap lambdaEnvironment = new HashMap<>(System.getenv()); GlueClient awsGlue = GlueClient.builder() .httpClientBuilder(ApacheHttpClient .builder() .connectionTimeout(Duration.ofMillis(CONNECT_TIMEOUT))) .build(); + if (lambdaEnvironment.getOrDefault("USE_GAMMA_GLUE", "false").equals("true")) { + awsGlue = GlueClient.builder() + .endpointOverride(new URI(String.format("https://glue-gamma.%s.amazonaws.com", lambdaEnvironment.get("AWS_REGION")))) + .httpClientBuilder(ApacheHttpClient + .builder() + .connectionTimeout(Duration.ofMillis(CONNECT_TIMEOUT))) + .build(); + } GetConnectionResponse glueConnection = awsGlue.getConnection(GetConnectionRequest.builder().name(glueConnectionName).build()); logger.debug("Successfully retrieved connection {}", glueConnectionName); return glueConnection.connection(); From bd915a83b7a9aa9a6216ca68220b68e079b61c18 Mon Sep 17 00:00:00 2001 From: ejeffrli Date: Fri, 1 Nov 2024 18:11:35 -0400 Subject: [PATCH 55/87] Use glue gamma as default --- .../connector/lambda/connection/EnvironmentProperties.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java index 3b479328a4..a30847e47c 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java +++ 
b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java @@ -73,7 +73,7 @@ public Connection getGlueConnection(String glueConnectionName) throws RuntimeExc .builder() .connectionTimeout(Duration.ofMillis(CONNECT_TIMEOUT))) .build(); - if (lambdaEnvironment.getOrDefault("USE_GAMMA_GLUE", "false").equals("true")) { + if (lambdaEnvironment.getOrDefault("USE_GAMMA_GLUE", "true").equals("true")) { awsGlue = GlueClient.builder() .endpointOverride(new URI(String.format("https://glue-gamma.%s.amazonaws.com", lambdaEnvironment.get("AWS_REGION")))) .httpClientBuilder(ApacheHttpClient From 483fc3b341aeecb10291fce4cbb042ba65e3239f Mon Sep 17 00:00:00 2001 From: ejeffrli Date: Mon, 4 Nov 2024 13:01:17 -0500 Subject: [PATCH 56/87] Use proper SecretNamePrefix instead of SecretName --- .../app/lib/stacks/opensearch-stack.ts | 2 +- .../app/lib/stacks/rds-generic-stack.ts | 2 +- .../app/lib/stacks/redshift-stack.ts | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/validation_testing/cdk_federation_infra_provisioning/app/lib/stacks/opensearch-stack.ts b/validation_testing/cdk_federation_infra_provisioning/app/lib/stacks/opensearch-stack.ts index 58ac20ab52..5b2378e815 100644 --- a/validation_testing/cdk_federation_infra_provisioning/app/lib/stacks/opensearch-stack.ts +++ b/validation_testing/cdk_federation_infra_provisioning/app/lib/stacks/opensearch-stack.ts @@ -168,7 +168,7 @@ export class OpenSearchStack extends cdk.Stack { parameters: { 'AthenaCatalogName': `opensearch-cdk-deployed`, 'IsVPCAccess': true, - 'SecretName': 'asdf', + 'SecretNamePrefix': 'asdf', 'AutoDiscoverEndpoint': false, 'DomainMapping': `default=${connectionString}`, 'SecurityGroupIds': [securityGroup.securityGroupId], diff --git a/validation_testing/cdk_federation_infra_provisioning/app/lib/stacks/rds-generic-stack.ts b/validation_testing/cdk_federation_infra_provisioning/app/lib/stacks/rds-generic-stack.ts index 37502ba751..c6bde711fc 
100644 --- a/validation_testing/cdk_federation_infra_provisioning/app/lib/stacks/rds-generic-stack.ts +++ b/validation_testing/cdk_federation_infra_provisioning/app/lib/stacks/rds-generic-stack.ts @@ -145,7 +145,7 @@ export class RdsGenericStack extends cdk.Stack { templateFile: cfn_template_file, parameters: { 'LambdaFunctionName': `${db_type}-cdk-deployed`, - 'SecretName': 'asdf', + 'SecretNamePrefix': 'asdf', 'DefaultConnectionString': `${connectionStringPrefix}://${connectionString}`, 'SecurityGroupIds': [securityGroup.securityGroupId], 'SubnetIds': [subnet.subnetId], diff --git a/validation_testing/cdk_federation_infra_provisioning/app/lib/stacks/redshift-stack.ts b/validation_testing/cdk_federation_infra_provisioning/app/lib/stacks/redshift-stack.ts index 631a36ba5a..8f70345b02 100644 --- a/validation_testing/cdk_federation_infra_provisioning/app/lib/stacks/redshift-stack.ts +++ b/validation_testing/cdk_federation_infra_provisioning/app/lib/stacks/redshift-stack.ts @@ -141,7 +141,7 @@ export class RedshiftStack extends cdk.Stack { templateFile: cfn_template_file, parameters: { 'LambdaFunctionName': 'redshift-cdk-deployed', - 'SecretName': 'asdf', + 'SecretNamePrefix': 'asdf', 'DefaultConnectionString': `${connectionStringPrefix}://${connectionString}`, 'SecurityGroupIds': [securityGroup.securityGroupId], 'SubnetIds': [subnet.subnetId], From e602b6ddfe4237eb7018a0914c35694cc931b57b Mon Sep 17 00:00:00 2001 From: ejeffrli Date: Fri, 8 Nov 2024 16:36:41 -0500 Subject: [PATCH 57/87] Increase glue connection timeout --- .../connector/lambda/connection/EnvironmentConstants.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentConstants.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentConstants.java index 33687e029a..fa5621ebf8 100644 --- 
a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentConstants.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentConstants.java @@ -23,7 +23,7 @@ public final class EnvironmentConstants { private EnvironmentConstants() {} - public static final int CONNECT_TIMEOUT = 250; + public static final int CONNECT_TIMEOUT = 2000; // Lambda environment variable keys public static final String DEFAULT_GLUE_CONNECTION = "glue_connection"; From 7301f73d40a4a8825f4f723db4fcf829764f2738 Mon Sep 17 00:00:00 2001 From: Aimery Methena <159072740+aimethed@users.noreply.github.com> Date: Mon, 11 Nov 2024 15:54:44 -0500 Subject: [PATCH 58/87] add ImageConfig back to postgres connection yaml (#2386) --- athena-postgresql/athena-postgresql-connection.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/athena-postgresql/athena-postgresql-connection.yaml b/athena-postgresql/athena-postgresql-connection.yaml index cebadb96ec..79b3cb38c8 100644 --- a/athena-postgresql/athena-postgresql-connection.yaml +++ b/athena-postgresql/athena-postgresql-connection.yaml @@ -69,6 +69,8 @@ Resources: ImageUri: !Sub - '${Account}.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-postgresql:2022.47.1' - Account: !If [IsRegionBAH, 084828588479, !If [IsRegionHKG, 183295418215, 292517598671]] + ImageConfig: + Command: [ !Sub "com.amazonaws.athena.connectors.postgresql.PostGreSqlCompositeHandler" ] Description: "Enables Amazon Athena to communicate with PostgreSQL using JDBC" Timeout: 900 MemorySize: 3008 From 856374e132cf1b7a9a51d65cbb966ed23cef016d Mon Sep 17 00:00:00 2001 From: chngpe <102991671+chngpe@users.noreply.github.com> Date: Wed, 13 Nov 2024 10:16:16 -0500 Subject: [PATCH 59/87] Snowflake case insensitive match instead of upper case by default (#2387) --- .../SnowflakeCaseInsensitiveResolver.java | 114 ++++++++ .../snowflake/SnowflakeMetadataHandler.java | 247 ++++++++---------- 
.../snowflake/SnowflakeRecordHandler.java | 3 +- .../SnowflakeMetadataHandlerTest.java | 28 +- 4 files changed, 234 insertions(+), 158 deletions(-) create mode 100644 athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeCaseInsensitiveResolver.java diff --git a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeCaseInsensitiveResolver.java b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeCaseInsensitiveResolver.java new file mode 100644 index 0000000000..490d032b95 --- /dev/null +++ b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeCaseInsensitiveResolver.java @@ -0,0 +1,114 @@ +/*- + * #%L + * athena-snowflake + * %% + * Copyright (C) 2019 - 2024 Amazon Web Services + * %% + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * #L% + */ +package com.amazonaws.athena.connectors.snowflake; + +import com.amazonaws.athena.connector.lambda.domain.TableName; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Map; + +public class SnowflakeCaseInsensitiveResolver +{ + private static final Logger LOGGER = LoggerFactory.getLogger(SnowflakeCaseInsensitiveResolver.class); + private static final String SCHEMA_NAME_QUERY = "select * from INFORMATION_SCHEMA.SCHEMATA where lower(SCHEMA_NAME) = "; + private static final String TABLE_NAME_QUERY = "select * from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA = "; + private static final String SCHEMA_NAME_COLUMN_KEY = "SCHEMA_NAME"; + private static final String TABLE_NAME_COLUMN_KEY = "TABLE_NAME"; + + private static final String ENABLE_CASE_INSENSITIVE_MATCH = "enable_case_insensitive_match"; + + private SnowflakeCaseInsensitiveResolver() + { + } + + public static TableName getTableNameObjectCaseInsensitiveMatch(final Connection connection, TableName tableName, Map configOptions) + throws SQLException + { + if (!isCaseInsensitiveMatchEnable(configOptions)) { + return tableName; + } + + String schemaNameCaseInsensitively = getSchemaNameCaseInsensitively(connection, tableName.getSchemaName(), configOptions); + String tableNameCaseInsensitively = getTableNameCaseInsensitively(connection, schemaNameCaseInsensitively, tableName.getTableName(), configOptions); + + return new TableName(schemaNameCaseInsensitively, tableNameCaseInsensitively); + } + + public static String getSchemaNameCaseInsensitively(final Connection connection, String schemaNameInput, Map configOptions) + throws SQLException + { + if (!isCaseInsensitiveMatchEnable(configOptions)) { + return schemaNameInput; + } + + return getNameCaseInsensitively(connection, SCHEMA_NAME_COLUMN_KEY, SCHEMA_NAME_QUERY + "'" + schemaNameInput.toLowerCase() + "'", 
configOptions); + } + + public static String getTableNameCaseInsensitively(final Connection connection, String schemaName, String tableNameInput, Map configOptions) + throws SQLException + { + if (!isCaseInsensitiveMatchEnable(configOptions)) { + return tableNameInput; + } + //'?' and lower(TABLE_NAME) = '?' + return getNameCaseInsensitively(connection, TABLE_NAME_COLUMN_KEY, TABLE_NAME_QUERY + "'" + schemaName + "' and lower(TABLE_NAME) = '" + tableNameInput.toLowerCase() + "'", configOptions); + } + + public static String getNameCaseInsensitively(final Connection connection, String columnLabel, String query, Map configOptions) + throws SQLException + { + LOGGER.debug("getNameCaseInsensitively, query:" + query); + String nameFromSnowFlake = null; + int i = 0; + try (Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery(query)) { + while (resultSet.next()) { + i++; + String schemaNameCandidate = resultSet.getString(columnLabel); + LOGGER.debug("Case insensitive search on columLabel: {}, schema name: {}", columnLabel, schemaNameCandidate); + nameFromSnowFlake = schemaNameCandidate; + } + } + catch (SQLException e) { + throw new RuntimeException(e); + } + + if (i == 0 || i > 1) { + throw new RuntimeException(String.format("Schema name case insensitive match failed, number of match : %d", i)); + } + + return nameFromSnowFlake; + } + + private static boolean isCaseInsensitiveMatchEnable(Map configOptions) + { + String enableCaseInsensitiveMatchEnvValue = configOptions.getOrDefault(ENABLE_CASE_INSENSITIVE_MATCH, "false").toLowerCase(); + boolean enableCaseInsensitiveMatch = enableCaseInsensitiveMatchEnvValue.equals("true"); + LOGGER.info("{} environment variable set to: {}. 
Resolved to: {}", + ENABLE_CASE_INSENSITIVE_MATCH, enableCaseInsensitiveMatchEnvValue, enableCaseInsensitiveMatch); + + return enableCaseInsensitiveMatch; + } +} diff --git a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandler.java b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandler.java index d9c9b9d3f1..4737c7f2f2 100644 --- a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandler.java +++ b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandler.java @@ -41,6 +41,8 @@ import com.amazonaws.athena.connector.lambda.metadata.GetTableResponse; import com.amazonaws.athena.connector.lambda.metadata.ListSchemasRequest; import com.amazonaws.athena.connector.lambda.metadata.ListSchemasResponse; +import com.amazonaws.athena.connector.lambda.metadata.ListTablesRequest; +import com.amazonaws.athena.connector.lambda.metadata.ListTablesResponse; import com.amazonaws.athena.connector.lambda.metadata.optimizations.DataSourceOptimizations; import com.amazonaws.athena.connector.lambda.metadata.optimizations.OptimizationSubType; import com.amazonaws.athena.connector.lambda.metadata.optimizations.pushdown.ComplexExpressionPushdownSubType; @@ -57,6 +59,7 @@ import com.amazonaws.athena.connectors.jdbc.manager.PreparedStatementBuilder; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import org.apache.arrow.vector.complex.reader.FieldReader; @@ -84,6 +87,7 @@ import java.util.Set; import java.util.stream.Collectors; +import static com.amazonaws.athena.connector.lambda.metadata.ListTablesRequest.UNLIMITED_PAGE_SIZE_VALUE; import static com.amazonaws.athena.connectors.snowflake.SnowflakeConstants.MAX_PARTITION_COUNT; import 
static com.amazonaws.athena.connectors.snowflake.SnowflakeConstants.SINGLE_SPLIT_LIMIT_COUNT; @@ -93,7 +97,7 @@ */ public class SnowflakeMetadataHandler extends JdbcMetadataHandler { - static final Map JDBC_PROPERTIES = ImmutableMap.of("databaseTerm", "SCHEMA"); + static final Map JDBC_PROPERTIES = ImmutableMap.of("databaseTerm", "SCHEMA", "CLIENT_RESULT_COLUMN_CASE_INSENSITIVE", "true"); static final String BLOCK_PARTITION_COLUMN_NAME = "partition"; private static final Logger LOGGER = LoggerFactory.getLogger(SnowflakeMetadataHandler.class); private static final int MAX_SPLITS_PER_REQUEST = 1000_000; @@ -109,13 +113,23 @@ public class SnowflakeMetadataHandler extends JdbcMetadataHandler static final String SHOW_PRIMARY_KEYS_QUERY = "SHOW PRIMARY KEYS IN "; static final String PRIMARY_KEY_COLUMN_NAME = "column_name"; static final String COUNTS_COLUMN_NAME = "COUNTS"; - private static final String CASE_UPPER = "upper"; - private static final String CASE_LOWER = "lower"; /** * Query to check view */ static final String VIEW_CHECK_QUERY = "SELECT * FROM information_schema.views WHERE table_schema = ? AND table_name = ?"; static final String ALL_PARTITIONS = "*"; + + static final Map STRING_ARROW_TYPE_MAP = com.google.common.collect.ImmutableMap.of( + "INTEGER", (ArrowType) Types.MinorType.INT.getType(), + "DATE", (ArrowType) Types.MinorType.DATEDAY.getType(), + "TIMESTAMP", (ArrowType) Types.MinorType.DATEMILLI.getType(), + "TIMESTAMP_LTZ", (ArrowType) Types.MinorType.DATEMILLI.getType(), + "TIMESTAMP_NTZ", (ArrowType) Types.MinorType.DATEMILLI.getType(), + "TIMESTAMP_TZ", (ArrowType) Types.MinorType.DATEMILLI.getType(), + "TIMESTAMPLTZ", (ArrowType) Types.MinorType.DATEMILLI.getType(), + "TIMESTAMPNTZ", (ArrowType) Types.MinorType.DATEMILLI.getType(), + "TIMESTAMPTZ", (ArrowType) Types.MinorType.DATEMILLI.getType() + ); /** * Instantiates handler to be used by Lambda function directly. 
* @@ -150,6 +164,7 @@ protected SnowflakeMetadataHandler( @Override public GetDataSourceCapabilitiesResponse doGetDataSourceCapabilities(BlockAllocator allocator, GetDataSourceCapabilitiesRequest request) { + LOGGER.debug("doGetDataSourceCapabilities: " + request); ImmutableMap.Builder> capabilities = ImmutableMap.builder(); capabilities.put(DataSourceOptimizations.SUPPORTS_FILTER_PUSHDOWN.withSupportedSubTypes( @@ -180,26 +195,29 @@ public SnowflakeMetadataHandler(DatabaseConnectionConfig databaseConnectionConfi @Override public Schema getPartitionSchema(final String catalogName) { + LOGGER.debug("getPartitionSchema: " + catalogName); SchemaBuilder schemaBuilder = SchemaBuilder.newBuilder() .addField(BLOCK_PARTITION_COLUMN_NAME, Types.MinorType.VARCHAR.getType()); return schemaBuilder.build(); } - private Optional getPrimaryKey(TableName tableName) throws Exception + private Optional getPrimaryKey(TableName tableName) throws Exception { + LOGGER.debug("getPrimaryKey tableName: " + tableName); List primaryKeys = new ArrayList(); try (Connection connection = getJdbcConnectionFactory().getConnection(getCredentialProvider())) { - try (PreparedStatement preparedStatement = connection.prepareStatement(SHOW_PRIMARY_KEYS_QUERY + tableName.getTableName()); + try (PreparedStatement preparedStatement = connection.prepareStatement(SHOW_PRIMARY_KEYS_QUERY + "\"" + tableName.getSchemaName() + "\".\"" + tableName.getTableName() + "\""); ResultSet rs = preparedStatement.executeQuery()) { while (rs.next()) { // Concatenate multiple primary keys if they exist primaryKeys.add(rs.getString(PRIMARY_KEY_COLUMN_NAME)); } } - } - String primaryKey = String.join(", ", primaryKeys); - if (!Strings.isNullOrEmpty(primaryKey) && hasUniquePrimaryKey(tableName, primaryKey)) { - return Optional.of(primaryKey); + + String primaryKey = String.join(", ", primaryKeys); + if (!Strings.isNullOrEmpty(primaryKey) && hasUniquePrimaryKey(tableName, primaryKey)) { + return Optional.of(primaryKey); + } } 
return Optional.empty(); } @@ -237,38 +255,42 @@ private boolean hasUniquePrimaryKey(TableName tableName, String primaryKey) thro public void getPartitions(BlockWriter blockWriter, GetTableLayoutRequest getTableLayoutRequest, QueryStatusChecker queryStatusChecker) throws Exception { - LOGGER.info("{}: Schema {}, table {}", getTableLayoutRequest.getQueryId(), getTableLayoutRequest.getTableName().getSchemaName(), + LOGGER.debug("getPartitions: {}: Schema {}, table {}", getTableLayoutRequest.getQueryId(), getTableLayoutRequest.getTableName().getSchemaName(), getTableLayoutRequest.getTableName().getTableName()); - /** - * "MAX_PARTITION_COUNT" is currently set to 50 to limit the number of partitions. - * this is to handle timeout issues because of huge partitions - */ - LOGGER.info(" Total Partition Limit" + MAX_PARTITION_COUNT); - boolean viewFlag = checkForView(getTableLayoutRequest); - //if the input table is a view , there will be single split - if (viewFlag) { - blockWriter.writeRows((Block block, int rowNum) -> { - block.setValue(BLOCK_PARTITION_COLUMN_NAME, rowNum, ALL_PARTITIONS); - return 1; - }); - } - else { + + try (Connection connection = getJdbcConnectionFactory().getConnection(getCredentialProvider())) { + TableName tableName = SnowflakeCaseInsensitiveResolver.getTableNameObjectCaseInsensitiveMatch(connection, getTableLayoutRequest.getTableName(), configOptions); + /** + * "MAX_PARTITION_COUNT" is currently set to 50 to limit the number of partitions. 
+ * this is to handle timeout issues because of huge partitions + */ + LOGGER.info(" Total Partition Limit" + MAX_PARTITION_COUNT); + boolean viewFlag = checkForView(tableName); + //if the input table is a view , there will be single split + if (viewFlag) { + blockWriter.writeRows((Block block, int rowNum) -> { + block.setValue(BLOCK_PARTITION_COLUMN_NAME, rowNum, ALL_PARTITIONS); + return 1; + }); + return; + } + double totalRecordCount = 0; LOGGER.info(COUNT_RECORDS_QUERY); - List parameters = Arrays.asList(getTableLayoutRequest.getTableName().getSchemaName(), getTableLayoutRequest.getTableName().getTableName()); - try (Connection connection = getJdbcConnectionFactory().getConnection(getCredentialProvider()); - PreparedStatement preparedStatement = new PreparedStatementBuilder().withConnection(connection) - .withQuery(COUNT_RECORDS_QUERY).withParameters(parameters).build(); - ResultSet rs = preparedStatement.executeQuery()) { + try (PreparedStatement preparedStatement = new PreparedStatementBuilder() + .withConnection(connection) + .withQuery(COUNT_RECORDS_QUERY) + .withParameters(Arrays.asList(tableName.getSchemaName(), tableName.getTableName())).build(); + ResultSet rs = preparedStatement.executeQuery()) { while (rs.next()) { totalRecordCount = rs.getLong(1); } if (totalRecordCount > 0) { - Optional primaryKey = getPrimaryKey(getTableLayoutRequest.getTableName()); + Optional primaryKey = getPrimaryKey(tableName); long recordsInPartition = (long) (Math.ceil(totalRecordCount / MAX_PARTITION_COUNT)); long partitionRecordCount = (totalRecordCount <= SINGLE_SPLIT_LIMIT_COUNT || !primaryKey.isPresent()) ? 
(long) totalRecordCount : recordsInPartition; - LOGGER.info(" Total Page Count: " + partitionRecordCount); + LOGGER.info(" Total Page Count: " + partitionRecordCount); double numberOfPartitions = (int) Math.ceil(totalRecordCount / partitionRecordCount); long offset = 0; /** @@ -276,7 +298,7 @@ public void getPartitions(BlockWriter blockWriter, GetTableLayoutRequest getTabl * It will have maximum 50 partitions and number of records in each partition is decided by dividing total number of records by 50 * the partition values we are setting the limit and offset values like p-limit-3000-offset-0 */ - for (int i = 1; i <= numberOfPartitions; i++) { + for (int i = 1; i <= numberOfPartitions; i++) { final String partitionVal = BLOCK_PARTITION_COLUMN_NAME + "-primary-" + primaryKey.orElse("") + "-limit-" + partitionRecordCount + "-offset-" + offset; LOGGER.info("partitionVal {} ", partitionVal); blockWriter.writeRows((Block block, int rowNum) -> @@ -288,22 +310,19 @@ public void getPartitions(BlockWriter blockWriter, GetTableLayoutRequest getTabl } } else { - LOGGER.info("No Records Found for table {}", getTableLayoutRequest.getTableName().getTableName()); + LOGGER.info("No Records Found for table {}", tableName); } } } } - /** + /* * Check if the input table is a view and returns viewflag accordingly - * @param getTableLayoutRequest - * @return - * @throws Exception */ - private boolean checkForView(GetTableLayoutRequest getTableLayoutRequest) throws Exception + private boolean checkForView(TableName tableName) throws Exception { boolean viewFlag = false; - List viewparameters = Arrays.asList(getTableLayoutRequest.getTableName().getSchemaName(), getTableLayoutRequest.getTableName().getTableName()); + List viewparameters = Arrays.asList(tableName.getSchemaName(), tableName.getTableName()); try (Connection connection = getJdbcConnectionFactory().getConnection(getCredentialProvider())) { try (PreparedStatement preparedStatement = new 
PreparedStatementBuilder().withConnection(connection).withQuery(VIEW_CHECK_QUERY).withParameters(viewparameters).build(); ResultSet resultSet = preparedStatement.executeQuery()) { @@ -319,7 +338,7 @@ private boolean checkForView(GetTableLayoutRequest getTableLayoutRequest) throws @Override public GetSplitsResponse doGetSplits(BlockAllocator blockAllocator, GetSplitsRequest getSplitsRequest) { - LOGGER.info("{}: Catalog {}, table {}", getSplitsRequest.getQueryId(), getSplitsRequest.getTableName().getSchemaName(), getSplitsRequest.getTableName().getTableName()); + LOGGER.info("doGetSplits: {}: Catalog {}, table {}", getSplitsRequest.getQueryId(), getSplitsRequest.getTableName().getSchemaName(), getSplitsRequest.getTableName().getTableName()); if (getSplitsRequest.getConstraints().isQueryPassThrough()) { LOGGER.info("QPT Split Requested"); return setupQueryPassthroughSplit(getSplitsRequest); @@ -362,15 +381,55 @@ private String encodeContinuationToken(int partition) public GetTableResponse doGetTable(final BlockAllocator blockAllocator, final GetTableRequest getTableRequest) throws Exception { + LOGGER.debug("doGetTable getTableName:{}", getTableRequest.getTableName()); try (Connection connection = getJdbcConnectionFactory().getConnection(getCredentialProvider())) { Schema partitionSchema = getPartitionSchema(getTableRequest.getCatalogName()); - TableName tableName = getTableFromMetadata(connection.getCatalog(), getTableRequest.getTableName(), connection.getMetaData()); + TableName tableName = SnowflakeCaseInsensitiveResolver.getTableNameObjectCaseInsensitiveMatch(connection, getTableRequest.getTableName(), configOptions); GetTableResponse getTableResponse = new GetTableResponse(getTableRequest.getCatalogName(), tableName, getSchema(connection, tableName, partitionSchema), partitionSchema.getFields().stream().map(Field::getName).collect(Collectors.toSet())); return getTableResponse; } } + @Override + public ListTablesResponse doListTables(final BlockAllocator 
blockAllocator, final ListTablesRequest listTablesRequest) + throws Exception + { + try (Connection connection = getJdbcConnectionFactory().getConnection(getCredentialProvider())) { + LOGGER.info("{}: List table names for Catalog {}, Schema {}", listTablesRequest.getQueryId(), + listTablesRequest.getCatalogName(), listTablesRequest.getSchemaName()); + String schemaName = SnowflakeCaseInsensitiveResolver.getSchemaNameCaseInsensitively(connection, listTablesRequest.getSchemaName(), configOptions); + + String token = listTablesRequest.getNextToken(); + int pageSize = listTablesRequest.getPageSize(); + + if (pageSize == UNLIMITED_PAGE_SIZE_VALUE && token == null) { // perform no pagination + LOGGER.info("doListTables - NO pagination"); + return new ListTablesResponse(listTablesRequest.getCatalogName(), listTablesNoPagination(connection, schemaName), null); + } + + LOGGER.info("doListTables - pagination - NOT SUPPORTED - return all tables"); + return new ListTablesResponse(listTablesRequest.getCatalogName(), listTablesNoPagination(connection, schemaName), null); + } + } + + private List listTablesNoPagination(final Connection jdbcConnection, final String databaseName) + throws SQLException + { + LOGGER.debug("listTables, databaseName:" + databaseName); + try (ResultSet resultSet = jdbcConnection.getMetaData().getTables( + jdbcConnection.getCatalog(), + databaseName, + null, + new String[] {"TABLE", "VIEW", "EXTERNAL TABLE", "MATERIALIZED VIEW"})) { + ImmutableList.Builder list = ImmutableList.builder(); + while (resultSet.next()) { + list.add(JDBCUtil.getSchemaTableName(resultSet)); + } + return list.build(); + } + } + /** * * @param jdbcConnection @@ -379,9 +438,10 @@ public GetTableResponse doGetTable(final BlockAllocator blockAllocator, final Ge * @return * @throws Exception */ - public Schema getSchema(Connection jdbcConnection, TableName tableName, Schema partitionSchema) + private Schema getSchema(Connection jdbcConnection, TableName tableName, Schema 
partitionSchema) throws Exception { + LOGGER.debug("getSchema start, tableName:" + tableName); /** * query to fetch column data type to handle appropriate datatype to arrowtype conversions. */ @@ -391,8 +451,8 @@ public Schema getSchema(Connection jdbcConnection, TableName tableName, Schema p try (ResultSet resultSet = getColumns(jdbcConnection.getCatalog(), tableName, jdbcConnection.getMetaData()); Connection connection = getJdbcConnectionFactory().getConnection(getCredentialProvider()); PreparedStatement stmt = connection.prepareStatement(dataTypeQuery)) { - stmt.setString(1, tableName.getSchemaName().toUpperCase()); - stmt.setString(2, tableName.getTableName().toUpperCase()); + stmt.setString(1, tableName.getSchemaName()); + stmt.setString(2, tableName.getTableName()); HashMap hashMap = new HashMap(); ResultSet dataTypeResultSet = stmt.executeQuery(); @@ -419,16 +479,8 @@ public Schema getSchema(Connection jdbcConnection, TableName tableName, Schema p String dataType = hashMap.get(columnName); LOGGER.debug("columnName: " + columnName); LOGGER.debug("dataType: " + dataType); - final Map stringArrowTypeMap = com.google.common.collect.ImmutableMap.of( - "INTEGER", (ArrowType) Types.MinorType.INT.getType(), - "DATE", (ArrowType) Types.MinorType.DATEDAY.getType(), - "TIMESTAMP", (ArrowType) Types.MinorType.DATEMILLI.getType(), - "TIMESTAMP_LTZ", (ArrowType) Types.MinorType.DATEMILLI.getType(), - "TIMESTAMP_NTZ", (ArrowType) Types.MinorType.DATEMILLI.getType(), - "TIMESTAMP_TZ", (ArrowType) Types.MinorType.DATEMILLI.getType() - ); - if (dataType != null && stringArrowTypeMap.containsKey(dataType.toUpperCase())) { - columnType = stringArrowTypeMap.get(dataType.toUpperCase()); + if (dataType != null && STRING_ARROW_TYPE_MAP.containsKey(dataType.toUpperCase())) { + columnType = STRING_ARROW_TYPE_MAP.get(dataType.toUpperCase()); } /** * converting into VARCHAR for not supported data types. 
@@ -469,90 +521,18 @@ public Schema getSchema(Connection jdbcConnection, TableName tableName, Schema p private ResultSet getColumns(final String catalogName, final TableName tableHandle, final DatabaseMetaData metadata) throws SQLException { + LOGGER.debug("getColumns, catalogName:" + catalogName + ", tableHandle: " + tableHandle); String escape = metadata.getSearchStringEscape(); - return metadata.getColumns( + + ResultSet columns = metadata.getColumns( catalogName, escapeNamePattern(tableHandle.getSchemaName(), escape), escapeNamePattern(tableHandle.getTableName(), escape), null); - } - /** - * Finding table name from query hint - * In sap hana schemas and tables can be case sensitive, but executed query from athena sends table and schema names - * in lower case, this has been handled by appending query hint to the table name as below - * "lambda:lambdaname".SCHEMA_NAME."TABLE_NAME@schemacase=upper&tablecase=upper" - * @param table - * @return - */ - protected TableName findTableNameFromQueryHint(TableName table) - { - //if no query hints has been passed then return input table name - if (!table.getTableName().contains("@")) { - return new TableName(table.getSchemaName().toUpperCase(), table.getTableName().toUpperCase()); - } - //analyze the hint to find table and schema case - String[] tbNameWithQueryHint = table.getTableName().split("@"); - String[] hintDetails = tbNameWithQueryHint[1].split("&"); - String schemaCase = CASE_UPPER; - String tableCase = CASE_UPPER; - String tableName = tbNameWithQueryHint[0]; - for (String str : hintDetails) { - String[] hintDetail = str.split("="); - if (hintDetail[0].contains("schema")) { - schemaCase = hintDetail[1]; - } - else if (hintDetail[0].contains("table")) { - tableCase = hintDetail[1]; - } - } - if (schemaCase.equalsIgnoreCase(CASE_UPPER) && tableCase.equalsIgnoreCase(CASE_UPPER)) { - return new TableName(table.getSchemaName().toUpperCase(), tableName.toUpperCase()); - } - else if 
(schemaCase.equalsIgnoreCase(CASE_LOWER) && tableCase.equalsIgnoreCase(CASE_LOWER)) { - return new TableName(table.getSchemaName().toLowerCase(), tableName.toLowerCase()); - } - else if (schemaCase.equalsIgnoreCase(CASE_LOWER) && tableCase.equalsIgnoreCase(CASE_UPPER)) { - return new TableName(table.getSchemaName().toLowerCase(), tableName.toUpperCase()); - } - else if (schemaCase.equalsIgnoreCase(CASE_UPPER) && tableCase.equalsIgnoreCase(CASE_LOWER)) { - return new TableName(table.getSchemaName().toUpperCase(), tableName.toLowerCase()); - } - else { - return new TableName(table.getSchemaName().toUpperCase(), tableName.toUpperCase()); - } + return columns; } - /** - * Logic to handle case sensitivity of table name and schema name - * @param catalogName - * @param tableHandle - * @param metadata - * @return - * @throws SQLException - */ - protected TableName getTableFromMetadata(final String catalogName, final TableName tableHandle, final DatabaseMetaData metadata) - throws SQLException - { - TableName tableName = findTableNameFromQueryHint(tableHandle); - //check for presence exact table and schema name returned by findTableNameFromQueryHint method by invoking metadata.getTables method - ResultSet resultSet = metadata.getTables(catalogName, tableName.getSchemaName(), tableName.getTableName(), null); - while (resultSet.next()) { - if (tableName.getTableName().equals(resultSet.getString(3))) { - tableName = new TableName(tableName.getSchemaName(), resultSet.getString(3)); - return tableName; - } - } - // if table not found in above step, check for presence of input table by doing pattern search - ResultSet rs = metadata.getTables(catalogName, tableName.getSchemaName().toUpperCase(), "%", null); - while (rs.next()) { - if (tableName.getTableName().equalsIgnoreCase(rs.getString(3))) { - tableName = new TableName(tableName.getSchemaName().toUpperCase(), rs.getString(3)); - return tableName; - } - } - return tableName; - } @Override public ListSchemasResponse 
doListSchemaNames(final BlockAllocator blockAllocator, final ListSchemasRequest listSchemasRequest) throws Exception @@ -562,10 +542,13 @@ public ListSchemasResponse doListSchemaNames(final BlockAllocator blockAllocator return new ListSchemasResponse(listSchemasRequest.getCatalogName(), listDatabaseNames(connection)); } } - protected static Set listDatabaseNames(final Connection jdbcConnection) + + private static Set listDatabaseNames(final Connection jdbcConnection) throws Exception { - try (ResultSet resultSet = jdbcConnection.getMetaData().getSchemas()) { + try (ResultSet resultSet = jdbcConnection + .getMetaData() + .getSchemas(jdbcConnection.getCatalog(), null)) { ImmutableSet.Builder schemaNames = ImmutableSet.builder(); String inputCatalogName = jdbcConnection.getCatalog(); String inputSchemaName = jdbcConnection.getSchema(); diff --git a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeRecordHandler.java b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeRecordHandler.java index 28ac13ff21..0242ec0e82 100644 --- a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeRecordHandler.java +++ b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeRecordHandler.java @@ -77,7 +77,7 @@ public SnowflakeRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, } @Override - public PreparedStatement buildSplitSql(Connection jdbcConnection, String catalogName, TableName tableName, Schema schema, Constraints constraints, Split split) throws SQLException + public PreparedStatement buildSplitSql(Connection jdbcConnection, String catalogName, TableName tableNameInput, Schema schema, Constraints constraints, Split split) throws SQLException { PreparedStatement preparedStatement; @@ -85,6 +85,7 @@ public PreparedStatement buildSplitSql(Connection jdbcConnection, String catalog preparedStatement = buildQueryPassthroughSql(jdbcConnection, 
constraints); } else { + TableName tableName = SnowflakeCaseInsensitiveResolver.getTableNameObjectCaseInsensitiveMatch(jdbcConnection, tableNameInput, configOptions); preparedStatement = jdbcSplitQueryBuilder.buildSql(jdbcConnection, null, tableName.getSchemaName(), tableName.getTableName(), schema, constraints, split); } diff --git a/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandlerTest.java b/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandlerTest.java index 6a219a3b1f..15326bc5c9 100644 --- a/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandlerTest.java +++ b/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandlerTest.java @@ -111,7 +111,7 @@ public void doGetTableLayout() String[] primaryKeyColumns = new String[] {SnowflakeMetadataHandler.PRIMARY_KEY_COLUMN_NAME}; String[][] primaryKeyValues = new String[][]{new String[] {"pkey"}}; ResultSet primaryKeyResultSet = mockResultSet(primaryKeyColumns, primaryKeyValues, new AtomicInteger(-1)); - Mockito.when(this.connection.prepareStatement(SnowflakeMetadataHandler.SHOW_PRIMARY_KEYS_QUERY + "testTable")).thenReturn(primaryKeyPreparedStatement); + Mockito.when(this.connection.prepareStatement(SnowflakeMetadataHandler.SHOW_PRIMARY_KEYS_QUERY + "\"testSchema\"" + "." 
+ "\"testTable\"")).thenReturn(primaryKeyPreparedStatement); Mockito.when(primaryKeyPreparedStatement.executeQuery()).thenReturn(primaryKeyResultSet); PreparedStatement countsPreparedStatement = Mockito.mock(PreparedStatement.class); @@ -236,7 +236,7 @@ public void doGetTableLayoutMaxPartition() String[] primaryKeyColumns = new String[] {SnowflakeMetadataHandler.PRIMARY_KEY_COLUMN_NAME}; String[][] primaryKeyValues = new String[][]{new String[] {"pkey"}}; ResultSet primaryKeyResultSet = mockResultSet(primaryKeyColumns, primaryKeyValues, new AtomicInteger(-1)); - Mockito.when(this.connection.prepareStatement(SnowflakeMetadataHandler.SHOW_PRIMARY_KEYS_QUERY + "testTable")).thenReturn(primaryKeyPreparedStatement); + Mockito.when(this.connection.prepareStatement(SnowflakeMetadataHandler.SHOW_PRIMARY_KEYS_QUERY + "\"testSchema\"" + "." + "\"testTable\"")).thenReturn(primaryKeyPreparedStatement); Mockito.when(primaryKeyPreparedStatement.executeQuery()).thenReturn(primaryKeyResultSet); PreparedStatement countsPreparedStatement = Mockito.mock(PreparedStatement.class); @@ -426,28 +426,6 @@ public void doGetTable() Assert.assertEquals("testCatalog", getTableResponse.getCatalogName()); } - @Test - public void testFindTableNameFromQueryHint() - throws Exception - { - TableName inputTableName = new TableName("testSchema", "testTable@schemacase=upper&tablecase=upper"); - TableName tableName = snowflakeMetadataHandler.findTableNameFromQueryHint(inputTableName); - Assert.assertEquals(new TableName("TESTSCHEMA", "TESTTABLE"), tableName); - - TableName inputTableName1 = new TableName("testSchema", "testTable@schemacase=upper&tablecase=lower"); - TableName tableName1 = snowflakeMetadataHandler.findTableNameFromQueryHint(inputTableName1); - Assert.assertEquals(new TableName("TESTSCHEMA", "testtable"), tableName1); - - TableName inputTableName2 = new TableName("testSchema", "testTable@schemacase=lower&tablecase=lower"); - TableName tableName2 = 
snowflakeMetadataHandler.findTableNameFromQueryHint(inputTableName2); - Assert.assertEquals(new TableName("testschema", "testtable"), tableName2); - - TableName inputTableName3 = new TableName("testSchema", "testTable@schemacase=lower&tablecase=upper"); - TableName tableName3 = snowflakeMetadataHandler.findTableNameFromQueryHint(inputTableName3); - Assert.assertEquals(new TableName("testschema", "TESTTABLE"), tableName3); - - } - @Test(expected = RuntimeException.class) public void doListSchemaNames() throws Exception { BlockAllocator blockAllocator = new BlockAllocatorImpl(); @@ -457,7 +435,7 @@ public void doListSchemaNames() throws Exception { Mockito.when(this.connection.createStatement()).thenReturn(statement); String[][] SchemaandCatalogNames = {{"TESTSCHEMA"},{"TESTCATALOG"}}; ResultSet schemaResultSet = mockResultSet(new String[]{"TABLE_SCHEM","TABLE_CATALOG"}, new int[]{Types.VARCHAR,Types.VARCHAR}, SchemaandCatalogNames, new AtomicInteger(-1)); - Mockito.when(this.connection.getMetaData().getSchemas()).thenReturn(schemaResultSet); + Mockito.when(this.connection.getMetaData().getSchemas(any(), any())).thenReturn(schemaResultSet); ListSchemasResponse listSchemasResponse = this.snowflakeMetadataHandler.doListSchemaNames(blockAllocator, listSchemasRequest); String[] expectedResult = {"TESTSCHEMA","TESTCATALOG"}; Assert.assertEquals(Arrays.toString(expectedResult), listSchemasResponse.getSchemas().toString()); From 72a5298e56f148e5fbd19ca08a92140721e7232c Mon Sep 17 00:00:00 2001 From: ejeffrli <144148373+ejeffrli@users.noreply.github.com> Date: Thu, 14 Nov 2024 10:35:58 -0500 Subject: [PATCH 60/87] Check if auth is not null (#2388) --- .../connector/lambda/connection/EnvironmentProperties.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java 
b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java index a30847e47c..3f602d960c 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java @@ -95,7 +95,7 @@ private Map authenticationConfigurationToMap(AuthenticationConfi { Map authMap = new HashMap<>(); - if (StringUtils.isNotBlank(auth.secretArn())) { + if (auth != null && StringUtils.isNotBlank(auth.secretArn())) { String[] splitArn = auth.secretArn().split(":"); String[] secretNameWithRandom = splitArn[splitArn.length - 1].split("-"); // 6 random characters at end. at least length of 2 String[] secretNameArray = Arrays.copyOfRange(secretNameWithRandom, 0, secretNameWithRandom.length - 1); From c076ed84fc46b1bb860607cafbc927f221241fca Mon Sep 17 00:00:00 2001 From: Aimery Methena <159072740+aimethed@users.noreply.github.com> Date: Thu, 14 Nov 2024 16:14:25 -0500 Subject: [PATCH 61/87] add default to oracle connection kmskeyid (#2392) --- athena-oracle/athena-oracle-connection.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/athena-oracle/athena-oracle-connection.yaml b/athena-oracle/athena-oracle-connection.yaml index 0deb4b184c..871f1624b0 100644 --- a/athena-oracle/athena-oracle-connection.yaml +++ b/athena-oracle/athena-oracle-connection.yaml @@ -37,6 +37,7 @@ Parameters: KmsKeyId: Description: "(Optional) By default any data that is spilled to S3 is encrypted using AES-GCM and a randomly generated key. Setting a KMS Key ID allows your Lambda function to use KMS for key generation for a stronger source of encryption keys." 
Type: String + Default: "" LambdaRoleArn: Description: "(Optional) A custom role to be used by the Connector lambda" Type: String From 165f6b350534f258879c3ea0a9caba65c0612073 Mon Sep 17 00:00:00 2001 From: ejeffrli <144148373+ejeffrli@users.noreply.github.com> Date: Thu, 14 Nov 2024 22:52:45 -0500 Subject: [PATCH 62/87] =?UTF-8?q?Fix=20issue=20with=20secret=20being=20not?= =?UTF-8?q?=20populated=20for=20default=20EnvironmentProp=E2=80=A6=20(#239?= =?UTF-8?q?3)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../lambda/connection/EnvironmentProperties.java | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java index 3f602d960c..765dcb28f8 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java @@ -53,10 +53,10 @@ public Map createEnvironment() throws RuntimeException HashMap connectionEnvironment = new HashMap<>(); if (StringUtils.isNotBlank(glueConnectionName)) { Connection connection = getGlueConnection(glueConnectionName); - Map connectionProperties = new HashMap<>(connection.connectionPropertiesAsStrings()); - connectionProperties.putAll(authenticationConfigurationToMap(connection.authenticationConfiguration())); + Map connectionPropertiesWithSecret = new HashMap<>(connection.connectionPropertiesAsStrings()); + connectionPropertiesWithSecret.putAll(authenticationConfigurationToMap(connection.authenticationConfiguration())); - connectionEnvironment.putAll(connectionPropertiesToEnvironment(connectionProperties)); + 
connectionEnvironment.putAll(connectionPropertiesToEnvironment(connectionPropertiesWithSecret)); connectionEnvironment.putAll(athenaPropertiesToEnvironment(connection.athenaProperties())); } @@ -120,13 +120,14 @@ public Map athenaPropertiesToEnvironment(Map ath } /** - * Maps glue connection properties to environment properties like 'default' and 'secret_manager_gcp_creds_name' - * Default behavior is to not populate environment with these properties + * Maps glue connection properties and authentication configuration + * to Athena federation environment properties like 'default' and 'secret_manager_gcp_creds_name' + * Default behavior is to not map to Athena federation environment variables * * @param connectionProperties contains secret_name and connection properties */ public Map connectionPropertiesToEnvironment(Map connectionProperties) { - return new HashMap<>(); + return connectionProperties; } } From 7f8c53cd895cc57b5da817da4b633983c32460c8 Mon Sep 17 00:00:00 2001 From: chngpe <102991671+chngpe@users.noreply.github.com> Date: Fri, 15 Nov 2024 08:51:46 -0500 Subject: [PATCH 63/87] =?UTF-8?q?Passing=20snowflake=20JDBC=20parameter=20?= =?UTF-8?q?into=20parameters=20fields=20instead=20of=20wi=E2=80=A6=20(#239?= =?UTF-8?q?1)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../connection/EnvironmentProperties.java | 1 + .../SnowflakeEnvironmentProperties.java | 87 ++++++++++++++++--- .../snowflake/SnowflakeMetadataHandler.java | 5 +- .../snowflake/SnowflakeRecordHandler.java | 3 +- 4 files changed, 79 insertions(+), 17 deletions(-) diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java index 765dcb28f8..95362f0041 100644 --- 
a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java @@ -52,6 +52,7 @@ public Map createEnvironment() throws RuntimeException HashMap connectionEnvironment = new HashMap<>(); if (StringUtils.isNotBlank(glueConnectionName)) { + connectionEnvironment.put(DEFAULT_GLUE_CONNECTION, glueConnectionName); Connection connection = getGlueConnection(glueConnectionName); Map connectionPropertiesWithSecret = new HashMap<>(connection.connectionPropertiesAsStrings()); connectionPropertiesWithSecret.putAll(authenticationConfigurationToMap(connection.authenticationConfiguration())); diff --git a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeEnvironmentProperties.java b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeEnvironmentProperties.java index 0528378c48..3e74355467 100644 --- a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeEnvironmentProperties.java +++ b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeEnvironmentProperties.java @@ -20,12 +20,17 @@ package com.amazonaws.athena.connectors.snowflake; import com.amazonaws.athena.connectors.jdbc.JdbcEnvironmentProperties; +import com.google.common.base.Strings; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.util.HashMap; import java.util.Map; +import java.util.Optional; import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.DATABASE; import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.DEFAULT; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.DEFAULT_GLUE_CONNECTION; import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.HOST; import static 
com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.PORT; import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.SCHEMA; @@ -33,19 +38,38 @@ public class SnowflakeEnvironmentProperties extends JdbcEnvironmentProperties { + private static final Logger LOGGER = LoggerFactory.getLogger(SnowflakeEnvironmentProperties.class); + private static final String WAREHOUSE_PROPERTY_KEY = "warehouse"; + private static final String DB_PROPERTY_KEY = "db"; + private static final String SCHEMA_PROPERTY_KEY = "schema"; + private static final String SNOWFLAKE_ESCAPE_CHARACTER = "\""; + @Override public Map connectionPropertiesToEnvironment(Map connectionProperties) { HashMap environment = new HashMap<>(); - // now construct jdbc string - String connectionString = getConnectionStringPrefix(connectionProperties) + connectionProperties.get(HOST); + // put it as environment variable so we can put it as JDBC parameters later when creation connection (not with JDBC) + Optional.ofNullable(connectionProperties.get(WAREHOUSE)).ifPresent(x -> environment.put(WAREHOUSE, x)); + Optional.ofNullable(connectionProperties.get(DATABASE)).ifPresent(x -> environment.put(DATABASE, x)); + Optional.ofNullable(connectionProperties.get(SCHEMA)).ifPresent(x -> environment.put(SCHEMA, x)); + + // now construct jdbc string, Snowflake JDBC should just be plain JDBC String. Parameter in JDBC string will get upper case. 
+ StringBuilder connectionStringBuilder = new StringBuilder(getConnectionStringPrefix(connectionProperties)); + connectionStringBuilder.append(connectionProperties.get(HOST)); if (connectionProperties.containsKey(PORT)) { - connectionString = connectionString + ":" + connectionProperties.get(PORT); + connectionStringBuilder + .append(":") + .append(connectionProperties.get(PORT)); + } + + String jdbcParametersString = getJdbcParameters(connectionProperties); + if (!Strings.isNullOrEmpty(jdbcParametersString)) { + LOGGER.info("JDBC parameters found, adding to JDBC String"); + connectionStringBuilder.append(getSnowflakeJDBCParameterPrefix()).append(getJdbcParameters(connectionProperties)); } - connectionString = connectionString + getDatabase(connectionProperties) + getJdbcParameters(connectionProperties); - environment.put(DEFAULT, connectionString); + environment.put(DEFAULT, connectionStringBuilder.toString()); return environment; } @@ -55,17 +79,15 @@ protected String getConnectionStringPrefix(Map connectionPropert return "snowflake://jdbc:snowflake://"; } + /** + * For Snowflake, we don't put warehouse, database or schema information to the JDBC String to avoid casing issues. 
+ * @param connectionProperties + * @return + */ @Override protected String getDatabase(Map connectionProperties) { - if (!connectionProperties.containsKey(SCHEMA)) { - logger.debug("No schema specified in connection string"); - } - - String databaseString = "/?warehouse=" + connectionProperties.get(WAREHOUSE) - + "&db=" + connectionProperties.get(DATABASE) - + "&schema=" + connectionProperties.get(SCHEMA); - return databaseString; + return ""; } @Override @@ -73,4 +95,43 @@ protected String getJdbcParametersSeparator() { return "&"; } + + private String getSnowflakeJDBCParameterPrefix() + { + return "/?"; + } + + private static String getValueWrapperWithEscapedCharacter(String input) + { + return SNOWFLAKE_ESCAPE_CHARACTER + input + SNOWFLAKE_ESCAPE_CHARACTER; + } + + private static boolean isGlueConnection(Map properties) + { + return properties.containsKey(DEFAULT_GLUE_CONNECTION); + } + + public static Map getSnowFlakeParameter(Map baseProperty, Map connectionProperties) + { + logger.debug("getSnowFlakeParameter, Loading connection properties"); + Map parameters = new HashMap<>(baseProperty); + + if (!isGlueConnection(connectionProperties)) { + return parameters; + } + + if (!connectionProperties.containsKey(SCHEMA)) { + logger.debug("No schema specified in connection string"); + } + + parameters.put(WAREHOUSE_PROPERTY_KEY, getValueWrapperWithEscapedCharacter(connectionProperties.get(WAREHOUSE))); + parameters.put(DB_PROPERTY_KEY, getValueWrapperWithEscapedCharacter(connectionProperties.get(DATABASE))); + + if (connectionProperties.containsKey(SCHEMA)) { + logger.debug("Found schema specified"); + parameters.put(SCHEMA_PROPERTY_KEY, getValueWrapperWithEscapedCharacter(connectionProperties.get(SCHEMA))); + } + + return parameters; + } } diff --git a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandler.java b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandler.java index 
4737c7f2f2..b8754adf8b 100644 --- a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandler.java +++ b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandler.java @@ -146,8 +146,8 @@ public SnowflakeMetadataHandler(java.util.Map configOptions) public SnowflakeMetadataHandler(DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { this(databaseConnectionConfig, new GenericJdbcConnectionFactory(databaseConnectionConfig, - JDBC_PROPERTIES, new DatabaseConnectionInfo(SnowflakeConstants.SNOWFLAKE_DRIVER_CLASS, - SnowflakeConstants.SNOWFLAKE_DEFAULT_PORT)), configOptions); + SnowflakeEnvironmentProperties.getSnowFlakeParameter(JDBC_PROPERTIES, configOptions), + new DatabaseConnectionInfo(SnowflakeConstants.SNOWFLAKE_DRIVER_CLASS, SnowflakeConstants.SNOWFLAKE_DEFAULT_PORT)), configOptions); } @VisibleForTesting @@ -166,7 +166,6 @@ public GetDataSourceCapabilitiesResponse doGetDataSourceCapabilities(BlockAlloca { LOGGER.debug("doGetDataSourceCapabilities: " + request); ImmutableMap.Builder> capabilities = ImmutableMap.builder(); - capabilities.put(DataSourceOptimizations.SUPPORTS_FILTER_PUSHDOWN.withSupportedSubTypes( FilterPushdownSubType.SORTED_RANGE_SET, FilterPushdownSubType.NULLABLE_COMPARISON )); diff --git a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeRecordHandler.java b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeRecordHandler.java index 0242ec0e82..9120f23a8a 100644 --- a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeRecordHandler.java +++ b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeRecordHandler.java @@ -42,6 +42,7 @@ import java.sql.SQLException; import static com.amazonaws.athena.connectors.snowflake.SnowflakeConstants.SNOWFLAKE_QUOTE_CHARACTER; +import static 
com.amazonaws.athena.connectors.snowflake.SnowflakeMetadataHandler.JDBC_PROPERTIES; public class SnowflakeRecordHandler extends JdbcRecordHandler { @@ -59,7 +60,7 @@ public SnowflakeRecordHandler(java.util.Map configOptions) public SnowflakeRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions) { this(databaseConnectionConfig, new GenericJdbcConnectionFactory(databaseConnectionConfig, - SnowflakeMetadataHandler.JDBC_PROPERTIES, + SnowflakeEnvironmentProperties.getSnowFlakeParameter(JDBC_PROPERTIES, configOptions), new DatabaseConnectionInfo(SnowflakeConstants.SNOWFLAKE_DRIVER_CLASS, SnowflakeConstants.SNOWFLAKE_DEFAULT_PORT)), configOptions); } From 2e2ecf267f4d77cf9c6c6f3c50656ceb3167ec6a Mon Sep 17 00:00:00 2001 From: Jithendar Trianz <106380520+Jithendar12@users.noreply.github.com> Date: Fri, 15 Nov 2024 21:42:16 +0530 Subject: [PATCH 64/87] [panama-sdkv2-gdcv2] Fix Db2 JDBC Connection String (#2395) --- .../athena/connectors/db2/Db2EnvironmentProperties.java | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2EnvironmentProperties.java b/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2EnvironmentProperties.java index 06c0f6ca4e..7d410d149e 100644 --- a/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2EnvironmentProperties.java +++ b/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2EnvironmentProperties.java @@ -23,8 +23,6 @@ import java.util.Map; -import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.DATABASE; - public class Db2EnvironmentProperties extends JdbcEnvironmentProperties { @Override @@ -34,9 +32,9 @@ protected String getConnectionStringPrefix(Map connectionPropert } @Override - protected String getDatabase(Map connectionProperties) + protected String getJdbcParametersSeparator() { - return ":" + connectionProperties.get(DATABASE); + return ":"; } 
@Override From 537fff1cdf7fae1d02a2f6e20adfaed60c97c109 Mon Sep 17 00:00:00 2001 From: VenkatasivareddyTR <110587813+VenkatasivareddyTR@users.noreply.github.com> Date: Fri, 15 Nov 2024 21:42:30 +0530 Subject: [PATCH 65/87] Synapse panama issue fix, updated connection string prefix. (#2394) --- .../athena/connectors/synapse/SynapseEnvironmentProperties.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseEnvironmentProperties.java b/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseEnvironmentProperties.java index 3c42e4fdce..a9d146e311 100644 --- a/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseEnvironmentProperties.java +++ b/athena-synapse/src/main/java/com/amazonaws/athena/connectors/synapse/SynapseEnvironmentProperties.java @@ -30,7 +30,7 @@ public class SynapseEnvironmentProperties extends JdbcEnvironmentProperties @Override protected String getConnectionStringPrefix(Map connectionProperties) { - return "synapse://jdbc:synapse://"; + return "synapse://jdbc:sqlserver://"; } @Override From 220390d8bcb25810823f838aec5f90ed1c28af0a Mon Sep 17 00:00:00 2001 From: ejeffrli <144148373+ejeffrli@users.noreply.github.com> Date: Sat, 16 Nov 2024 16:30:20 -0500 Subject: [PATCH 66/87] =?UTF-8?q?Enable=20case=20insensitive=20username/pa?= =?UTF-8?q?ssword=20in=20secret=20and=20allow=20secret=20=E2=80=A6=20(#239?= =?UTF-8?q?8)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../elasticsearch/ElasticsearchCredentialProvider.java | 7 ++++++- .../jdbc/connection/GenericJdbcConnectionFactory.java | 2 +- .../jdbc/connection/RdsSecretsCredentialProvider.java | 7 ++++++- 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchCredentialProvider.java 
b/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchCredentialProvider.java index 90cf3194a5..9e8f9768da 100644 --- a/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchCredentialProvider.java +++ b/athena-elasticsearch/src/main/java/com/amazonaws/athena/connectors/elasticsearch/ElasticsearchCredentialProvider.java @@ -47,7 +47,12 @@ public ElasticsearchCredentialProvider(final String secretString) { Map elasticsearchSecrets; try { - elasticsearchSecrets = OBJECT_MAPPER.readValue(secretString, HashMap.class); + Map originalMap = OBJECT_MAPPER.readValue(secretString, HashMap.class); + + elasticsearchSecrets = new HashMap<>(); + for (Map.Entry entry : originalMap.entrySet()) { + elasticsearchSecrets.put(entry.getKey().toLowerCase(), entry.getValue()); + } } catch (IOException ioException) { throw new RuntimeException("Could not deserialize Elasticsearch credentials into HashMap", ioException); diff --git a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/connection/GenericJdbcConnectionFactory.java b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/connection/GenericJdbcConnectionFactory.java index 40478a00cf..ab061c52d2 100644 --- a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/connection/GenericJdbcConnectionFactory.java +++ b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/connection/GenericJdbcConnectionFactory.java @@ -45,7 +45,7 @@ public class GenericJdbcConnectionFactory { private static final Logger LOGGER = LoggerFactory.getLogger(GenericJdbcConnectionFactory.class); - private static final String SECRET_NAME_PATTERN_STRING = "(\\$\\{[a-zA-Z0-9:/_+=.@-]+})"; + private static final String SECRET_NAME_PATTERN_STRING = "(\\$\\{[a-zA-Z0-9:/_+=.@!-]+})"; public static final Pattern SECRET_NAME_PATTERN = Pattern.compile(SECRET_NAME_PATTERN_STRING); private final DatabaseConnectionInfo databaseConnectionInfo; diff --git 
a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/connection/RdsSecretsCredentialProvider.java b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/connection/RdsSecretsCredentialProvider.java index 71fdfda3bf..7f563a50ad 100644 --- a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/connection/RdsSecretsCredentialProvider.java +++ b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/connection/RdsSecretsCredentialProvider.java @@ -48,7 +48,12 @@ public RdsSecretsCredentialProvider(final String secretString) { Map rdsSecrets; try { - rdsSecrets = OBJECT_MAPPER.readValue(secretString, HashMap.class); + Map originalMap = OBJECT_MAPPER.readValue(secretString, HashMap.class); + + rdsSecrets = new HashMap<>(); + for (Map.Entry entry : originalMap.entrySet()) { + rdsSecrets.put(entry.getKey().toLowerCase(), entry.getValue()); + } } catch (IOException ioException) { throw new RuntimeException("Could not deserialize RDS credentials into HashMap", ioException); From b3077e8a1f10b9b37b9e25e0f217070033449396 Mon Sep 17 00:00:00 2001 From: Aimery Methena <159072740+aimethed@users.noreply.github.com> Date: Sun, 17 Nov 2024 09:52:49 -0500 Subject: [PATCH 67/87] update to use SSL oracle url (#2400) --- .../connector/lambda/connection/EnvironmentConstants.java | 1 + .../connectors/oracle/OracleEnvironmentProperties.java | 8 +++++++- .../connectors/oracle/OracleJdbcConnectionFactory.java | 5 +---- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentConstants.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentConstants.java index fa5621ebf8..2b16ad7e61 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentConstants.java +++ 
b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentConstants.java @@ -51,4 +51,5 @@ private EnvironmentConstants() {} public static final String ZOOKEEPER_PORT = "ZOOKEEPER_PORT"; public static final String CUSTOM_AUTH_TYPE = "CUSTOM_AUTH_TYPE"; public static final String GLUE_CERTIFICATES_S3_REFERENCE = "CERTIFICATE_S3_REFERENCE"; + public static final String ENFORCE_SSL = "ENFORCE_SSL"; } diff --git a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleEnvironmentProperties.java b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleEnvironmentProperties.java index 1bdd16ca65..fbbe7195bc 100644 --- a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleEnvironmentProperties.java +++ b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleEnvironmentProperties.java @@ -24,6 +24,7 @@ import java.util.Map; import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.DATABASE; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.ENFORCE_SSL; import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.SECRET_NAME; public class OracleEnvironmentProperties extends JdbcEnvironmentProperties @@ -35,7 +36,12 @@ protected String getConnectionStringPrefix(Map connectionPropert if (connectionProperties.containsKey(SECRET_NAME)) { prefix = prefix + "${" + connectionProperties.get(SECRET_NAME) + "}"; } - prefix = prefix + "@//"; + if (connectionProperties.containsKey(ENFORCE_SSL)) { + prefix = prefix + "@tcps://"; + } + else { + prefix = prefix + "@//"; + } return prefix; } diff --git a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleJdbcConnectionFactory.java b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleJdbcConnectionFactory.java index 98d5fb08a0..0d1992317c 100644 --- 
a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleJdbcConnectionFactory.java +++ b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleJdbcConnectionFactory.java @@ -41,10 +41,7 @@ public class OracleJdbcConnectionFactory extends GenericJdbcConnectionFactory private final DatabaseConnectionInfo databaseConnectionInfo; private final DatabaseConnectionConfig databaseConnectionConfig; private static final Logger LOGGER = LoggerFactory.getLogger(OracleJdbcConnectionFactory.class); - private static final String SSL_CONNECTION_STRING_REGEX = "jdbc:oracle:thin:\\$\\{([a-zA-Z0-9:_/+=.@-]+)\\}@" + - "\\((?i)description=\\(address=\\(protocol=tcps\\)\\(host=[a-zA-Z0-9-.]+\\)" + - "\\(port=([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])\\)\\)" + - "\\(connect_data=\\(sid=[a-zA-Z_]+\\)\\)\\(security=\\(ssl_server_cert_dn=\"[=a-zA-Z,0-9-.,]+\"\\)\\)\\)"; + private static final String SSL_CONNECTION_STRING_REGEX = "jdbc:oracle:thin:\\$\\{([a-zA-Z0-9:_/+=.@-]+)\\}@tcps://"; private static final Pattern SSL_CONNECTION_STRING_PATTERN = Pattern.compile(SSL_CONNECTION_STRING_REGEX); /** From fdb2f25a17d77fa20e53582bd1c9d3460683faac Mon Sep 17 00:00:00 2001 From: Aimery Methena <159072740+aimethed@users.noreply.github.com> Date: Sun, 17 Nov 2024 09:53:04 -0500 Subject: [PATCH 68/87] add quotes around oracle password (#2399) --- .../athena/connectors/oracle/OracleJdbcConnectionFactory.java | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleJdbcConnectionFactory.java b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleJdbcConnectionFactory.java index 0d1992317c..32d5a0d2a4 100644 --- a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleJdbcConnectionFactory.java +++ b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleJdbcConnectionFactory.java @@ -76,6 +76,10 @@ 
public Connection getConnection(final JdbcCredentialProvider jdbcCredentialProvi LOGGER.info("Establishing normal connection.."); } Matcher secretMatcher = SECRET_NAME_PATTERN.matcher(databaseConnectionConfig.getJdbcConnectionString()); + String password = jdbcCredentialProvider.getCredential().getPassword(); + if (!password.contains("\"")) { + password = String.format("\"%s\"", password); + } final String secretReplacement = String.format("%s/%s", jdbcCredentialProvider.getCredential().getUser(), jdbcCredentialProvider.getCredential().getPassword()); derivedJdbcString = secretMatcher.replaceAll(Matcher.quoteReplacement(secretReplacement)); From 35e2b6cc9650b531fa30e53c212cdf954feda6ff Mon Sep 17 00:00:00 2001 From: ritiktrianz Date: Mon, 18 Nov 2024 22:55:16 +0530 Subject: [PATCH 69/87] [panama-sdkv2-gdcv2] Oracle Panama Testing Fix (#2397) --- .../lambda/connection/EnvironmentProperties.java | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java index 95362f0041..c7e0fb5512 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java @@ -113,11 +113,12 @@ private Map authenticationConfigurationToMap(AuthenticationConfi * */ public Map athenaPropertiesToEnvironment(Map athenaProperties) { - if (athenaProperties.containsKey(SPILL_KMS_KEY_ID)) { - String kmsKeyId = athenaProperties.remove(SPILL_KMS_KEY_ID); - athenaProperties.put(KMS_KEY_ID, kmsKeyId); + Map athenaPropertiesModified = new HashMap<>(athenaProperties); + if (athenaPropertiesModified.containsKey(SPILL_KMS_KEY_ID)) { + String kmsKeyId = 
athenaPropertiesModified.remove(SPILL_KMS_KEY_ID); + athenaPropertiesModified.put(KMS_KEY_ID, kmsKeyId); } - return athenaProperties; + return athenaPropertiesModified; } /** From 0b81e21cba7cf86df9d3392b10f72b23629e54ad Mon Sep 17 00:00:00 2001 From: ejeffrli <144148373+ejeffrli@users.noreply.github.com> Date: Mon, 18 Nov 2024 13:16:07 -0500 Subject: [PATCH 70/87] Fix docdb connection string (#2407) --- .../docdb/DocDBEnvironmentProperties.java | 11 +-- .../docdb/DocDBEnvironmentPropertiesTest.java | 67 +++++++++++++++++++ 2 files changed, 74 insertions(+), 4 deletions(-) create mode 100644 athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/DocDBEnvironmentPropertiesTest.java diff --git a/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBEnvironmentProperties.java b/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBEnvironmentProperties.java index 50c378b676..60e79d5792 100644 --- a/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBEnvironmentProperties.java +++ b/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBEnvironmentProperties.java @@ -37,10 +37,13 @@ public Map connectionPropertiesToEnvironment(Map { Map environment = new HashMap<>(); - String connectionString = "mongodb://${" + connectionProperties.get(SECRET_NAME) + "}@" - + connectionProperties.get(HOST) + connectionProperties.get(PORT) + "/?" 
- + connectionProperties.getOrDefault(JDBC_PARAMS, ""); - environment.put(DEFAULT_DOCDB, connectionString); + StringBuilder connectionString = new StringBuilder("mongodb://${"); + connectionString.append(connectionProperties.get(SECRET_NAME)).append("}@"); + connectionString.append(connectionProperties.get(HOST)).append(":").append(connectionProperties.get(PORT)); + if (connectionProperties.containsKey(JDBC_PARAMS)) { + connectionString.append("/?").append(connectionProperties.get(JDBC_PARAMS)); + } + environment.put(DEFAULT_DOCDB, connectionString.toString()); return environment; } } diff --git a/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/DocDBEnvironmentPropertiesTest.java b/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/DocDBEnvironmentPropertiesTest.java new file mode 100644 index 0000000000..5b5362bfad --- /dev/null +++ b/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/DocDBEnvironmentPropertiesTest.java @@ -0,0 +1,67 @@ +/*- + * #%L + * athena-mongodb + * %% + * Copyright (C) 2019 Amazon Web Services + * %% + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * #L% + */ +package com.amazonaws.athena.connectors.docdb; + +import org.junit.Test; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.DEFAULT_DOCDB; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.HOST; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.JDBC_PARAMS; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.PORT; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.SECRET_NAME; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +public class DocDBEnvironmentPropertiesTest +{ + @Test + public void connectionPropertiesToEnvironmentTest() + throws IOException + { + Map connectionProperties = new HashMap<>(); + connectionProperties.put(HOST, "localhost"); + connectionProperties.put(PORT, "1234"); + connectionProperties.put(JDBC_PARAMS, "key=value&key2=value2"); + connectionProperties.put(SECRET_NAME, "secret"); + String connectionString = "mongodb://${secret}@localhost:1234/?key=value&key2=value2"; + + Map docdbConnectionProperties = new DocDBEnvironmentProperties().connectionPropertiesToEnvironment(connectionProperties); + assertTrue(docdbConnectionProperties.containsKey(DEFAULT_DOCDB)); + assertEquals(connectionString, docdbConnectionProperties.get(DEFAULT_DOCDB)); + } + + @Test + public void noJdbcParamsConnectionProperties() + { + Map connectionProperties = new HashMap<>(); + connectionProperties.put(HOST, "localhost"); + connectionProperties.put(PORT, "1234"); + connectionProperties.put(SECRET_NAME, "secret"); + String connectionString = "mongodb://${secret}@localhost:1234"; + + Map docdbConnectionProperties = new DocDBEnvironmentProperties().connectionPropertiesToEnvironment(connectionProperties); + 
assertTrue(docdbConnectionProperties.containsKey(DEFAULT_DOCDB)); + assertEquals(connectionString, docdbConnectionProperties.get(DEFAULT_DOCDB)); + } +} From 153b751be5a7c06c1e0267339ad39f977f6a5760 Mon Sep 17 00:00:00 2001 From: Aimery Methena <159072740+aimethed@users.noreply.github.com> Date: Wed, 20 Nov 2024 15:38:27 -0500 Subject: [PATCH 71/87] fixing to use quoted password (#2413) --- .../athena/connectors/oracle/OracleJdbcConnectionFactory.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleJdbcConnectionFactory.java b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleJdbcConnectionFactory.java index 32d5a0d2a4..cfe488a7ce 100644 --- a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleJdbcConnectionFactory.java +++ b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleJdbcConnectionFactory.java @@ -81,7 +81,7 @@ public Connection getConnection(final JdbcCredentialProvider jdbcCredentialProvi password = String.format("\"%s\"", password); } final String secretReplacement = String.format("%s/%s", jdbcCredentialProvider.getCredential().getUser(), - jdbcCredentialProvider.getCredential().getPassword()); + password); derivedJdbcString = secretMatcher.replaceAll(Matcher.quoteReplacement(secretReplacement)); LOGGER.info("derivedJdbcString: " + derivedJdbcString); return DriverManager.getConnection(derivedJdbcString, properties); From e4a53aa5d93ebc664fcfabf8aa901f8b6cc8e76d Mon Sep 17 00:00:00 2001 From: chngpe <102991671+chngpe@users.noreply.github.com> Date: Wed, 20 Nov 2024 16:55:17 -0500 Subject: [PATCH 72/87] glue connection reference fix (#2414) --- athena-tpcds/athena-tpcds-connection.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/athena-tpcds/athena-tpcds-connection.yaml b/athena-tpcds/athena-tpcds-connection.yaml index e5b77b3312..6bc7d694b9 100644 --- 
a/athena-tpcds/athena-tpcds-connection.yaml +++ b/athena-tpcds/athena-tpcds-connection.yaml @@ -45,7 +45,7 @@ Resources: Properties: Environment: Variables: - glue_connection: Ref GlueConnection + glue_connection: !Ref GlueConnection FunctionName: !Ref LambdaFunctionName PackageType: "Image" ImageUri: !Sub From 5fe1b3c2355d88e54d4b7f7d2cddb9e31b78c1bc Mon Sep 17 00:00:00 2001 From: ejeffrli <144148373+ejeffrli@users.noreply.github.com> Date: Wed, 20 Nov 2024 17:14:34 -0500 Subject: [PATCH 73/87] Fix secret issue if field is integer (#2412) --- .../jdbc/connection/RdsSecretsCredentialProvider.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/connection/RdsSecretsCredentialProvider.java b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/connection/RdsSecretsCredentialProvider.java index 7f563a50ad..a98958e981 100644 --- a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/connection/RdsSecretsCredentialProvider.java +++ b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/connection/RdsSecretsCredentialProvider.java @@ -52,7 +52,9 @@ public RdsSecretsCredentialProvider(final String secretString) rdsSecrets = new HashMap<>(); for (Map.Entry entry : originalMap.entrySet()) { - rdsSecrets.put(entry.getKey().toLowerCase(), entry.getValue()); + if (entry.getKey().equalsIgnoreCase("username") || entry.getKey().equalsIgnoreCase("password")) { + rdsSecrets.put(entry.getKey().toLowerCase(), entry.getValue()); + } } } catch (IOException ioException) { From 640b3a6219f865d6f69a1bbbd5338eff4832f111 Mon Sep 17 00:00:00 2001 From: ejeffrli <144148373+ejeffrli@users.noreply.github.com> Date: Fri, 22 Nov 2024 12:50:48 -0500 Subject: [PATCH 74/87] Change default endpoint (#2416) --- .../connector/lambda/connection/EnvironmentProperties.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java index c7e0fb5512..a94bde43bb 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/connection/EnvironmentProperties.java @@ -74,7 +74,7 @@ public Connection getGlueConnection(String glueConnectionName) throws RuntimeExc .builder() .connectionTimeout(Duration.ofMillis(CONNECT_TIMEOUT))) .build(); - if (lambdaEnvironment.getOrDefault("USE_GAMMA_GLUE", "true").equals("true")) { + if (lambdaEnvironment.getOrDefault("USE_GAMMA_GLUE", "false").equals("true")) { awsGlue = GlueClient.builder() .endpointOverride(new URI(String.format("https://glue-gamma.%s.amazonaws.com", lambdaEnvironment.get("AWS_REGION")))) .httpClientBuilder(ApacheHttpClient From 85e29d077a060c3083afb48d84f3ff5d2447d9a2 Mon Sep 17 00:00:00 2001 From: Aimery Methena <159072740+aimethed@users.noreply.github.com> Date: Fri, 22 Nov 2024 12:54:40 -0500 Subject: [PATCH 75/87] oracle casing flag (#2415) --- .../oracle/OracleMetadataHandler.java | 47 +++++++++++++++---- .../oracle/OracleMetadataHandlerTest.java | 18 +++---- 2 files changed, 47 insertions(+), 18 deletions(-) diff --git a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandler.java b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandler.java index 3932645fe7..334c3ea139 100644 --- a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandler.java +++ b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandler.java @@ -61,6 +61,7 @@ import org.apache.arrow.vector.types.pojo.ArrowType; import org.apache.arrow.vector.types.pojo.Field; import 
org.apache.arrow.vector.types.pojo.Schema; +import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.services.athena.AthenaClient; @@ -78,6 +79,7 @@ import java.util.Set; import java.util.stream.Collectors; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.DEFAULT_GLUE_CONNECTION; import static com.amazonaws.athena.connector.lambda.domain.predicate.functions.StandardFunctions.IS_DISTINCT_FROM_OPERATOR_FUNCTION_NAME; import static com.amazonaws.athena.connector.lambda.domain.predicate.functions.StandardFunctions.MODULUS_FUNCTION_NAME; import static com.amazonaws.athena.connector.lambda.domain.predicate.functions.StandardFunctions.NULLIF_FUNCTION_NAME; @@ -89,13 +91,15 @@ public class OracleMetadataHandler extends JdbcMetadataHandler { - static final String GET_PARTITIONS_QUERY = "Select DISTINCT PARTITION_NAME FROM USER_TAB_PARTITIONS where table_name= ?"; - static final String BLOCK_PARTITION_COLUMN_NAME = "PARTITION_NAME"; + static final String GET_PARTITIONS_QUERY = "Select DISTINCT PARTITION_NAME as \"partition_name\" FROM USER_TAB_PARTITIONS where table_name= ?"; + static final String BLOCK_PARTITION_COLUMN_NAME = "PARTITION_NAME".toLowerCase(); static final String ALL_PARTITIONS = "0"; - static final String PARTITION_COLUMN_NAME = "PARTITION_NAME"; + static final String PARTITION_COLUMN_NAME = "PARTITION_NAME".toLowerCase(); + static final String CASING_MODE = "casing_mode"; private static final Logger LOGGER = LoggerFactory.getLogger(OracleMetadataHandler.class); private static final int MAX_SPLITS_PER_REQUEST = 1000_000; private static final String COLUMN_NAME = "COLUMN_NAME"; + private static final String ORACLE_QUOTE_CHARACTER = "\""; static final String LIST_PAGINATED_TABLES_QUERY = "SELECT TABLE_NAME as \"TABLE_NAME\", OWNER as \"TABLE_SCHEM\" FROM all_tables WHERE owner = ? ORDER BY TABLE_NAME OFFSET ? ROWS FETCH NEXT ? 
ROWS ONLY"; @@ -154,15 +158,18 @@ public Schema getPartitionSchema(final String catalogName) public void getPartitions(final BlockWriter blockWriter, final GetTableLayoutRequest getTableLayoutRequest, QueryStatusChecker queryStatusChecker) throws Exception { - LOGGER.debug("{}: Schema {}, table {}", getTableLayoutRequest.getQueryId(), getTableLayoutRequest.getTableName().getSchemaName(), - getTableLayoutRequest.getTableName().getTableName()); + LOGGER.debug("{}: Schema {}, table {}", getTableLayoutRequest.getQueryId(), transformString(getTableLayoutRequest.getTableName().getSchemaName(), true), + transformString(getTableLayoutRequest.getTableName().getTableName(), true)); try (Connection connection = getJdbcConnectionFactory().getConnection(getCredentialProvider())) { - List parameters = Arrays.asList(getTableLayoutRequest.getTableName().getTableName().toUpperCase()); + List parameters = Arrays.asList(transformString(getTableLayoutRequest.getTableName().getTableName(), true)); + //try (Statement statement = connection.createStatement(); ResultSet resultSet = statement.executeQuery(GET_PARTITIONS_QUERY + )) try (PreparedStatement preparedStatement = new PreparedStatementBuilder().withConnection(connection).withQuery(GET_PARTITIONS_QUERY).withParameters(parameters).build(); - ResultSet resultSet = preparedStatement.executeQuery()) { + ResultSet resultSet = preparedStatement.executeQuery()) { // Return a single partition if no partitions defined if (!resultSet.next()) { + LOGGER.debug("here"); blockWriter.writeRows((Block block, int rowNum) -> { + LOGGER.debug("Parameters: " + BLOCK_PARTITION_COLUMN_NAME + " " + rowNum + " " + ALL_PARTITIONS); block.setValue(BLOCK_PARTITION_COLUMN_NAME, rowNum, ALL_PARTITIONS); LOGGER.info("Adding partition {}", ALL_PARTITIONS); //we wrote 1 row so we return 1 @@ -305,7 +312,7 @@ public GetTableResponse doGetTable(final BlockAllocator blockAllocator, final Ge { try (Connection connection = 
getJdbcConnectionFactory().getConnection(getCredentialProvider())) { Schema partitionSchema = getPartitionSchema(getTableRequest.getCatalogName()); - TableName tableName = new TableName(getTableRequest.getTableName().getSchemaName().toUpperCase(), getTableRequest.getTableName().getTableName().toUpperCase()); + TableName tableName = new TableName(transformString(getTableRequest.getTableName().getSchemaName(), false), transformString(getTableRequest.getTableName().getTableName(), false)); return new GetTableResponse(getTableRequest.getCatalogName(), tableName, getSchema(connection, tableName, partitionSchema), partitionSchema.getFields().stream().map(Field::getName).collect(Collectors.toSet())); } @@ -357,11 +364,12 @@ private Schema getSchema(Connection jdbcConnection, TableName tableName, Schema */ try (PreparedStatement stmt = connection.prepareStatement("select COLUMN_NAME ,DATA_TYPE from USER_TAB_COLS where table_name =?")) { - stmt.setString(1, tableName.getTableName().toUpperCase()); + stmt.setString(1, transformString(tableName.getTableName(), true)); ResultSet dataTypeResultSet = stmt.executeQuery(); while (dataTypeResultSet.next()) { hashMap.put(dataTypeResultSet.getString(COLUMN_NAME).trim(), dataTypeResultSet.getString("DATA_TYPE").trim()); } + LOGGER.debug("hashMap", hashMap.toString()); while (resultSet.next()) { ArrowType columnType = JdbcArrowTypeConverter.toArrowType( resultSet.getInt("DATA_TYPE"), @@ -433,4 +441,25 @@ private Schema getSchema(Connection jdbcConnection, TableName tableName, Schema return schemaBuilder.build(); } } + + /** + * Always adds double quotes around the string + * If the lambda uses a glue connection, return the string as is (lowercased by the trino engine) + * Otherwise uppercase it (the default of oracle) + * @param str + * @param quote + * @return + */ + private String transformString(String str, boolean quote) + { + boolean isGlueConnection = StringUtils.isNotBlank(configOptions.get(DEFAULT_GLUE_CONNECTION)); + boolean 
uppercase = configOptions.getOrDefault(CASING_MODE, isGlueConnection ? "lower" : "upper").toLowerCase().equals("upper"); + if (uppercase) { + str = str.toUpperCase(); + } + if (quote && !str.contains(ORACLE_QUOTE_CHARACTER)) { + str = ORACLE_QUOTE_CHARACTER + str + ORACLE_QUOTE_CHARACTER; + } + return str; + } } diff --git a/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandlerTest.java b/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandlerTest.java index c84dc15538..eea74cfc23 100644 --- a/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandlerTest.java +++ b/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandlerTest.java @@ -65,7 +65,7 @@ public class OracleMetadataHandlerTest extends TestBase { - private static final Schema PARTITION_SCHEMA = SchemaBuilder.newBuilder().addField("PARTITION_NAME", org.apache.arrow.vector.types.Types.MinorType.VARCHAR.getType()).build(); + private static final Schema PARTITION_SCHEMA = SchemaBuilder.newBuilder().addField("partition_name", org.apache.arrow.vector.types.Types.MinorType.VARCHAR.getType()).build(); private DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog", ORACLE_NAME, "oracle://jdbc:oracle:thin:username/password@//127.0.0.1:1521/orcl"); private OracleMetadataHandler oracleMetadataHandler; @@ -103,7 +103,7 @@ public void doGetTableLayout() { BlockAllocator blockAllocator = new BlockAllocatorImpl(); Constraints constraints = Mockito.mock(Constraints.class); - TableName tableName = new TableName("testSchema", "TESTTABLE"); + TableName tableName = new TableName("testSchema", "\"TESTTABLE\""); Schema partitionSchema = this.oracleMetadataHandler.getPartitionSchema("testCatalogName"); Set partitionCols = partitionSchema.getFields().stream().map(Field::getName).collect(Collectors.toSet()); GetTableLayoutRequest getTableLayoutRequest = new 
GetTableLayoutRequest(this.federatedIdentity, "testQueryId", "testCatalogName", tableName, constraints, partitionSchema, partitionCols); @@ -111,7 +111,7 @@ public void doGetTableLayout() PreparedStatement preparedStatement = Mockito.mock(PreparedStatement.class); Mockito.when(this.connection.prepareStatement(OracleMetadataHandler.GET_PARTITIONS_QUERY)).thenReturn(preparedStatement); - String[] columns = {"PARTITION_NAME"}; + String[] columns = {"PARTITION_NAME".toLowerCase()}; int[] types = {Types.VARCHAR}; Object[][] values = {{"p0"}, {"p1"}}; ResultSet resultSet = mockResultSet(columns, types, values, new AtomicInteger(-1)); @@ -127,7 +127,7 @@ public void doGetTableLayout() for (int i = 0; i < getTableLayoutResponse.getPartitions().getRowCount(); i++) { expectedValues.add(BlockUtils.rowToString(getTableLayoutResponse.getPartitions(), i)); } - Assert.assertEquals(expectedValues, Arrays.asList("[PARTITION_NAME : p0]", "[PARTITION_NAME : p1]")); + Assert.assertEquals(expectedValues, Arrays.asList("[partition_name : p0]", "[partition_name : p1]")); SchemaBuilder expectedSchemaBuilder = SchemaBuilder.newBuilder(); expectedSchemaBuilder.addField(FieldBuilder.newBuilder(OracleMetadataHandler.BLOCK_PARTITION_COLUMN_NAME, org.apache.arrow.vector.types.Types.MinorType.VARCHAR.getType()).build()); @@ -144,7 +144,7 @@ public void doGetTableLayoutWithNoPartitions() { BlockAllocator blockAllocator = new BlockAllocatorImpl(); Constraints constraints = Mockito.mock(Constraints.class); - TableName tableName = new TableName("testSchema", "TESTTABLE"); + TableName tableName = new TableName("testSchema", "\"TESTTABLE\""); Schema partitionSchema = this.oracleMetadataHandler.getPartitionSchema("testCatalogName"); Set partitionCols = partitionSchema.getFields().stream().map(Field::getName).collect(Collectors.toSet()); GetTableLayoutRequest getTableLayoutRequest = new GetTableLayoutRequest(this.federatedIdentity, "testQueryId", "testCatalogName", tableName, constraints, 
partitionSchema, partitionCols); @@ -152,7 +152,7 @@ public void doGetTableLayoutWithNoPartitions() PreparedStatement preparedStatement = Mockito.mock(PreparedStatement.class); Mockito.when(this.connection.prepareStatement(OracleMetadataHandler.GET_PARTITIONS_QUERY)).thenReturn(preparedStatement); - String[] columns = {"PARTITION_NAME"}; + String[] columns = {"PARTITION_NAME".toLowerCase()}; int[] types = {Types.VARCHAR}; Object[][] values = {{}}; ResultSet resultSet = mockResultSet(columns, types, values, new AtomicInteger(-1)); @@ -168,7 +168,7 @@ public void doGetTableLayoutWithNoPartitions() for (int i = 0; i < getTableLayoutResponse.getPartitions().getRowCount(); i++) { expectedValues.add(BlockUtils.rowToString(getTableLayoutResponse.getPartitions(), i)); } - Assert.assertEquals(expectedValues, Collections.singletonList("[PARTITION_NAME : 0]")); + Assert.assertEquals(expectedValues, Collections.singletonList("[partition_name : 0]")); SchemaBuilder expectedSchemaBuilder = SchemaBuilder.newBuilder(); expectedSchemaBuilder.addField(FieldBuilder.newBuilder(OracleMetadataHandler.BLOCK_PARTITION_COLUMN_NAME, org.apache.arrow.vector.types.Types.MinorType.VARCHAR.getType()).build()); @@ -249,7 +249,7 @@ public void doGetSplitsContinuation() PreparedStatement preparedStatement = Mockito.mock(PreparedStatement.class); Mockito.when(this.connection.prepareStatement(OracleMetadataHandler.GET_PARTITIONS_QUERY)).thenReturn(preparedStatement); - String[] columns = {"PARTITION_NAME"}; + String[] columns = {"PARTITION_NAME".toLowerCase()}; int[] types = {Types.VARCHAR}; Object[][] values = {{"p0"}, {"p1"}}; ResultSet resultSet = mockResultSet(columns, types, values, new AtomicInteger(-1)); @@ -265,7 +265,7 @@ public void doGetSplitsContinuation() GetSplitsResponse getSplitsResponse = this.oracleMetadataHandler.doGetSplits(splitBlockAllocator, getSplitsRequest); Set> expectedSplits = new HashSet<>(); - expectedSplits.add(Collections.singletonMap("PARTITION_NAME", "p1")); + 
expectedSplits.add(Collections.singletonMap("PARTITION_NAME".toLowerCase(), "p1")); Assert.assertEquals(expectedSplits.size(), getSplitsResponse.getSplits().size()); Set> actualSplits = getSplitsResponse.getSplits().stream().map(Split::getProperties).collect(Collectors.toSet()); Assert.assertEquals(expectedSplits, actualSplits); From 65e381b17fdfdee3adcb1d5476fb8a85c749f7b1 Mon Sep 17 00:00:00 2001 From: AbdulRehman Date: Fri, 22 Nov 2024 22:35:18 -0500 Subject: [PATCH 76/87] Changed message to debug (#2422) Co-authored-by: AbdulRehman Faraj --- .../athena/connector/lambda/handlers/CompositeHandler.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/CompositeHandler.java b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/CompositeHandler.java index a3af2e5b90..188b407d23 100644 --- a/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/CompositeHandler.java +++ b/athena-federation-sdk/src/main/java/com/amazonaws/athena/connector/lambda/handlers/CompositeHandler.java @@ -106,7 +106,7 @@ public final void handleRequest(InputStream inputStream, OutputStream outputStre break; } catch (IllegalStateException e) { // if client has not upgraded to our latest, fallback to lower version - logger.warn("Client's SerDe mis-matched with connector version:, attempt with lower version: '{}'", --resolvedSerDeVersion, e); + logger.debug("Client's SerDe mis-matched with connector version:, attempt with lower version: '{}'", --resolvedSerDeVersion); } } From 8a43a535ad6b419f3e3003d196826ca807d785aa Mon Sep 17 00:00:00 2001 From: chngpe <102991671+chngpe@users.noreply.github.com> Date: Wed, 27 Nov 2024 12:41:40 -0500 Subject: [PATCH 77/87] Snowflake case insensitive search improve with ANNOTATION added back for backward compatibility (#2437) --- .../SnowflakeCaseInsensitiveResolver.java | 177 ++++++++++++++---- 
.../snowflake/SnowflakeMetadataHandler.java | 14 +- .../snowflake/SnowflakeRecordHandler.java | 3 +- .../SnowflakeMetadataHandlerTest.java | 15 +- 4 files changed, 160 insertions(+), 49 deletions(-) diff --git a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeCaseInsensitiveResolver.java b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeCaseInsensitiveResolver.java index 490d032b95..5c2f542709 100644 --- a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeCaseInsensitiveResolver.java +++ b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeCaseInsensitiveResolver.java @@ -20,74 +20,126 @@ package com.amazonaws.athena.connectors.snowflake; import com.amazonaws.athena.connector.lambda.domain.TableName; +import com.amazonaws.athena.connectors.jdbc.manager.PreparedStatementBuilder; +import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.sql.Connection; +import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; -import java.sql.Statement; +import java.util.Arrays; import java.util.Map; +import static com.amazonaws.athena.connector.lambda.connection.EnvironmentConstants.DEFAULT_GLUE_CONNECTION; + public class SnowflakeCaseInsensitiveResolver { private static final Logger LOGGER = LoggerFactory.getLogger(SnowflakeCaseInsensitiveResolver.class); - private static final String SCHEMA_NAME_QUERY = "select * from INFORMATION_SCHEMA.SCHEMATA where lower(SCHEMA_NAME) = "; - private static final String TABLE_NAME_QUERY = "select * from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA = "; + private static final String SCHEMA_NAME_QUERY_TEMPLATE = "select * from INFORMATION_SCHEMA.SCHEMATA where lower(SCHEMA_NAME) = ?"; + private static final String TABLE_NAME_QUERY_TEMPLATE = "select * from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA = ? 
and lower(TABLE_NAME) = ?"; private static final String SCHEMA_NAME_COLUMN_KEY = "SCHEMA_NAME"; private static final String TABLE_NAME_COLUMN_KEY = "TABLE_NAME"; - - private static final String ENABLE_CASE_INSENSITIVE_MATCH = "enable_case_insensitive_match"; + private static final String CASING_MODE = "casing_mode"; + private static final String ANNOTATION_CASE_UPPER = "upper"; + private static final String ANNOTATION_CASE_LOWER = "lower"; private SnowflakeCaseInsensitiveResolver() { } - public static TableName getTableNameObjectCaseInsensitiveMatch(final Connection connection, TableName tableName, Map configOptions) + private enum SnowflakeCasingMode + { + NONE, + CASE_INSENSITIVE_SEARCH, + ANNOTATION + } + + public static TableName getAdjustedTableObjectNameBasedOnConfig(final Connection connection, TableName tableName, Map configOptions) throws SQLException { - if (!isCaseInsensitiveMatchEnable(configOptions)) { - return tableName; + SnowflakeCasingMode casingMode = getCasingMode(configOptions); + switch (casingMode) { + case CASE_INSENSITIVE_SEARCH: + String schemaNameCaseInsensitively = getSchemaNameCaseInsensitively(connection, tableName.getSchemaName(), configOptions); + String tableNameCaseInsensitively = getTableNameCaseInsensitively(connection, schemaNameCaseInsensitively, tableName.getTableName(), configOptions); + TableName tableNameResult = new TableName(schemaNameCaseInsensitively, tableNameCaseInsensitively); + LOGGER.info("casing mode is `CASE_INSENSITIVE_SEARCH`: adjusting casing from Slowflake case insensitive search for TableName object. TableName:{}", tableNameResult); + return tableNameResult; + case ANNOTATION: + TableName tableNameFromQueryHint = getTableNameFromQueryHint(tableName); + LOGGER.info("casing mode is `ANNOTATION`: adjusting casing from input if annotation found for TableName object. 
TableName:{}", tableNameFromQueryHint); + return tableNameFromQueryHint; + case NONE: + LOGGER.info("casing mode is `NONE`: not adjust casing from input for TableName object. TableName:{}", tableName); + return tableName; } - - String schemaNameCaseInsensitively = getSchemaNameCaseInsensitively(connection, tableName.getSchemaName(), configOptions); - String tableNameCaseInsensitively = getTableNameCaseInsensitively(connection, schemaNameCaseInsensitively, tableName.getTableName(), configOptions); - - return new TableName(schemaNameCaseInsensitively, tableNameCaseInsensitively); + LOGGER.warn("casing mode is empty: not adjust casing from input for TableName object. TableName:{}", tableName); + return tableName; } - public static String getSchemaNameCaseInsensitively(final Connection connection, String schemaNameInput, Map configOptions) + public static String getAdjustedSchemaNameBasedOnConfig(final Connection connection, String schemaNameInput, Map configOptions) throws SQLException { - if (!isCaseInsensitiveMatchEnable(configOptions)) { - return schemaNameInput; + SnowflakeCasingMode casingMode = getCasingMode(configOptions); + switch (casingMode) { + case CASE_INSENSITIVE_SEARCH: + LOGGER.info("casing mode is `CASE_INSENSITIVE_SEARCH`: adjusting casing from Slowflake case insensitive search for Schema..."); + return getSchemaNameCaseInsensitively(connection, schemaNameInput, configOptions); + case NONE: + LOGGER.info("casing mode is `NONE`: not adjust casing from input for Schema"); + return schemaNameInput; + case ANNOTATION: + LOGGER.info("casing mode is `ANNOTATION`: adjust casing for SCHEMA is NOT SUPPORTED. 
Skip casing adjustment"); } - return getNameCaseInsensitively(connection, SCHEMA_NAME_COLUMN_KEY, SCHEMA_NAME_QUERY + "'" + schemaNameInput.toLowerCase() + "'", configOptions); + return schemaNameInput; } - public static String getTableNameCaseInsensitively(final Connection connection, String schemaName, String tableNameInput, Map configOptions) + public static String getSchemaNameCaseInsensitively(final Connection connection, String schemaNameInput, Map configOptions) throws SQLException { - if (!isCaseInsensitiveMatchEnable(configOptions)) { - return tableNameInput; + String nameFromSnowFlake = null; + int i = 0; + try (PreparedStatement preparedStatement = new PreparedStatementBuilder() + .withConnection(connection) + .withQuery(SCHEMA_NAME_QUERY_TEMPLATE) + .withParameters(Arrays.asList(schemaNameInput.toLowerCase())).build(); + ResultSet resultSet = preparedStatement.executeQuery()) { + while (resultSet.next()) { + i++; + String schemaNameCandidate = resultSet.getString(SCHEMA_NAME_COLUMN_KEY); + LOGGER.debug("Case insensitive search on columLabel: {}, schema name: {}", SCHEMA_NAME_COLUMN_KEY, schemaNameCandidate); + nameFromSnowFlake = schemaNameCandidate; + } } - //'?' and lower(TABLE_NAME) = '?' 
- return getNameCaseInsensitively(connection, TABLE_NAME_COLUMN_KEY, TABLE_NAME_QUERY + "'" + schemaName + "' and lower(TABLE_NAME) = '" + tableNameInput.toLowerCase() + "'", configOptions); + catch (SQLException e) { + throw new RuntimeException(e); + } + + if (i == 0 || i > 1) { + throw new RuntimeException(String.format("Schema name case insensitive match failed, number of match : %d", i)); + } + + return nameFromSnowFlake; } - public static String getNameCaseInsensitively(final Connection connection, String columnLabel, String query, Map configOptions) + public static String getTableNameCaseInsensitively(final Connection connection, String schemaName, String tableNameInput, Map configOptions) throws SQLException { - LOGGER.debug("getNameCaseInsensitively, query:" + query); + // schema name input should be correct case before searching tableName already String nameFromSnowFlake = null; int i = 0; - try (Statement statement = connection.createStatement(); - ResultSet resultSet = statement.executeQuery(query)) { + try (PreparedStatement preparedStatement = new PreparedStatementBuilder() + .withConnection(connection) + .withQuery(TABLE_NAME_QUERY_TEMPLATE) + .withParameters(Arrays.asList(schemaName, tableNameInput.toLowerCase())).build(); + ResultSet resultSet = preparedStatement.executeQuery()) { while (resultSet.next()) { i++; - String schemaNameCandidate = resultSet.getString(columnLabel); - LOGGER.debug("Case insensitive search on columLabel: {}, schema name: {}", columnLabel, schemaNameCandidate); + String schemaNameCandidate = resultSet.getString(TABLE_NAME_COLUMN_KEY); + LOGGER.debug("Case insensitive search on columLabel: {}, schema name: {}", TABLE_NAME_COLUMN_KEY, schemaNameCandidate); nameFromSnowFlake = schemaNameCandidate; } } @@ -102,13 +154,70 @@ public static String getNameCaseInsensitively(final Connection connection, Strin return nameFromSnowFlake; } - private static boolean isCaseInsensitiveMatchEnable(Map configOptions) + /* + Keep previous 
implementation of table name casing adjustment from query hint. This is to keep backward compatibility. + */ + public static TableName getTableNameFromQueryHint(TableName table) + { + LOGGER.info("getTableNameFromQueryHint: " + table); + //if no query hints has been passed then return input table name + if (!table.getTableName().contains("@")) { + return new TableName(table.getSchemaName().toUpperCase(), table.getTableName().toUpperCase()); + } + //analyze the hint to find table and schema case + String[] tbNameWithQueryHint = table.getTableName().split("@"); + String[] hintDetails = tbNameWithQueryHint[1].split("&"); + String schemaCase = ANNOTATION_CASE_UPPER; + String tableCase = ANNOTATION_CASE_UPPER; + String tableName = tbNameWithQueryHint[0]; + for (String str : hintDetails) { + String[] hintDetail = str.split("="); + if (hintDetail[0].contains("schema")) { + schemaCase = hintDetail[1]; + } + else if (hintDetail[0].contains("table")) { + tableCase = hintDetail[1]; + } + } + if (schemaCase.equalsIgnoreCase(ANNOTATION_CASE_UPPER) && tableCase.equalsIgnoreCase(ANNOTATION_CASE_UPPER)) { + return new TableName(table.getSchemaName().toUpperCase(), tableName.toUpperCase()); + } + else if (schemaCase.equalsIgnoreCase(ANNOTATION_CASE_LOWER) && tableCase.equalsIgnoreCase(ANNOTATION_CASE_LOWER)) { + return new TableName(table.getSchemaName().toLowerCase(), tableName.toLowerCase()); + } + else if (schemaCase.equalsIgnoreCase(ANNOTATION_CASE_LOWER) && tableCase.equalsIgnoreCase(ANNOTATION_CASE_UPPER)) { + return new TableName(table.getSchemaName().toLowerCase(), tableName.toUpperCase()); + } + else if (schemaCase.equalsIgnoreCase(ANNOTATION_CASE_UPPER) && tableCase.equalsIgnoreCase(ANNOTATION_CASE_LOWER)) { + return new TableName(table.getSchemaName().toUpperCase(), tableName.toLowerCase()); + } + else { + return new TableName(table.getSchemaName().toUpperCase(), tableName.toUpperCase()); + } + } + + /* + Default behavior with and without glue connection is different. 
As we want to make it backward compatible for customer who is not using glue connection. + With Glue connection, default behavior is `NONE` which we will not adjust any casing in the connector. + Without Glue connection, default behavior is `ANNOTATION` which customer can perform MY_TABLE@schemaCase=upper&tableCase=upper + */ + private static SnowflakeCasingMode getCasingMode(Map configOptions) { - String enableCaseInsensitiveMatchEnvValue = configOptions.getOrDefault(ENABLE_CASE_INSENSITIVE_MATCH, "false").toLowerCase(); - boolean enableCaseInsensitiveMatch = enableCaseInsensitiveMatchEnvValue.equals("true"); - LOGGER.info("{} environment variable set to: {}. Resolved to: {}", - ENABLE_CASE_INSENSITIVE_MATCH, enableCaseInsensitiveMatchEnvValue, enableCaseInsensitiveMatch); + boolean isGlueConnection = StringUtils.isNotBlank(configOptions.get(DEFAULT_GLUE_CONNECTION)); + if (!configOptions.containsKey(CASING_MODE)) { + LOGGER.info("CASING MODE disable"); + return isGlueConnection ? SnowflakeCasingMode.NONE : SnowflakeCasingMode.ANNOTATION; + } - return enableCaseInsensitiveMatch; + try { + SnowflakeCasingMode snowflakeCasingMode = SnowflakeCasingMode.valueOf(configOptions.get(CASING_MODE).toUpperCase()); + LOGGER.info("CASING MODE enable: {}", snowflakeCasingMode.toString()); + return snowflakeCasingMode; + } + catch (Exception ex) { + // print error log for customer along with list of input + LOGGER.error("Invalid input for:{}, input value:{}, valid values:{}", CASING_MODE, configOptions.get(CASING_MODE), Arrays.asList(SnowflakeCasingMode.values()), ex); + throw ex; + } } } diff --git a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandler.java b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandler.java index b8754adf8b..fd05d4d88a 100644 --- a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandler.java +++ 
b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandler.java @@ -213,9 +213,9 @@ private Optional getPrimaryKey(TableName tableName) throws Exception } } - String primaryKey = String.join(", ", primaryKeys); - if (!Strings.isNullOrEmpty(primaryKey) && hasUniquePrimaryKey(tableName, primaryKey)) { - return Optional.of(primaryKey); + String primaryKeyString = primaryKeys.stream().map(s -> "\"" + s + "\"").collect(Collectors.joining(",")); + if (!Strings.isNullOrEmpty(primaryKeyString) && hasUniquePrimaryKey(tableName, primaryKeyString)) { + return Optional.of(primaryKeyString); } } return Optional.empty(); @@ -228,7 +228,7 @@ private Optional getPrimaryKey(TableName tableName) throws Exception private boolean hasUniquePrimaryKey(TableName tableName, String primaryKey) throws Exception { try (Connection connection = getJdbcConnectionFactory().getConnection(getCredentialProvider())) { - try (PreparedStatement preparedStatement = connection.prepareStatement("SELECT " + primaryKey + ", count(*) as COUNTS FROM " + tableName.getTableName() + " GROUP BY " + primaryKey + " ORDER BY COUNTS DESC"); + try (PreparedStatement preparedStatement = connection.prepareStatement("SELECT " + primaryKey + ", count(*) as COUNTS FROM " + "\"" + tableName.getSchemaName() + "\".\"" + tableName.getTableName() + "\"" + " GROUP BY " + primaryKey + " ORDER BY COUNTS DESC"); ResultSet rs = preparedStatement.executeQuery()) { if (rs.next()) { if (rs.getInt(COUNTS_COLUMN_NAME) == 1) { @@ -258,7 +258,7 @@ public void getPartitions(BlockWriter blockWriter, GetTableLayoutRequest getTabl getTableLayoutRequest.getTableName().getTableName()); try (Connection connection = getJdbcConnectionFactory().getConnection(getCredentialProvider())) { - TableName tableName = SnowflakeCaseInsensitiveResolver.getTableNameObjectCaseInsensitiveMatch(connection, getTableLayoutRequest.getTableName(), configOptions); + TableName tableName = getTableLayoutRequest.getTableName(); 
/** * "MAX_PARTITION_COUNT" is currently set to 50 to limit the number of partitions. * this is to handle timeout issues because of huge partitions @@ -383,7 +383,7 @@ public GetTableResponse doGetTable(final BlockAllocator blockAllocator, final Ge LOGGER.debug("doGetTable getTableName:{}", getTableRequest.getTableName()); try (Connection connection = getJdbcConnectionFactory().getConnection(getCredentialProvider())) { Schema partitionSchema = getPartitionSchema(getTableRequest.getCatalogName()); - TableName tableName = SnowflakeCaseInsensitiveResolver.getTableNameObjectCaseInsensitiveMatch(connection, getTableRequest.getTableName(), configOptions); + TableName tableName = SnowflakeCaseInsensitiveResolver.getAdjustedTableObjectNameBasedOnConfig(connection, getTableRequest.getTableName(), configOptions); GetTableResponse getTableResponse = new GetTableResponse(getTableRequest.getCatalogName(), tableName, getSchema(connection, tableName, partitionSchema), partitionSchema.getFields().stream().map(Field::getName).collect(Collectors.toSet())); return getTableResponse; @@ -397,7 +397,7 @@ public ListTablesResponse doListTables(final BlockAllocator blockAllocator, fina try (Connection connection = getJdbcConnectionFactory().getConnection(getCredentialProvider())) { LOGGER.info("{}: List table names for Catalog {}, Schema {}", listTablesRequest.getQueryId(), listTablesRequest.getCatalogName(), listTablesRequest.getSchemaName()); - String schemaName = SnowflakeCaseInsensitiveResolver.getSchemaNameCaseInsensitively(connection, listTablesRequest.getSchemaName(), configOptions); + String schemaName = SnowflakeCaseInsensitiveResolver.getAdjustedSchemaNameBasedOnConfig(connection, listTablesRequest.getSchemaName(), configOptions); String token = listTablesRequest.getNextToken(); int pageSize = listTablesRequest.getPageSize(); diff --git a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeRecordHandler.java 
b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeRecordHandler.java index 9120f23a8a..f1776b3985 100644 --- a/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeRecordHandler.java +++ b/athena-snowflake/src/main/java/com/amazonaws/athena/connectors/snowflake/SnowflakeRecordHandler.java @@ -86,8 +86,7 @@ public PreparedStatement buildSplitSql(Connection jdbcConnection, String catalog preparedStatement = buildQueryPassthroughSql(jdbcConnection, constraints); } else { - TableName tableName = SnowflakeCaseInsensitiveResolver.getTableNameObjectCaseInsensitiveMatch(jdbcConnection, tableNameInput, configOptions); - preparedStatement = jdbcSplitQueryBuilder.buildSql(jdbcConnection, null, tableName.getSchemaName(), tableName.getTableName(), schema, constraints, split); + preparedStatement = jdbcSplitQueryBuilder.buildSql(jdbcConnection, null, tableNameInput.getSchemaName(), tableNameInput.getTableName(), schema, constraints, split); } // Disable fetching all rows. 
diff --git a/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandlerTest.java b/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandlerTest.java index 15326bc5c9..9f292c12aa 100644 --- a/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandlerTest.java +++ b/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandlerTest.java @@ -115,7 +115,8 @@ public void doGetTableLayout() Mockito.when(primaryKeyPreparedStatement.executeQuery()).thenReturn(primaryKeyResultSet); PreparedStatement countsPreparedStatement = Mockito.mock(PreparedStatement.class); - String GET_PKEY_COUNTS_QUERY = "SELECT pkey, count(*) as COUNTS FROM testTable GROUP BY pkey ORDER BY COUNTS DESC"; + String GET_PKEY_COUNTS_QUERY = "SELECT \"pkey\", count(*) as COUNTS FROM \"testSchema\".\"testTable\" GROUP BY \"pkey\" ORDER BY COUNTS DESC"; + System.err.println("test:" + GET_PKEY_COUNTS_QUERY); String[] countsColumns = new String[] {"pkey", SnowflakeMetadataHandler.COUNTS_COLUMN_NAME}; Object[][] countsValues = {{"a", 1}}; ResultSet countsResultSet = mockResultSet(countsColumns, countsValues, new AtomicInteger(-1)); @@ -136,7 +137,7 @@ public void doGetTableLayout() if (i > 1) { offset = offset + partitionActualRecordCount; } - actualValues.add("[partition : partition-primary-pkey-limit-" +partitionActualRecordCount + "-offset-" + offset + "]"); + actualValues.add("[partition : partition-primary-\"pkey\"-limit-" + + partitionActualRecordCount + "-offset-" + offset + "]"); } Assert.assertEquals((int)limit, getTableLayoutResponse.getPartitions().getRowCount()); Assert.assertEquals(expectedValues, actualValues); @@ -179,7 +180,8 @@ public void doGetTableLayoutSinglePartition() Mockito.when(primaryKeyPreparedStatement.executeQuery()).thenReturn(primaryKeyResultSet); PreparedStatement countsPreparedStatement = 
Mockito.mock(PreparedStatement.class); - String GET_PKEY_COUNTS_QUERY = "SELECT pkey, count(*) as COUNTS FROM testTable GROUP BY pkey ORDER BY COUNTS DESC"; + String GET_PKEY_COUNTS_QUERY = "SELECT \"pkey\", count(*) as COUNTS FROM \"testSchema\".\"testTable\" GROUP BY \"pkey\" ORDER BY COUNTS DESC"; + System.err.println("test:" + GET_PKEY_COUNTS_QUERY); String[] countsColumns = new String[] {"pkey", SnowflakeMetadataHandler.COUNTS_COLUMN_NAME}; Object[][] countsValues = {{"a", 1}}; ResultSet countsResultSet = mockResultSet(countsColumns, countsValues, new AtomicInteger(-1)); @@ -223,6 +225,7 @@ public void doGetTableLayoutMaxPartition() long pageCount = (long) (Math.ceil(totalActualRecordCount / MAX_PARTITION_COUNT)); long partitionActualRecordCount = (totalActualRecordCount <= 10000) ? (long) totalActualRecordCount : pageCount; double limit = (int) Math.ceil(totalActualRecordCount / partitionActualRecordCount); +// double limit = 1; long offset = 0; String[] columns = {"partition"}; int[] types = {Types.VARCHAR}; @@ -240,8 +243,8 @@ public void doGetTableLayoutMaxPartition() Mockito.when(primaryKeyPreparedStatement.executeQuery()).thenReturn(primaryKeyResultSet); PreparedStatement countsPreparedStatement = Mockito.mock(PreparedStatement.class); - String GET_PKEY_COUNTS_QUERY = "SELECT pkey, count(*) as COUNTS FROM testTable GROUP BY pkey ORDER BY COUNTS DESC"; - String[] countsColumns = new String[] {"pkey", SnowflakeMetadataHandler.COUNTS_COLUMN_NAME}; + String GET_PKEY_COUNTS_QUERY = "SELECT \"pkey\", count(*) as COUNTS FROM \"testSchema\".\"testTable\" GROUP BY \"pkey\" ORDER BY COUNTS DESC"; + String[] countsColumns = new String[] {"\"pkey\"", SnowflakeMetadataHandler.COUNTS_COLUMN_NAME}; Object[][] countsValues = {{"a", 1}}; ResultSet countsResultSet = mockResultSet(countsColumns, countsValues, new AtomicInteger(-1)); Mockito.when(this.connection.prepareStatement(GET_PKEY_COUNTS_QUERY)).thenReturn(countsPreparedStatement); @@ -257,7 +260,7 @@ public void 
doGetTableLayoutMaxPartition() if (i > 1) { offset = offset + partitionActualRecordCount; } - actualValues.add("[partition : partition-primary-pkey-limit-" +partitionActualRecordCount + "-offset-" + offset + "]"); + actualValues.add("[partition : partition-primary-\"pkey\"-limit-" +partitionActualRecordCount + "-offset-" + offset + "]"); } Assert.assertEquals(expectedValues,actualValues); SchemaBuilder expectedSchemaBuilder = SchemaBuilder.newBuilder(); From b54a0216a2f1efbcd9ac226411684ac7828160cf Mon Sep 17 00:00:00 2001 From: Aimery Methena Date: Wed, 11 Dec 2024 13:19:09 -0500 Subject: [PATCH 78/87] remove dynamodb:ListSchemas --- athena-dynamodb/athena-dynamodb.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/athena-dynamodb/athena-dynamodb.yaml b/athena-dynamodb/athena-dynamodb.yaml index ec2201408f..366fcd85cd 100644 --- a/athena-dynamodb/athena-dynamodb.yaml +++ b/athena-dynamodb/athena-dynamodb.yaml @@ -103,7 +103,6 @@ Resources: Statement: - Action: - dynamodb:DescribeTable - - dynamodb:ListSchemas - dynamodb:ListTables - dynamodb:Query - dynamodb:Scan From 0ffb7b8bdd42032d7f8d42a5e36300b68a87f37a Mon Sep 17 00:00:00 2001 From: Aimery Methena <159072740+aimethed@users.noreply.github.com> Date: Wed, 11 Dec 2024 16:08:05 -0500 Subject: [PATCH 79/87] various cleanup fixes (#2467) --- .../connectors/db2as400/Db2As400EnvironmentProperties.java | 6 +++--- .../athena/connectors/docdb/DocDBMetadataHandler.java | 4 +++- .../athena/connectors/docdb/DocDBRecordHandler.java | 3 ++- .../athena/connectors/jdbc/JdbcEnvironmentProperties.java | 4 ++-- .../athena/connectors/oracle/OracleMetadataHandler.java | 4 +--- .../connectors/snowflake/SnowflakeMetadataHandlerTest.java | 3 --- 6 files changed, 11 insertions(+), 13 deletions(-) diff --git a/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400EnvironmentProperties.java b/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400EnvironmentProperties.java index 
46dcde3b14..e4473f833d 100644 --- a/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400EnvironmentProperties.java +++ b/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400EnvironmentProperties.java @@ -37,14 +37,14 @@ public Map connectionPropertiesToEnvironment(Map HashMap environment = new HashMap<>(); // now construct jdbc string - String connectionString = "db2as400://jdbc:as400://" + connectionProperties.get(HOST) - + ";" + connectionProperties.getOrDefault(JDBC_PARAMS, ""); + String connectionString = String.join("db2as400://jdbc:as400://", connectionProperties.get(HOST), + ";", connectionProperties.getOrDefault(JDBC_PARAMS, "")); if (connectionProperties.containsKey(SECRET_NAME)) { if (connectionProperties.containsKey(JDBC_PARAMS)) { // need to add delimiter connectionString = connectionString + ";"; } - connectionString = connectionString + ":${" + connectionProperties.get(SECRET_NAME) + "}"; + connectionString = String.join(connectionString, ":${", connectionProperties.get(SECRET_NAME), "}"); } logger.debug("Constructed connection string: {}", connectionString); diff --git a/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBMetadataHandler.java b/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBMetadataHandler.java index 5b21336bab..8b57dd6685 100644 --- a/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBMetadataHandler.java +++ b/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBMetadataHandler.java @@ -96,6 +96,8 @@ public class DocDBMetadataHandler //The Glue table property that indicates that a table matching the name of an DocDB table //is indeed enabled for use by this connector. 
private static final String DOCDB_METADATA_FLAG = "docdb-metadata-flag"; + //The prefix of a connection string + protected static final String DOCDB_CONN_STRING_PREFIX = "mongodb://"; //Used to filter out Glue tables which lack a docdb metadata flag. private static final TableFilter TABLE_FILTER = (Table table) -> table.parameters().containsKey(DOCDB_METADATA_FLAG); //The number of documents to scan when attempting to infer schema from an DocDB collection. @@ -134,7 +136,7 @@ private MongoClient getOrCreateConn(MetadataRequest request) { String connStr = getConnStr(request); if (configOptions.containsKey(SECRET_NAME) && !hasEmbeddedSecret(connStr)) { - connStr = connStr.substring(0, 10) + "${" + configOptions.get(SECRET_NAME) + "}@" + connStr.substring(10); + connStr = String.join(connStr.substring(0, DOCDB_CONN_STRING_PREFIX.length()), "${", configOptions.get(SECRET_NAME), "}@", connStr.substring(DOCDB_CONN_STRING_PREFIX.length())); } String endpoint = resolveSecrets(connStr); return connectionFactory.getOrCreateConn(endpoint); diff --git a/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandler.java b/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandler.java index d1d536ee73..446578361f 100644 --- a/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandler.java +++ b/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandler.java @@ -49,6 +49,7 @@ import static com.amazonaws.athena.connector.lambda.handlers.GlueMetadataHandler.SOURCE_TABLE_PROPERTY; import static com.amazonaws.athena.connectors.docdb.DocDBFieldResolver.DEFAULT_FIELD_RESOLVER; import static com.amazonaws.athena.connectors.docdb.DocDBMetadataHandler.DOCDB_CONN_STR; +import static com.amazonaws.athena.connectors.docdb.DocDBMetadataHandler.DOCDB_CONN_STRING_PREFIX; /** * Handles data read record requests for the Athena DocumentDB Connector. 
@@ -110,7 +111,7 @@ private MongoClient getOrCreateConn(Split split) throw new RuntimeException(DOCDB_CONN_STR + " Split property is null! Unable to create connection."); } if (configOptions.containsKey(SECRET_NAME) && !hasEmbeddedSecret(connStr)) { - connStr = connStr.substring(0, 10) + "${" + configOptions.get(SECRET_NAME) + "}@" + connStr.substring(10); + connStr = String.join(connStr.substring(0, DOCDB_CONN_STRING_PREFIX.length()), "${", configOptions.get(SECRET_NAME), "}@", connStr.substring(DOCDB_CONN_STRING_PREFIX.length())); } String endpoint = resolveSecrets(connStr); return connectionFactory.getOrCreateConn(endpoint); diff --git a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/JdbcEnvironmentProperties.java b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/JdbcEnvironmentProperties.java index db4a349155..e25b14b60d 100644 --- a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/JdbcEnvironmentProperties.java +++ b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/JdbcEnvironmentProperties.java @@ -39,8 +39,8 @@ public Map connectionPropertiesToEnvironment(Map HashMap environment = new HashMap<>(); // now construct jdbc string - String connectionString = getConnectionStringPrefix(connectionProperties) + connectionProperties.get(HOST) - + ":" + connectionProperties.get(PORT) + getDatabase(connectionProperties) + getJdbcParameters(connectionProperties); + String connectionString = String.join(getConnectionStringPrefix(connectionProperties), connectionProperties.get(HOST), + ":", connectionProperties.get(PORT), getDatabase(connectionProperties), getJdbcParameters(connectionProperties)); environment.put(DEFAULT, connectionString); return environment; diff --git a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandler.java b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandler.java index 334c3ea139..1979c3596e 100644 --- 
a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandler.java +++ b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandler.java @@ -161,13 +161,11 @@ public void getPartitions(final BlockWriter blockWriter, final GetTableLayoutReq LOGGER.debug("{}: Schema {}, table {}", getTableLayoutRequest.getQueryId(), transformString(getTableLayoutRequest.getTableName().getSchemaName(), true), transformString(getTableLayoutRequest.getTableName().getTableName(), true)); try (Connection connection = getJdbcConnectionFactory().getConnection(getCredentialProvider())) { - List parameters = Arrays.asList(transformString(getTableLayoutRequest.getTableName().getTableName(), true)); - //try (Statement statement = connection.createStatement(); ResultSet resultSet = statement.executeQuery(GET_PARTITIONS_QUERY + )) + List parameters = Arrays.asList(transformString(getTableLayoutRequest.getTableName().getTableName(), true)); try (PreparedStatement preparedStatement = new PreparedStatementBuilder().withConnection(connection).withQuery(GET_PARTITIONS_QUERY).withParameters(parameters).build(); ResultSet resultSet = preparedStatement.executeQuery()) { // Return a single partition if no partitions defined if (!resultSet.next()) { - LOGGER.debug("here"); blockWriter.writeRows((Block block, int rowNum) -> { LOGGER.debug("Parameters: " + BLOCK_PARTITION_COLUMN_NAME + " " + rowNum + " " + ALL_PARTITIONS); block.setValue(BLOCK_PARTITION_COLUMN_NAME, rowNum, ALL_PARTITIONS); diff --git a/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandlerTest.java b/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandlerTest.java index 9f292c12aa..37421c2a02 100644 --- a/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandlerTest.java +++ 
b/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandlerTest.java @@ -116,7 +116,6 @@ public void doGetTableLayout() PreparedStatement countsPreparedStatement = Mockito.mock(PreparedStatement.class); String GET_PKEY_COUNTS_QUERY = "SELECT \"pkey\", count(*) as COUNTS FROM \"testSchema\".\"testTable\" GROUP BY \"pkey\" ORDER BY COUNTS DESC"; - System.err.println("test:" + GET_PKEY_COUNTS_QUERY); String[] countsColumns = new String[] {"pkey", SnowflakeMetadataHandler.COUNTS_COLUMN_NAME}; Object[][] countsValues = {{"a", 1}}; ResultSet countsResultSet = mockResultSet(countsColumns, countsValues, new AtomicInteger(-1)); @@ -181,7 +180,6 @@ public void doGetTableLayoutSinglePartition() PreparedStatement countsPreparedStatement = Mockito.mock(PreparedStatement.class); String GET_PKEY_COUNTS_QUERY = "SELECT \"pkey\", count(*) as COUNTS FROM \"testSchema\".\"testTable\" GROUP BY \"pkey\" ORDER BY COUNTS DESC"; - System.err.println("test:" + GET_PKEY_COUNTS_QUERY); String[] countsColumns = new String[] {"pkey", SnowflakeMetadataHandler.COUNTS_COLUMN_NAME}; Object[][] countsValues = {{"a", 1}}; ResultSet countsResultSet = mockResultSet(countsColumns, countsValues, new AtomicInteger(-1)); @@ -225,7 +223,6 @@ public void doGetTableLayoutMaxPartition() long pageCount = (long) (Math.ceil(totalActualRecordCount / MAX_PARTITION_COUNT)); long partitionActualRecordCount = (totalActualRecordCount <= 10000) ? 
(long) totalActualRecordCount : pageCount; double limit = (int) Math.ceil(totalActualRecordCount / partitionActualRecordCount); -// double limit = 1; long offset = 0; String[] columns = {"partition"}; int[] types = {Types.VARCHAR}; From c03a2fd78ef7e2ab50e34ffaeac4c3d7c4010e96 Mon Sep 17 00:00:00 2001 From: Aimery Methena <159072740+aimethed@users.noreply.github.com> Date: Thu, 12 Dec 2024 10:01:29 -0500 Subject: [PATCH 80/87] remove Trianz label (#2468) --- athena-google-bigquery/athena-google-bigquery-connection.yaml | 1 - athena-google-bigquery/athena-google-bigquery.yaml | 1 - 2 files changed, 2 deletions(-) diff --git a/athena-google-bigquery/athena-google-bigquery-connection.yaml b/athena-google-bigquery/athena-google-bigquery-connection.yaml index 6ed37cde58..3105bed256 100644 --- a/athena-google-bigquery/athena-google-bigquery-connection.yaml +++ b/athena-google-bigquery/athena-google-bigquery-connection.yaml @@ -8,7 +8,6 @@ Metadata: LicenseUrl: LICENSE.txt ReadmeUrl: README.md Labels: - - Trianz - Big-Query - Athena-Federation - Google-SDK diff --git a/athena-google-bigquery/athena-google-bigquery.yaml b/athena-google-bigquery/athena-google-bigquery.yaml index b113376133..b92e9d01ea 100644 --- a/athena-google-bigquery/athena-google-bigquery.yaml +++ b/athena-google-bigquery/athena-google-bigquery.yaml @@ -8,7 +8,6 @@ Metadata: LicenseUrl: LICENSE.txt ReadmeUrl: README.md Labels: - - Trianz - Big-Query - Athena-Federation - Google-SDK From c639820fb1f2deae642f87f6c8e7bf35f73bbeb5 Mon Sep 17 00:00:00 2001 From: Aimery Methena <159072740+aimethed@users.noreply.github.com> Date: Thu, 12 Dec 2024 10:21:44 -0500 Subject: [PATCH 81/87] revert bump (#2469) --- athena-redshift/pom.xml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/athena-redshift/pom.xml b/athena-redshift/pom.xml index 57690be6cd..688f70378a 100644 --- a/athena-redshift/pom.xml +++ b/athena-redshift/pom.xml @@ -23,7 +23,8 @@ com.amazon.redshift redshift-jdbc42 - 2.1.0.31 + + 
2.1.0.30 org.mockito From 4a41f7e5aa217f24f0affde0161a5eb284b4657f Mon Sep 17 00:00:00 2001 From: Aimery Methena <159072740+aimethed@users.noreply.github.com> Date: Thu, 12 Dec 2024 16:54:37 -0500 Subject: [PATCH 82/87] check legacy IS_FIPS_ENABLED (#2470) --- .../connectors/oracle/OracleJdbcConnectionFactory.java | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleJdbcConnectionFactory.java b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleJdbcConnectionFactory.java index cfe488a7ce..fc9153381d 100644 --- a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleJdbcConnectionFactory.java +++ b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleJdbcConnectionFactory.java @@ -38,6 +38,7 @@ public class OracleJdbcConnectionFactory extends GenericJdbcConnectionFactory { public static final String IS_FIPS_ENABLED = "is_fips_enabled"; + public static final String IS_FIPS_ENABLED_LEGACY = "is_FIPS_Enabled"; private final DatabaseConnectionInfo databaseConnectionInfo; private final DatabaseConnectionConfig databaseConnectionConfig; private static final Logger LOGGER = LoggerFactory.getLogger(OracleJdbcConnectionFactory.class); @@ -46,8 +47,8 @@ public class OracleJdbcConnectionFactory extends GenericJdbcConnectionFactory /** * @param databaseConnectionConfig database connection configuration {@link DatabaseConnectionConfig} - * @param databaseConnectionInfo - */ + * @param databaseConnectionInfo + */ public OracleJdbcConnectionFactory(DatabaseConnectionConfig databaseConnectionConfig, DatabaseConnectionInfo databaseConnectionInfo) { super(databaseConnectionConfig, null, databaseConnectionInfo); @@ -68,7 +69,7 @@ public Connection getConnection(final JdbcCredentialProvider jdbcCredentialProvi properties.put("javax.net.ssl.trustStoreType", "JKS"); properties.put("javax.net.ssl.trustStorePassword", "changeit"); 
properties.put("oracle.net.ssl_server_dn_match", "true"); - if (System.getenv().getOrDefault(IS_FIPS_ENABLED, "false").equalsIgnoreCase("true")) { + if (System.getenv().getOrDefault(IS_FIPS_ENABLED, "false").equalsIgnoreCase("true") || System.getenv().getOrDefault(IS_FIPS_ENABLED_LEGACY, "false").equalsIgnoreCase("true")) { properties.put("oracle.net.ssl_cipher_suites", "(TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA)"); } } From d0376e4db0d97539740b03c6607673ef23444f32 Mon Sep 17 00:00:00 2001 From: AbdulRehman Date: Fri, 13 Dec 2024 11:06:47 -0500 Subject: [PATCH 83/87] Oracle Data Types Clean up (#2453) Co-authored-by: AbdulRehman Faraj Co-authored-by: Trianz-Akshay <108925344+Trianz-Akshay@users.noreply.github.com> --- .../oracle/OracleMetadataHandler.java | 126 +++++++----------- .../oracle/OracleMetadataHandlerTest.java | 5 +- 2 files changed, 54 insertions(+), 77 deletions(-) diff --git a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandler.java b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandler.java index 1979c3596e..cfa34e9057 100644 --- a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandler.java +++ b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandler.java @@ -56,6 +56,7 @@ import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; +import oracle.jdbc.OracleTypes; import org.apache.arrow.vector.complex.reader.FieldReader; import org.apache.arrow.vector.types.Types; import org.apache.arrow.vector.types.pojo.ArrowType; @@ -73,7 +74,6 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.util.Arrays; -import java.util.HashMap; import 
java.util.HashSet; import java.util.List; import java.util.Set; @@ -353,87 +353,61 @@ private Schema getSchema(Connection jdbcConnection, TableName tableName, Schema { SchemaBuilder schemaBuilder = SchemaBuilder.newBuilder(); - try (ResultSet resultSet = getColumns(jdbcConnection.getCatalog(), tableName, jdbcConnection.getMetaData()); - Connection connection = getJdbcConnectionFactory().getConnection(getCredentialProvider())) { - boolean found = false; - HashMap hashMap = new HashMap(); - /** - * Getting original data type from oracle table for conversion - */ - try - (PreparedStatement stmt = connection.prepareStatement("select COLUMN_NAME ,DATA_TYPE from USER_TAB_COLS where table_name =?")) { - stmt.setString(1, transformString(tableName.getTableName(), true)); - ResultSet dataTypeResultSet = stmt.executeQuery(); - while (dataTypeResultSet.next()) { - hashMap.put(dataTypeResultSet.getString(COLUMN_NAME).trim(), dataTypeResultSet.getString("DATA_TYPE").trim()); - } - LOGGER.debug("hashMap", hashMap.toString()); - while (resultSet.next()) { - ArrowType columnType = JdbcArrowTypeConverter.toArrowType( - resultSet.getInt("DATA_TYPE"), - resultSet.getInt("COLUMN_SIZE"), - resultSet.getInt("DECIMAL_DIGITS"), - configOptions); - String columnName = resultSet.getString(COLUMN_NAME); - /** Handling TIMESTAMP,DATE, 0 Precesion**/ - if (columnType != null && columnType.getTypeID().equals(ArrowType.ArrowTypeID.Decimal)) { - String[] data = columnType.toString().split(","); - if (data[0].contains("0") || data[1].contains("0")) { - columnType = Types.MinorType.BIGINT.getType(); - } - - /** Handling negative scale issue */ - if (Integer.parseInt(data[1].trim().replace(")", "")) < 0.0) { - columnType = Types.MinorType.VARCHAR.getType(); - } + try (ResultSet resultSet = getColumns(jdbcConnection.getCatalog(), tableName, jdbcConnection.getMetaData())) { + while (resultSet.next()) { + ArrowType arrowColumnType = JdbcArrowTypeConverter.toArrowType( + resultSet.getInt("DATA_TYPE"), + 
resultSet.getInt("COLUMN_SIZE"), + resultSet.getInt("DECIMAL_DIGITS"), + configOptions); + + String columnName = resultSet.getString(COLUMN_NAME); + int jdbcColumnType = resultSet.getInt("DATA_TYPE"); + int scale = resultSet.getInt("COLUMN_SIZE"); + + LOGGER.debug("columnName: {}", columnName); + LOGGER.debug("arrowColumnType: {}", arrowColumnType); + LOGGER.debug("jdbcColumnType: {}", jdbcColumnType); + + /** + * below data type conversion doing since a framework not giving appropriate + * data types for oracle data types. + */ + + /** Handling TIMESTAMP, DATE, 0 Precision **/ + if (arrowColumnType != null && arrowColumnType.getTypeID().equals(ArrowType.ArrowTypeID.Decimal)) { + String[] data = arrowColumnType.toString().split(","); + if (scale == 0 || Integer.parseInt(data[1].trim()) < 0) { + arrowColumnType = Types.MinorType.BIGINT.getType(); } + } - String dataType = hashMap.get(columnName); - LOGGER.debug("columnName: " + columnName); - LOGGER.debug("dataType: " + dataType); - /** - * below data type conversion doing since framework not giving appropriate - * data types for oracle data types.. 
- */ - /** - * Converting oracle date data type into DATEDAY MinorType - */ - if (dataType != null && (dataType.contains("date") || dataType.contains("DATE"))) { - columnType = Types.MinorType.DATEDAY.getType(); - } - /** - * Converting oracle NUMBER data type into BIGINT MinorType - */ - if (dataType != null && (dataType.contains("NUMBER")) && columnType.getTypeID().toString().equalsIgnoreCase("Utf8")) { - columnType = Types.MinorType.BIGINT.getType(); - } + /** + * Converting an Oracle date data type into DATEDAY MinorType + */ + if (jdbcColumnType == java.sql.Types.TIMESTAMP && scale == 7) { + arrowColumnType = Types.MinorType.DATEDAY.getType(); + } - /** - * Converting oracle TIMESTAMP data type into DATEMILLI MinorType - */ - if (dataType != null && (dataType.contains("TIMESTAMP")) - ) { - columnType = Types.MinorType.DATEMILLI.getType(); - } - if (columnType == null) { - columnType = Types.MinorType.VARCHAR.getType(); - } - if (columnType != null && !SupportedTypes.isSupported(columnType)) { - columnType = Types.MinorType.VARCHAR.getType(); - } + /** + * Converting an Oracle TIMESTAMP_WITH_TZ & TIMESTAMP_WITH_LOCAL_TZ data type into DATEMILLI MinorType + */ + if (jdbcColumnType == OracleTypes.TIMESTAMPLTZ || jdbcColumnType == OracleTypes.TIMESTAMPTZ) { + arrowColumnType = Types.MinorType.DATEMILLI.getType(); + } - if (columnType != null && SupportedTypes.isSupported(columnType)) { - schemaBuilder.addField(FieldBuilder.newBuilder(columnName, columnType).build()); - found = true; - } - else { - LOGGER.error("getSchema: Unable to map type for column[" + columnName + "] to a supported type, attempted " + columnType); - } + if (arrowColumnType != null && !SupportedTypes.isSupported(arrowColumnType)) { + LOGGER.warn("getSchema: Unable to map type JDBC type [{}] for column[{}] to a supported type, attempted {}", jdbcColumnType, columnName, arrowColumnType); + arrowColumnType = Types.MinorType.VARCHAR.getType(); } + + if (arrowColumnType == null) { + 
LOGGER.warn("getSchema: column[{}] type is null setting it to varchar | JDBC Type is [{}]", columnName, jdbcColumnType); + arrowColumnType = Types.MinorType.VARCHAR.getType(); + } + schemaBuilder.addField(FieldBuilder.newBuilder(columnName, arrowColumnType).build()); } - if (!found) { - throw new RuntimeException("Could not find table in " + tableName.getSchemaName()); - } + partitionSchema.getFields().forEach(schemaBuilder::addField); LOGGER.debug("Oracle Table Schema" + schemaBuilder.toString()); return schemaBuilder.build(); diff --git a/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandlerTest.java b/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandlerTest.java index eea74cfc23..9a4cd4b376 100644 --- a/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandlerTest.java +++ b/athena-oracle/src/test/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandlerTest.java @@ -33,6 +33,7 @@ import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig; import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory; import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider; +import org.apache.arrow.vector.types.pojo.ArrowType; import org.apache.arrow.vector.types.pojo.Field; import org.apache.arrow.vector.types.pojo.Schema; import org.junit.Assert; @@ -312,7 +313,7 @@ public void doGetTable() BlockAllocator blockAllocator = new BlockAllocatorImpl(); String[] schema = {"DATA_TYPE", "COLUMN_SIZE", "COLUMN_NAME", "DECIMAL_DIGITS", "NUM_PREC_RADIX"}; Object[][] values = {{Types.INTEGER, 12, "testCol1", 0, 0}, {Types.VARCHAR, 25, "testCol2", 0, 0}, - {Types.TIMESTAMP, 93, "testCol3", 0, 0}, {Types.TIMESTAMP_WITH_TIMEZONE, 93, "testCol4", 0, 0}}; + {Types.TIMESTAMP, 93, "testCol3", 0, 0}, {Types.TIMESTAMP_WITH_TIMEZONE, 93, "testCol4", 0, 0}, {Types.NUMERIC, 10, "testCol5", 2, 0}}; AtomicInteger rowNumber = new 
AtomicInteger(-1); ResultSet resultSet = mockResultSet(schema, values, rowNumber); @@ -321,6 +322,8 @@ public void doGetTable() expectedSchemaBuilder.addField(FieldBuilder.newBuilder("testCol2", org.apache.arrow.vector.types.Types.MinorType.VARCHAR.getType()).build()); expectedSchemaBuilder.addField(FieldBuilder.newBuilder("testCol3", org.apache.arrow.vector.types.Types.MinorType.DATEMILLI.getType()).build()); expectedSchemaBuilder.addField(FieldBuilder.newBuilder("testCol4", org.apache.arrow.vector.types.Types.MinorType.VARCHAR.getType()).build()); + ArrowType.Decimal testCol5ArrowType = ArrowType.Decimal.createDecimal(10, 2, 128); + expectedSchemaBuilder.addField(FieldBuilder.newBuilder("testCol5", testCol5ArrowType).build()); PARTITION_SCHEMA.getFields().forEach(expectedSchemaBuilder::addField); Schema expected = expectedSchemaBuilder.build(); From c8371be7face574eb6becaaeb74cc4dd4af29d18 Mon Sep 17 00:00:00 2001 From: AbdulRehman Date: Mon, 16 Dec 2024 14:35:40 -0500 Subject: [PATCH 84/87] Enabled RDS Certs for Oracle DB (#2473) Co-authored-by: AbdulRehman Faraj --- athena-oracle/Dockerfile | 33 +++++++++++++++++++ .../oracle/OracleJdbcConnectionFactory.java | 10 +++--- 2 files changed, 37 insertions(+), 6 deletions(-) diff --git a/athena-oracle/Dockerfile b/athena-oracle/Dockerfile index eaeafac1b6..cb5216c202 100644 --- a/athena-oracle/Dockerfile +++ b/athena-oracle/Dockerfile @@ -1,9 +1,42 @@ FROM public.ecr.aws/lambda/java:11 +# Install necessary tools +RUN yum update -y && yum install -y curl perl openssl + +ENV truststore=${LAMBDA_TASK_ROOT}/rds-truststore.jks +ENV storepassword=federationStorePass + +# Download and process the RDS certificate +RUN curl -sS "https://truststore.pki.rds.amazonaws.com/global/global-bundle.pem" > ${LAMBDA_TASK_ROOT}/global-bundle.pem && \ + awk 'split_after == 1 {n++;split_after=0} /-----END CERTIFICATE-----/ {split_after=1}{print > "rds-ca-" n ".pem"}' < ${LAMBDA_TASK_ROOT}/global-bundle.pem + +# Import certificates into the 
truststore +RUN for CERT in rds-ca-*; do \ + alias=$(openssl x509 -noout -text -in $CERT | perl -ne 'next unless /Subject:/; s/.*(CN=|CN = )//; print') && \ + echo "Importing $alias" && \ + keytool -import -file ${CERT} -alias "${alias}" -storepass ${storepassword} -keystore ${truststore} -noprompt && \ + rm $CERT; \ + done + +# Clean up +RUN rm ${LAMBDA_TASK_ROOT}/global-bundle.pem + +# Optional: List the content of the trust store (for verification) +RUN echo "Trust store content is: " && \ + keytool -list -v -keystore "$truststore" -storepass ${storepassword} | grep Alias | cut -d " " -f3- | while read alias; do \ + expiry=$(keytool -list -v -keystore "$truststore" -storepass ${storepassword} -alias "${alias}" | grep Valid | perl -ne 'if(/until: (.*?)\n/) { print "$1\n"; }'); \ + echo " Certificate ${alias} expires in '$expiry'"; \ + done + # Copy function code and runtime dependencies from Maven layout COPY target/athena-oracle-2022.47.1.jar ${LAMBDA_TASK_ROOT} # Unpack the jar RUN jar xf athena-oracle-2022.47.1.jar +# Clean up JAR +RUN rm ${LAMBDA_TASK_ROOT}/athena-oracle-2022.47.1.jar + # Command can be overwritten by providing a different command in the template directly. 
# No need to specify here (already defined in .yaml file because legacy and connections use different) +# Set the CMD to your handler by removing the following comment for manual testing +# CMD [ "com.amazonaws.athena.connectors.oracle.OracleCompositeHandler" ] diff --git a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleJdbcConnectionFactory.java b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleJdbcConnectionFactory.java index fc9153381d..22efdd5ebb 100644 --- a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleJdbcConnectionFactory.java +++ b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleJdbcConnectionFactory.java @@ -33,7 +33,6 @@ import java.sql.SQLException; import java.util.Properties; import java.util.regex.Matcher; -import java.util.regex.Pattern; public class OracleJdbcConnectionFactory extends GenericJdbcConnectionFactory { @@ -42,8 +41,6 @@ public class OracleJdbcConnectionFactory extends GenericJdbcConnectionFactory private final DatabaseConnectionInfo databaseConnectionInfo; private final DatabaseConnectionConfig databaseConnectionConfig; private static final Logger LOGGER = LoggerFactory.getLogger(OracleJdbcConnectionFactory.class); - private static final String SSL_CONNECTION_STRING_REGEX = "jdbc:oracle:thin:\\$\\{([a-zA-Z0-9:_/+=.@-]+)\\}@tcps://"; - private static final Pattern SSL_CONNECTION_STRING_PATTERN = Pattern.compile(SSL_CONNECTION_STRING_REGEX); /** * @param databaseConnectionConfig database connection configuration {@link DatabaseConnectionConfig} @@ -64,10 +61,12 @@ public Connection getConnection(final JdbcCredentialProvider jdbcCredentialProvi Properties properties = new Properties(); if (null != jdbcCredentialProvider) { - if (SSL_CONNECTION_STRING_PATTERN.matcher(databaseConnectionConfig.getJdbcConnectionString()).matches()) { + //checking for tcps (Secure Communication) protocol as part of the connection string. 
+ if (databaseConnectionConfig.getJdbcConnectionString().toLowerCase().contains("@tcps://")) { LOGGER.info("Establishing connection over SSL.."); properties.put("javax.net.ssl.trustStoreType", "JKS"); - properties.put("javax.net.ssl.trustStorePassword", "changeit"); + properties.put("javax.net.ssl.trustStore", "rds-truststore.jks"); + properties.put("javax.net.ssl.trustStorePassword", "federationStorePass"); properties.put("oracle.net.ssl_server_dn_match", "true"); if (System.getenv().getOrDefault(IS_FIPS_ENABLED, "false").equalsIgnoreCase("true") || System.getenv().getOrDefault(IS_FIPS_ENABLED_LEGACY, "false").equalsIgnoreCase("true")) { properties.put("oracle.net.ssl_cipher_suites", "(TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA)"); @@ -84,7 +83,6 @@ public Connection getConnection(final JdbcCredentialProvider jdbcCredentialProvi final String secretReplacement = String.format("%s/%s", jdbcCredentialProvider.getCredential().getUser(), password); derivedJdbcString = secretMatcher.replaceAll(Matcher.quoteReplacement(secretReplacement)); - LOGGER.info("derivedJdbcString: " + derivedJdbcString); return DriverManager.getConnection(derivedJdbcString, properties); } else { From a12f67667a295b572bd3b5dad5f2b30a4a631062 Mon Sep 17 00:00:00 2001 From: ejeffrli <144148373+ejeffrli@users.noreply.github.com> Date: Mon, 16 Dec 2024 15:08:09 -0500 Subject: [PATCH 85/87] Update neptune documentation (#2472) --- .../docs/neptune-connector-setup/README.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/athena-neptune/docs/neptune-connector-setup/README.md b/athena-neptune/docs/neptune-connector-setup/README.md index e1e231966d..44ef93597e 100644 --- a/athena-neptune/docs/neptune-connector-setup/README.md +++ b/athena-neptune/docs/neptune-connector-setup/README.md @@ -3,10 
+3,10 @@ To deploy the Amazon Athena Neptune connector, we will need the following pre-requisite information: 1) SpillBucket – You can either use an existing S3 bucket or create a new one to be used by the connector to store spill over results for Athena to consume. -2) NeptuneEndpoint – You can get this information from the Neptune console and copying the cluster “Writer” endpoint information. +2) NeptuneClusterEndpoint – You can get this information from the Neptune console and copying the cluster “Writer” endpoint information. ![](./assets/connector-clusterendpoint.png) -3) NeptuneClusterResId - To find the Neptune cluster resource ID in the Amazon Neptune AWS Management Console, choose the DB cluster that you want. The Resource ID is shown in the Configuration section. +3) NeptuneClusterResId/NeptuneClusterResourceID - To find the Neptune cluster resource ID in the Amazon Neptune AWS Management Console, choose the DB cluster that you want. The Resource ID is shown in the Configuration section. ![](./assets/connector-clusterId.png) 4) SecurityGroupIds – These are the Security Group ID(s) that the connector Lambda function uses to communicate with Neptune. There are two steps: @@ -47,15 +47,15 @@ Scroll down to “Application Settings” and specify the following field values * GlueDatabaseName: This should be same as the glue database you created in one of the earlier steps. Example: graph-database. - * IamEnabled: This option indicates whether you have IAM DB Auth enabled on your Neptune Cluster or not. Default value is "false". + * IAMEnabled: This option indicates whether you have IAM DB Auth enabled on your Neptune Cluster or not. Default value is "false". * LambdaMemory: The memory allocation for the connector lambda function ranging between 128 – 3008 MB. The default is 3008 MB. * LambdaTimeout: Timeout value in seconds for the connector lambda function. Default value is 900 seconds. 
- * NeptuneEndpoint: Provide the Neptune Cluster endpoint that you have captured in one of the previous steps. + * NeptuneClusterEndpoint: Provide the Neptune Cluster endpoint that you have captured in one of the previous steps. - * NeptuneClusterResId: Provide the Neptune Cluster resourceid that you have captured in one of the previous steps. + * NeptuneClusterResId/NeptuneClusterResourceID: Provide the Neptune Cluster resourceid that you have captured in one of the previous steps. * NeptunePort: The listener port for your Neptune Cluster. Default is 8182. @@ -67,7 +67,7 @@ Scroll down to “Application Settings” and specify the following field values * SubnetIds: Subnet IDs that you have captured in one of the earlier steps separated by commas. - * NeptuneGraphtype: PROPERTYGRAPH or RDF. + * NeptuneGraphType: PROPERTYGRAPH or RDF. Provide Acknowledgement on the custom IAM roles creation and click on “Deploy”. Sample screenshots below: From 5cf871fa8bb16d45de0d61fbd3670aaf2db4d9a2 Mon Sep 17 00:00:00 2001 From: ejeffrli <144148373+ejeffrli@users.noreply.github.com> Date: Mon, 16 Dec 2024 15:08:25 -0500 Subject: [PATCH 86/87] Add SECRET_NAME_PATTERN unit test (#2471) --- .../GenericJdbcConnectionFactoryTest.java | 48 +++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/connection/GenericJdbcConnectionFactoryTest.java diff --git a/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/connection/GenericJdbcConnectionFactoryTest.java b/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/connection/GenericJdbcConnectionFactoryTest.java new file mode 100644 index 0000000000..2ba8cb254d --- /dev/null +++ b/athena-jdbc/src/test/java/com/amazonaws/athena/connectors/jdbc/connection/GenericJdbcConnectionFactoryTest.java @@ -0,0 +1,48 @@ +/*- + * #%L + * athena-jdbc + * %% + * Copyright (C) 2019 - 2024 Amazon Web Services + * %% + * Licensed under the Apache License, Version 
2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * #L% + */ +package com.amazonaws.athena.connectors.jdbc.connection; + +import org.junit.Assert; +import org.junit.Test; + +import java.util.regex.Matcher; + +import static com.amazonaws.athena.connectors.jdbc.connection.GenericJdbcConnectionFactory.SECRET_NAME_PATTERN; + +public class GenericJdbcConnectionFactoryTest +{ + @Test + public void matchSecretNamePattern() + { + String jdbcConnectionString = "mysql://jdbc:mysql://mysql.host:3333/default?${secret!@+=_}"; + Matcher secretMatcher = SECRET_NAME_PATTERN.matcher(jdbcConnectionString); + + Assert.assertTrue(secretMatcher.find()); + } + + @Test + public void matchIncorrectSecretNamePattern() + { + String jdbcConnectionString = "mysql://jdbc:mysql://mysql.host:3333/default?${secret!@+=*_}"; + Matcher secretMatcher = SECRET_NAME_PATTERN.matcher(jdbcConnectionString); + + Assert.assertFalse(secretMatcher.find()); + } +} From 66bac000d5da58d40137987b3890a1bba48e73e0 Mon Sep 17 00:00:00 2001 From: ejeffrli <144148373+ejeffrli@users.noreply.github.com> Date: Tue, 17 Dec 2024 12:40:37 -0500 Subject: [PATCH 87/87] Support DECIMAL type properly when using DDB type NUMBER in sets (#2483) --- .../amazonaws/athena/connectors/dynamodb/util/DDBTypeUtils.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/util/DDBTypeUtils.java b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/util/DDBTypeUtils.java index 
d1abcdefaa..8412b584f5 100644 --- a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/util/DDBTypeUtils.java +++ b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/util/DDBTypeUtils.java @@ -126,7 +126,7 @@ else if (enhancedAttributeValue.isSetOfBytes()) { return new Field(key, FieldType.nullable(Types.MinorType.LIST.getType()), Collections.singletonList(child)); } else if (enhancedAttributeValue.isSetOfNumbers()) { - Field child = new Field(key, FieldType.nullable(Types.MinorType.DECIMAL.getType()), null); + Field child = new Field(key, FieldType.nullable(new ArrowType.Decimal(38, 9)), null); return new Field(key, FieldType.nullable(Types.MinorType.LIST.getType()), Collections.singletonList(child)); } else if (enhancedAttributeValue.isSetOfStrings()) {