Add auto_purge to hive table properties
posulliv committed Oct 1, 2021
1 parent 9484d74 commit ca17dec
Showing 3 changed files with 52 additions and 2 deletions.
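At a glance, the commit wires a new auto_purge table property through the Hive connector and stores it as the auto.purge Hive table parameter, whose documented effect is to skip the trash when the table or a partition is deleted. A minimal usage sketch in SQL, mirroring the new test added below; the catalog and schema names (hive.default) and the table name are illustrative, not part of this commit:

CREATE TABLE hive.default.purge_example
WITH (auto_purge = true)
AS SELECT * FROM tpch.tiny.customer;

-- auto.purge=true is written into the Hive table parameters, so dropping the
-- table (or its partitions) is expected to bypass the trash directory
DROP TABLE hive.default.purge_example;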
@@ -189,6 +189,7 @@
import static io.trino.plugin.hive.HiveSessionProperties.isSortedWritingEnabled;
import static io.trino.plugin.hive.HiveSessionProperties.isStatisticsEnabled;
import static io.trino.plugin.hive.HiveTableProperties.ANALYZE_COLUMNS_PROPERTY;
import static io.trino.plugin.hive.HiveTableProperties.AUTO_PURGE;
import static io.trino.plugin.hive.HiveTableProperties.AVRO_SCHEMA_URL;
import static io.trino.plugin.hive.HiveTableProperties.BUCKETED_BY_PROPERTY;
import static io.trino.plugin.hive.HiveTableProperties.BUCKET_COUNT_PROPERTY;
@@ -316,6 +317,8 @@ public class HiveMetadata
    private static final String CSV_QUOTE_KEY = OpenCSVSerde.QUOTECHAR;
    private static final String CSV_ESCAPE_KEY = OpenCSVSerde.ESCAPECHAR;

    private static final String AUTO_PURGE_KEY = "auto.purge";

    private final CatalogName catalogName;
    private final SemiTransactionalHiveMetastore metastore;
    private final HdfsEnvironment hdfsEnvironment;
@@ -606,6 +609,11 @@ private ConnectorTableMetadata doGetTableMetadata(ConnectorSession session, Sche

        Optional<String> comment = Optional.ofNullable(table.getParameters().get(TABLE_COMMENT));

        String autoPurgeProperty = table.getParameters().get(AUTO_PURGE_KEY);
        if (parseBoolean(autoPurgeProperty)) {
            properties.put(AUTO_PURGE, true);
        }

        return new ConnectorTableMetadata(tableName, columns.build(), properties.build(), comment);
    }

@@ -877,6 +885,9 @@ private Map<String, String> getEmptyTableProperties(ConnectorTableMetadata table
        boolean transactional = HiveTableProperties.isTransactional(tableMetadata.getProperties()).orElse(false);
        tableProperties.put(TRANSACTIONAL, String.valueOf(transactional));

        boolean autoPurgeEnabled = HiveTableProperties.isAutoPurge(tableMetadata.getProperties()).orElse(false);
        tableProperties.put(AUTO_PURGE_KEY, String.valueOf(autoPurgeEnabled));

        bucketProperty.ifPresent(hiveBucketProperty ->
                tableProperties.put(BUCKETING_VERSION, Integer.toString(hiveBucketProperty.getBucketingVersion().getVersion())));

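The two HiveMetadata changes above make the property round-trip: getEmptyTableProperties now always writes an auto.purge parameter (true or false) when a table is created, and doGetTableMetadata maps a true auto.purge parameter back to the auto_purge connector property. A hedged follow-up check in SQL, reusing the illustrative table from the sketch above; the exact output depends on the table's other properties:

SHOW CREATE TABLE hive.default.purge_example;
-- the WITH clause should now include auto_purge = true alongside the storage format and other properties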
@@ -68,6 +68,7 @@ public class HiveTableProperties
    public static final String CSV_QUOTE = "csv_quote";
    public static final String CSV_ESCAPE = "csv_escape";
    public static final String TRANSACTIONAL = "transactional";
    public static final String AUTO_PURGE = "auto_purge";

    private final List<PropertyMetadata<?>> tableProperties;

@@ -153,7 +154,8 @@ public HiveTableProperties(
                stringProperty(CSV_SEPARATOR, "CSV separator character", null, false),
                stringProperty(CSV_QUOTE, "CSV quote character", null, false),
                stringProperty(CSV_ESCAPE, "CSV escape character", null, false),
                booleanProperty(TRANSACTIONAL, "Table is transactional", null, false));
                booleanProperty(TRANSACTIONAL, "Table is transactional", null, false),
                booleanProperty(AUTO_PURGE, "Skip trash when table or partition is deleted", null, false));
    }

    public List<PropertyMetadata<?>> getTableProperties()
@@ -277,4 +279,9 @@ public static Optional<Boolean> isTransactional(Map<String, Object> tablePropert
    {
        return Optional.ofNullable((Boolean) tableProperties.get(TRANSACTIONAL));
    }

    public static Optional<Boolean> isAutoPurge(Map<String, Object> tableProperties)
    {
        return Optional.ofNullable((Boolean) tableProperties.get(AUTO_PURGE));
    }
}
@@ -112,6 +112,7 @@
import static io.trino.plugin.hive.HiveQueryRunner.HIVE_CATALOG;
import static io.trino.plugin.hive.HiveQueryRunner.TPCH_SCHEMA;
import static io.trino.plugin.hive.HiveQueryRunner.createBucketedSession;
import static io.trino.plugin.hive.HiveTableProperties.AUTO_PURGE;
import static io.trino.plugin.hive.HiveTableProperties.BUCKETED_BY_PROPERTY;
import static io.trino.plugin.hive.HiveTableProperties.BUCKET_COUNT_PROPERTY;
import static io.trino.plugin.hive.HiveTableProperties.PARTITIONED_BY_PROPERTY;
@@ -1784,7 +1785,7 @@ public void testPropertiesTable()
        String queryId = (String) computeScalar("SELECT query_id FROM system.runtime.queries WHERE query LIKE 'CREATE TABLE test_show_properties%'");
        String nodeVersion = (String) computeScalar("SELECT node_version FROM system.runtime.nodes WHERE coordinator");
        assertQuery("SELECT * FROM \"test_show_properties$properties\"",
                "SELECT 'workaround for potential lack of HIVE-12730', 'ship_priority,order_status', '0.5', '" + queryId + "', '" + nodeVersion + "', 'false'");
                "SELECT 'workaround for potential lack of HIVE-12730', 'false', 'ship_priority,order_status', '0.5', '" + queryId + "', '" + nodeVersion + "', 'false'");
        assertUpdate("DROP TABLE test_show_properties");
    }
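The extra 'false' in the expected row above comes from the auto.purge parameter that is now written for every new table, so it also shows up in the hidden $properties table. An illustrative query against the table created in this test:

SELECT * FROM "test_show_properties$properties";
-- the result now includes an auto.purge value ('false' here, since the table was created without auto_purge)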

@@ -7935,6 +7936,37 @@ public void testExplainOfCreateTableAs()
        assertEquals(getOnlyElement(result.getOnlyColumnAsSet()), getExplainPlan(query, DISTRIBUTED));
    }

    @Test
    public void testAutoPurgeProperty()
    {
        String tableName = "test_auto_purge_property";
        @Language("SQL") String createTableSql = format("" +
                "CREATE TABLE %s " +
                "AS " +
                "SELECT * FROM tpch.tiny.customer",
                tableName);
        assertUpdate(createTableSql, 1500L);

        TableMetadata tableMetadataDefaults = getTableMetadata(catalog, TPCH_SCHEMA, tableName);
        assertEquals(tableMetadataDefaults.getMetadata().getProperties().get(AUTO_PURGE), null);

        assertUpdate("DROP TABLE " + tableName);

        @Language("SQL") String createTableSqlWithAutoPurge = format("" +
                "CREATE TABLE %s " +
                "WITH (" +
                " auto_purge = true" +
                ") AS " +
                "SELECT * FROM tpch.tiny.customer",
                tableName);
        assertUpdate(createTableSqlWithAutoPurge, 1500L);

        TableMetadata tableMetadataWithPurge = getTableMetadata(catalog, TPCH_SCHEMA, tableName);
        assertEquals(tableMetadataWithPurge.getMetadata().getProperties().get(AUTO_PURGE), true);

        assertUpdate("DROP TABLE " + tableName);
    }

    private static final Set<HiveStorageFormat> NAMED_COLUMN_ONLY_FORMATS = ImmutableSet.of(HiveStorageFormat.AVRO, HiveStorageFormat.JSON);

    @DataProvider
