Backport to branch(3) : Add time related types (#2474)
Co-authored-by: Vincent Guilpain <[email protected]>
feeblefakie and Torch3333 authored Jan 22, 2025
1 parent d607e18 commit a67c74d
Showing 194 changed files with 10,761 additions and 1,809 deletions.
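Most of the test changes shown below follow two recurring patterns: overriding isTimestampTypeSupported() to return false, and filtering DataType.TIMESTAMP out of the data-type lists the integration-test bases use to generate schemas. The following is a minimal illustrative sketch of that filtering pattern, not code from the commit; the class name TimestampFilterSketch and its main method are hypothetical, and it assumes the ScalarDB core artifact (which provides com.scalar.db.io.DataType) is on the classpath.

```java
// Illustrative sketch only: mirrors the getDataTypes()/getClusteringKeyTypes()
// overrides added in this commit, which drop TIMESTAMP from the generated
// test types where the storage does not support it on this branch.
import com.scalar.db.io.DataType;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class TimestampFilterSketch {

  // Start from every DataType value and exclude TIMESTAMP, exactly like the
  // stream filters in the test classes below.
  static List<DataType> dataTypesWithoutTimestamp() {
    return Arrays.stream(DataType.values())
        .filter(type -> type != DataType.TIMESTAMP)
        .collect(Collectors.toList());
  }

  public static void main(String[] args) {
    System.out.println(dataTypesWithoutTimestamp());
  }
}
```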
4 changes: 2 additions & 2 deletions build.gradle
@@ -31,8 +31,8 @@ subprojects {
     commonsDbcp2Version = '2.13.0'
     mysqlDriverVersion = '8.4.0'
     postgresqlDriverVersion = '42.7.5'
-    oracleDriverVersion = '21.16.0.0'
-    sqlserverDriverVersion = '11.2.3.jre8'
+    oracleDriverVersion = '23.6.0.24.10'
+    sqlserverDriverVersion = '12.8.1.jre8'
     sqliteDriverVersion = '3.48.0.0'
     yugabyteDriverVersion = '42.7.3-yb-2'
     mariadDbDriverVersion = '3.5.1'
@@ -16,4 +16,9 @@ protected String getSystemNamespaceName(Properties properties) {
         .getSystemNamespaceName()
         .orElse(DatabaseConfig.DEFAULT_SYSTEM_NAMESPACE_NAME);
   }
+
+  @Override
+  protected boolean isTimestampTypeSupported() {
+    return false;
+  }
 }
@@ -43,11 +43,17 @@ public void repairTable_ShouldDoNothing() throws ExecutionException {
     // Act
     assertThatCode(
             () ->
-                admin.repairTable(getNamespace(), getTable(), TABLE_METADATA, getCreationOptions()))
+                admin.repairTable(
+                    getNamespace(), getTable(), getTableMetadata(), getCreationOptions()))
         .doesNotThrowAnyException();

     // Assert
     assertThat(admin.tableExists(getNamespace(), getTable())).isTrue();
-    assertThat(admin.getTableMetadata(getNamespace(), getTable())).isEqualTo(TABLE_METADATA);
+    assertThat(admin.getTableMetadata(getNamespace(), getTable())).isEqualTo(getTableMetadata());
   }
+
+  @Override
+  protected boolean isTimestampTypeSupported() {
+    return false;
+  }
 }
@@ -9,4 +9,9 @@ public class CassandraColumnValueIntegrationTest
   protected Properties getProperties(String testName) {
     return CassandraEnv.getProperties(testName);
   }
+
+  @Override
+  protected boolean isTimestampTypeSupported() {
+    return false;
+  }
 }
@@ -25,4 +25,9 @@ protected boolean shouldMutate(
       return super.shouldMutate(initialColumn, columnToCompare, operator);
     }
   }
+
+  @Override
+  protected boolean isTimestampTypeSupported() {
+    return false;
+  }
 }
@@ -24,4 +24,9 @@ protected Map<String, String> getCreationOptions() {
   @Override
   @Disabled("Cross partition scan with ordering is not supported in Cassandra")
   public void scan_WithOrderingForNonPrimaryColumns_ShouldReturnProperResult() {}
+
+  @Override
+  protected boolean isTimestampTypeSupported() {
+    return false;
+  }
 }
@@ -1,7 +1,10 @@
 package com.scalar.db.storage.cassandra;

 import com.scalar.db.api.DistributedStorageMultipleClusteringKeyScanIntegrationTestBase;
+import com.scalar.db.io.DataType;
+import java.util.List;
 import java.util.Properties;
+import java.util.stream.Collectors;

 public class CassandraMultipleClusteringKeyScanIntegrationTest
     extends DistributedStorageMultipleClusteringKeyScanIntegrationTestBase {
@@ -10,4 +13,11 @@ public class CassandraMultipleClusteringKeyScanIntegrationTest
   protected Properties getProperties(String testName) {
     return CassandraEnv.getProperties(testName);
   }
+
+  @Override
+  protected List<DataType> getDataTypes() {
+    return super.getDataTypes().stream()
+        .filter(type -> type != DataType.TIMESTAMP)
+        .collect(Collectors.toList());
+  }
 }
@@ -1,12 +1,22 @@
 package com.scalar.db.storage.cassandra;

 import com.scalar.db.api.DistributedStorageMultiplePartitionKeyIntegrationTestBase;
+import com.scalar.db.io.DataType;
+import java.util.List;
 import java.util.Properties;
+import java.util.stream.Collectors;

 public class CassandraMultiplePartitionKeyIntegrationTest
     extends DistributedStorageMultiplePartitionKeyIntegrationTestBase {
   @Override
   protected Properties getProperties(String testName) {
     return CassandraEnv.getProperties(testName);
   }
+
+  @Override
+  protected List<DataType> getDataTypes() {
+    return super.getDataTypes().stream()
+        .filter(type -> type != DataType.TIMESTAMP)
+        .collect(Collectors.toList());
+  }
 }
@@ -15,4 +15,9 @@ protected Properties getProperties(String testName) {
   protected AdminTestUtils getAdminTestUtils(String testName) {
     return new CassandraAdminTestUtils(getProperties(testName));
   }
+
+  @Override
+  protected boolean isTimestampTypeSupported() {
+    return false;
+  }
 }
@@ -1,12 +1,22 @@
 package com.scalar.db.storage.cassandra;

 import com.scalar.db.api.DistributedStorageSecondaryIndexIntegrationTestBase;
+import com.scalar.db.io.DataType;
 import java.util.Properties;
+import java.util.Set;
+import java.util.stream.Collectors;

 public class CassandraSecondaryIndexIntegrationTest
     extends DistributedStorageSecondaryIndexIntegrationTestBase {
   @Override
   protected Properties getProperties(String testName) {
     return CassandraEnv.getProperties(testName);
   }
+
+  @Override
+  protected Set<DataType> getSecondaryIndexTypes() {
+    return super.getSecondaryIndexTypes().stream()
+        .filter(type -> type != DataType.TIMESTAMP)
+        .collect(Collectors.toSet());
+  }
 }
@@ -1,12 +1,22 @@
 package com.scalar.db.storage.cassandra;

 import com.scalar.db.api.DistributedStorageSingleClusteringKeyScanIntegrationTestBase;
+import com.scalar.db.io.DataType;
+import java.util.List;
 import java.util.Properties;
+import java.util.stream.Collectors;

 public class CassandraSingleClusteringKeyScanIntegrationTest
     extends DistributedStorageSingleClusteringKeyScanIntegrationTestBase {
   @Override
   protected Properties getProperties(String testName) {
     return CassandraEnv.getProperties(testName);
   }
+
+  @Override
+  protected List<DataType> getClusteringKeyTypes() {
+    return super.getClusteringKeyTypes().stream()
+        .filter(type -> type != DataType.TIMESTAMP)
+        .collect(Collectors.toList());
+  }
 }
@@ -1,12 +1,22 @@
 package com.scalar.db.storage.cassandra;

 import com.scalar.db.api.DistributedStorageSinglePartitionKeyIntegrationTestBase;
+import com.scalar.db.io.DataType;
+import java.util.List;
 import java.util.Properties;
+import java.util.stream.Collectors;

 public class CassandraSinglePartitionKeyIntegrationTest
     extends DistributedStorageSinglePartitionKeyIntegrationTestBase {
   @Override
   protected Properties getProperties(String testName) {
     return CassandraEnv.getProperties(testName);
   }
+
+  @Override
+  protected List<DataType> getPartitionKeyTypes() {
+    return super.getPartitionKeyTypes().stream()
+        .filter(type -> type != DataType.TIMESTAMP)
+        .collect(Collectors.toList());
+  }
 }
@@ -33,6 +33,11 @@ protected boolean isGroupCommitEnabled(String testName) {
         .isCoordinatorGroupCommitEnabled();
   }

+  @Override
+  protected boolean isTimestampTypeSupported() {
+    return false;
+  }
+
   @Override
   protected void extraCheckOnCoordinatorTable() {}
 }
@@ -43,14 +43,20 @@ public void repairTableAndCoordinatorTable_ShouldDoNothing() throws ExecutionExc
     // Act
     assertThatCode(
             () -> {
-              admin.repairTable(getNamespace(), getTable(), TABLE_METADATA, getCreationOptions());
+              admin.repairTable(
+                  getNamespace(), getTable(), getTableMetadata(), getCreationOptions());
               admin.repairCoordinatorTables(getCreationOptions());
             })
         .doesNotThrowAnyException();

     // Assert
     assertThat(admin.tableExists(getNamespace(), getTable())).isTrue();
-    assertThat(admin.getTableMetadata(getNamespace(), getTable())).isEqualTo(TABLE_METADATA);
+    assertThat(admin.getTableMetadata(getNamespace(), getTable())).isEqualTo(getTableMetadata());
     assertThat(admin.coordinatorTablesExist()).isTrue();
   }
+
+  @Override
+  protected boolean isTimestampTypeSupported() {
+    return false;
+  }
 }
@@ -9,4 +9,9 @@ public class ConsensusCommitIntegrationTestWithCassandra
   protected Properties getProps(String testName) {
     return ConsensusCommitCassandraEnv.getProperties(testName);
   }
+
+  @Override
+  protected boolean isTimestampTypeSupported() {
+    return false;
+  }
 }
@@ -23,4 +23,9 @@ protected Map<String, String> getCreationOptions() {
   protected String getSystemNamespaceName(Properties properties) {
     return DatabaseConfig.DEFAULT_SYSTEM_NAMESPACE_NAME;
   }
+
+  @Override
+  protected boolean isTimestampTypeSupported() {
+    return false;
+  }
 }
@@ -17,4 +17,9 @@ protected Properties getProps(String testName) {
   protected Map<String, String> getCreationOptions() {
     return Collections.singletonMap(CassandraAdmin.REPLICATION_FACTOR, "1");
   }
+
+  @Override
+  protected boolean isTimestampTypeSupported() {
+    return false;
+  }
 }
@@ -9,4 +9,9 @@ public class TwoPhaseConsensusCommitIntegrationTestWithCassandra
   protected Properties getProps1(String testName) {
     return ConsensusCommitCassandraEnv.getProperties(testName);
   }
+
+  @Override
+  protected boolean isTimestampTypeSupported() {
+    return false;
+  }
 }
@@ -35,7 +35,7 @@ public void repairTable_ForTableWithoutStoredProcedure_ShouldCreateStoredProcedu
     cosmosAdminTestUtils.getTableStoredProcedure(getNamespace(), getTable()).delete();

     // Act
-    admin.repairTable(getNamespace(), getTable(), TABLE_METADATA, getCreationOptions());
+    admin.repairTable(getNamespace(), getTable(), getTableMetadata(), getCreationOptions());

     // Assert
     assertThatCode(
@@ -35,7 +35,7 @@ public void repairTable_ForTableWithoutStoredProcedure_ShouldCreateStoredProcedu
     cosmosAdminTestUtils.getTableStoredProcedure(getNamespace(), getTable()).delete();

     // Act
-    admin.repairTable(getNamespace(), getTable(), TABLE_METADATA, getCreationOptions());
+    admin.repairTable(getNamespace(), getTable(), getTableMetadata(), getCreationOptions());

     // Assert
     assertThatCode(
@@ -1,11 +1,11 @@
 package com.scalar.db.storage.cosmos;

-import com.google.common.collect.ArrayListMultimap;
-import com.google.common.collect.ListMultimap;
 import com.scalar.db.api.DistributedStorageMultipleClusteringKeyScanIntegrationTestBase;
 import com.scalar.db.io.DataType;
+import java.util.List;
 import java.util.Map;
 import java.util.Properties;
+import java.util.stream.Collectors;

 public class CosmosMultipleClusteringKeyScanIntegrationTest
     extends DistributedStorageMultipleClusteringKeyScanIntegrationTestBase {
@@ -16,21 +16,11 @@ protected Properties getProperties(String testName) {
   }

   @Override
-  protected ListMultimap<DataType, DataType> getClusteringKeyTypes() {
+  protected List<DataType> getDataTypes() {
     // Return types without BLOB because blob is not supported for clustering key for now
-    ListMultimap<DataType, DataType> clusteringKeyTypes = ArrayListMultimap.create();
-    for (DataType firstClusteringKeyType : DataType.values()) {
-      if (firstClusteringKeyType == DataType.BLOB) {
-        continue;
-      }
-      for (DataType secondClusteringKeyType : DataType.values()) {
-        if (secondClusteringKeyType == DataType.BLOB) {
-          continue;
-        }
-        clusteringKeyTypes.put(firstClusteringKeyType, secondClusteringKeyType);
-      }
-    }
-    return clusteringKeyTypes;
+    return super.getDataTypes().stream()
+        .filter(type -> type != DataType.BLOB)
+        .collect(Collectors.toList());
   }

   @Override
@@ -2,10 +2,10 @@

 import com.scalar.db.api.DistributedStorageSingleClusteringKeyScanIntegrationTestBase;
 import com.scalar.db.io.DataType;
-import java.util.HashSet;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Map;
 import java.util.Properties;
-import java.util.Set;

 public class CosmosSingleClusteringKeyScanIntegrationTest
     extends DistributedStorageSingleClusteringKeyScanIntegrationTestBase {
@@ -15,9 +15,9 @@ protected Properties getProperties(String testName) {
   }

   @Override
-  protected Set<DataType> getClusteringKeyTypes() {
+  protected List<DataType> getClusteringKeyTypes() {
     // Return types without BLOB because blob is not supported for clustering key for now
-    Set<DataType> clusteringKeyTypes = new HashSet<>();
+    List<DataType> clusteringKeyTypes = new ArrayList<>();
     for (DataType dataType : DataType.values()) {
       if (dataType == DataType.BLOB) {
         continue;
@@ -1,13 +1,13 @@
 package com.scalar.db.storage.dynamo;

-import com.google.common.collect.ArrayListMultimap;
-import com.google.common.collect.ListMultimap;
 import com.scalar.db.api.DistributedStorageMultipleClusteringKeyScanIntegrationTestBase;
 import com.scalar.db.io.Column;
 import com.scalar.db.io.DataType;
+import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 import java.util.Random;
+import java.util.stream.Collectors;

 public class DynamoMultipleClusteringKeyScanIntegrationTest
     extends DistributedStorageMultipleClusteringKeyScanIntegrationTestBase {
@@ -18,21 +18,11 @@ protected Properties getProperties(String testName) {
   }

   @Override
-  protected ListMultimap<DataType, DataType> getClusteringKeyTypes() {
+  protected List<DataType> getDataTypes() {
     // Return types without BLOB because blob is not supported for clustering key for now
-    ListMultimap<DataType, DataType> clusteringKeyTypes = ArrayListMultimap.create();
-    for (DataType firstClusteringKeyType : DataType.values()) {
-      if (firstClusteringKeyType == DataType.BLOB) {
-        continue;
-      }
-      for (DataType secondClusteringKeyType : DataType.values()) {
-        if (secondClusteringKeyType == DataType.BLOB) {
-          continue;
-        }
-        clusteringKeyTypes.put(firstClusteringKeyType, secondClusteringKeyType);
-      }
-    }
-    return clusteringKeyTypes;
+    return super.getDataTypes().stream()
+        .filter(type -> type != DataType.BLOB)
+        .collect(Collectors.toList());
   }

   @Override