HDDS-12833. Remove the CodecRegistry field from DBStoreBuilder. #8327

Merged 1 commit on Apr 29, 2025
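
This change removes the CodecRegistry field from DBStoreBuilder: codecs are now derived solely from the column-family definitions of the DBDefinition handed to the builder, so the public addCodec/addProto2Codec methods and the HAUtils.loadDB helper are deleted. New newBuilder(conf, definition, name, metadataDir) overloads replace the setName/setPath call chains, build() returns RDBStore directly, and the RDBStore constructor becomes package-private so stores are only created through the builder.

For call sites, the migration looks roughly like the sketch below (conf, metaDir, and myDbDef are hypothetical placeholders, not names from this PR):

```java
// Before: name, path, and per-table codecs were wired up by hand,
// either through HAUtils.loadDB or explicit addCodec calls.
DBStore before = HAUtils.loadDB(conf, metaDir, "sample.db", myDbDef);

// After: the builder reads tables and codecs from the DBDefinition;
// name and directory may be null to fall back to the definition's defaults.
RDBStore after = DBStoreBuilder
    .newBuilder(conf, myDbDef, "sample.db", metaDir.toPath())
    .build();
```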
```diff
@@ -72,7 +72,7 @@ public void start(ConfigurationSource config)
     options.setInfoLogLevel(level);
     options.setMaxLogFileSize(dc.getRocksdbLogMaxFileSize());
     options.setKeepLogFileNum(dc.getRocksdbLogMaxFileNum());
-    this.store = initDBStore(DBStoreBuilder.newBuilder(config, dbDef)
+    this.store = initDBStore(DBStoreBuilder.newBuilder(config, dbDef, null, null)
         .setDBOptions(options)
         .setDefaultCFOptions(cfOptions)
         .setOpenReadOnly(openReadOnly), options, config);
```
```diff
@@ -32,7 +32,6 @@
 import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
-import java.nio.file.Paths;
 import java.security.cert.X509Certificate;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -59,11 +58,9 @@
 import org.apache.hadoop.hdds.scm.proxy.SCMContainerLocationFailoverProxyProvider;
 import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
 import org.apache.hadoop.hdds.tracing.TracingUtil;
-import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition;
 import org.apache.hadoop.hdds.utils.db.DBDefinition;
 import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
-import org.apache.hadoop.hdds.utils.db.RocksDBConfiguration;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
@@ -250,9 +247,7 @@ private static TransactionInfo getTransactionInfoFromDB(
       DBDefinition definition)
       throws IOException {
 
-    try (DBStore dbStore = loadDB(tempConfig, dbDir.toFile(),
-        dbName, definition)) {
-
+    try (DBStore dbStore = DBStoreBuilder.newBuilder(tempConfig, definition, dbName, dbDir).build()) {
       // Get the table name with TransactionInfo as the value. The transaction
       // info table names are different in OM and SCM.
@@ -307,27 +302,6 @@ public static boolean verifyTransactionInfo(TransactionInfo transactionInfo,
     return true;
   }
 
-  public static DBStore loadDB(OzoneConfiguration configuration, File metaDir,
-      String dbName, DBDefinition definition) throws IOException {
-    RocksDBConfiguration rocksDBConfiguration =
-        configuration.getObject(RocksDBConfiguration.class);
-    DBStoreBuilder dbStoreBuilder =
-        DBStoreBuilder.newBuilder(configuration, rocksDBConfiguration)
-            .setName(dbName)
-            .setPath(Paths.get(metaDir.getPath()));
-    // Add column family names and codecs.
-    for (DBColumnFamilyDefinition columnFamily : definition
-        .getColumnFamilies()) {
-
-      dbStoreBuilder.addTable(columnFamily.getName());
-      dbStoreBuilder
-          .addCodec(columnFamily.getKeyType(), columnFamily.getKeyCodec());
-      dbStoreBuilder
-          .addCodec(columnFamily.getValueType(), columnFamily.getValueCodec());
-    }
-    return dbStoreBuilder.build();
-  }
-
   public static File getMetaDir(DBDefinition definition,
       OzoneConfiguration configuration) {
     // Set metadata dirs.
```
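
Everything the deleted loadDB helper did (setName, setPath, plus addTable/addCodec per column family) now happens inside the builder, so the helper collapses to a one-liner. A sketch of the equivalence, reusing the deleted method's own parameter names:

```java
// Equivalent of the removed HAUtils.loadDB(configuration, metaDir, dbName, definition):
DBStore store = DBStoreBuilder
    .newBuilder(configuration, definition, dbName, metaDir.toPath())
    .build();
```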
```diff
@@ -29,7 +29,6 @@
 import static org.rocksdb.RocksDB.DEFAULT_COLUMN_FAMILY;
 
 import com.google.common.base.Preconditions;
-import com.google.protobuf.MessageLite;
 import java.io.File;
 import java.io.IOException;
 import java.nio.file.Path;
@@ -91,7 +90,6 @@ public final class DBStoreBuilder {
   // any options. On build, this will be replaced with defaultCfOptions.
   private Map<String, ManagedColumnFamilyOptions> cfOptions;
   private ConfigurationSource configuration;
-  private final CodecRegistry.Builder registry = CodecRegistry.newBuilder();
   private String rocksDbStat;
   // RocksDB column family write buffer size
   private long rocksDbCfWriteBufferSize;
@@ -112,28 +110,23 @@ public final class DBStoreBuilder {
    */
   public static DBStore createDBStore(ConfigurationSource configuration,
       DBDefinition definition) throws IOException {
-    return newBuilder(configuration, definition).build();
+    return newBuilder(configuration, definition, null, null).build();
   }
 
-  public static DBStoreBuilder newBuilder(ConfigurationSource configuration,
-      DBDefinition definition) {
-
-    DBStoreBuilder builder = newBuilder(configuration);
-    builder.applyDBDefinition(definition);
+  public static DBStoreBuilder newBuilder(ConfigurationSource conf, DBDefinition definition, File dbDir) {
+    return newBuilder(conf, definition, dbDir.getName(), dbDir.getParentFile().toPath());
+  }
 
-    return builder;
+  public static DBStoreBuilder newBuilder(ConfigurationSource conf, DBDefinition definition,
+      String name, Path metadataDir) {
+    return newBuilder(conf).apply(definition, name, metadataDir);
   }
 
   public static DBStoreBuilder newBuilder(ConfigurationSource configuration) {
-    return newBuilder(configuration,
+    return new DBStoreBuilder(configuration,
         configuration.getObject(RocksDBConfiguration.class));
   }
 
-  public static DBStoreBuilder newBuilder(ConfigurationSource configuration,
-      RocksDBConfiguration rocksDBConfiguration) {
-    return new DBStoreBuilder(configuration, rocksDBConfiguration);
-  }
-
   private DBStoreBuilder(ConfigurationSource configuration,
       RocksDBConfiguration rocksDBConfiguration) {
     cfOptions = new HashMap<>();
@@ -173,21 +166,23 @@ public static File getDBDirPath(DBDefinition definition,
     return metadataDir;
   }
 
-  private void applyDBDefinition(DBDefinition definition) {
-    // Set metadata dirs.
-    File metadataDir = getDBDirPath(definition, configuration);
+  private DBStoreBuilder apply(DBDefinition definition, String name, Path metadataDir) {
+    if (name == null) {
+      name = definition.getName();
+    }
+    setName(name);
 
-    setName(definition.getName());
-    setPath(Paths.get(metadataDir.getPath()));
+    // Set metadata dirs.
+    if (metadataDir == null) {
+      metadataDir = getDBDirPath(definition, configuration).toPath();
+    }
+    setPath(metadataDir);
 
     // Add column family names and codecs.
-    for (DBColumnFamilyDefinition columnFamily :
-        definition.getColumnFamilies()) {
-
+    for (DBColumnFamilyDefinition<?, ?> columnFamily : definition.getColumnFamilies()) {
       addTable(columnFamily.getName(), columnFamily.getCfOptions());
       addCodec(columnFamily.getKeyType(), columnFamily.getKeyCodec());
       addCodec(columnFamily.getValueType(), columnFamily.getValueCodec());
     }
+    return this;
   }
 
@@ -206,7 +201,7 @@ private void setDBOptionsProps(ManagedDBOptions dbOptions) {
    *
    * @return DBStore
    */
-  public DBStore build() throws IOException {
+  public RDBStore build() throws IOException {
     if (StringUtil.isBlank(dbname) || (dbPath == null)) {
       LOG.error("Required Parameter missing.");
       throw new IOException("Required parameter is missing. Please make sure "
@@ -229,7 +224,7 @@ public DBStore build() throws IOException {
       }
 
       return new RDBStore(dbFile, rocksDBOption, statistics, writeOptions, tableConfigs,
-          registry.build(), openReadOnly, dbJmxBeanNameName, enableCompactionDag,
+          openReadOnly, dbJmxBeanNameName, enableCompactionDag,
           maxDbUpdatesSizeThreshold, createCheckpointDirs, configuration,
           enableRocksDbMetrics);
     } finally {
@@ -257,15 +252,6 @@ public DBStoreBuilder addTable(String tableName,
     return this;
   }
 
-  public <T> DBStoreBuilder addCodec(Class<T> type, Codec<T> codec) {
-    registry.addCodec(type, codec);
-    return this;
-  }
-
-  public <T extends MessageLite> DBStoreBuilder addProto2Codec(T type) {
-    return addCodec((Class<T>)type.getClass(), Proto2Codec.get(type));
-  }
-
   public DBStoreBuilder setDBOptions(ManagedDBOptions option) {
     rocksDBOption = option;
     return this;
```
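
The null checks in apply() are what keep createDBStore behavior-compatible: a null name or metadataDir falls back to the DBDefinition's own name and its configured location. A sketch of the three ways into apply(), where myDef and the paths are hypothetical placeholders:

```java
// 1. Everything from the definition: the name comes from definition.getName(),
//    the directory from getDBDirPath(definition, configuration).
DBStoreBuilder.newBuilder(conf, myDef, null, null).build();

// 2. Directory given as a File: the db name is dbDir.getName() and the
//    metadata dir is its parent directory.
DBStoreBuilder.newBuilder(conf, myDef, new File("/var/ozone/scm.db")).build();

// 3. Name and metadata directory given explicitly.
DBStoreBuilder.newBuilder(conf, myDef, "scm.db", Paths.get("/var/ozone")).build();
```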
```diff
@@ -75,9 +75,9 @@ public class RDBStore implements DBStore {
   private final ManagedStatistics statistics;
 
   @SuppressWarnings("parameternumber")
-  public RDBStore(File dbFile, ManagedDBOptions dbOptions, ManagedStatistics statistics,
+  RDBStore(File dbFile, ManagedDBOptions dbOptions, ManagedStatistics statistics,
       ManagedWriteOptions writeOptions, Set<TableConfig> families,
-      CodecRegistry registry, boolean readOnly,
+      boolean readOnly,
       String dbJmxBeanName, boolean enableCompactionDag,
       long maxDbUpdatesSizeThreshold,
       boolean createCheckpointDirs,
```
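
Narrowing the constructor to package-private (and dropping the CodecRegistry parameter) means an RDBStore can only be instantiated through DBStoreBuilder or by helpers in the same package, as in the test-utility hunk further down.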
```diff
@@ -19,7 +19,6 @@
 
 import static org.junit.jupiter.api.Assertions.assertArrayEquals;
 import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertInstanceOf;
 import static org.junit.jupiter.api.Assertions.assertNotEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -203,11 +202,7 @@ public File getDBLocation(ConfigurationSource conf) {
       }
     };
 
-    try (DBStore dbStore = DBStoreBuilder.newBuilder(conf, sampleDB)
-        .setName("SampleStore").setPath(newFolder.toPath()).build()) {
-      assertInstanceOf(RDBStore.class, dbStore);
-
-      RDBStore rdbStore = (RDBStore) dbStore;
+    try (RDBStore rdbStore = DBStoreBuilder.newBuilder(conf, sampleDB, "SampleStore", newFolder.toPath()).build()) {
       Collection<RocksDatabase.ColumnFamily> cfFamilies =
           rdbStore.getColumnFamilies();
 
@@ -267,13 +262,9 @@ public File getDBLocation(ConfigurationSource conf) {
       }
     };
 
-    try (DBStore dbStore = DBStoreBuilder.newBuilder(conf, sampleDB)
-        .setName("SampleStore")
-        .disableDefaultCFAutoCompaction(disableAutoCompaction)
-        .setPath(newFolder.toPath()).build()) {
-      assertInstanceOf(RDBStore.class, dbStore);
-
-      RDBStore rdbStore = (RDBStore) dbStore;
+    try (RDBStore rdbStore = DBStoreBuilder.newBuilder(conf, sampleDB, "SampleStore", newFolder.toPath())
+        .disableDefaultCFAutoCompaction(disableAutoCompaction)
+        .build()) {
       Collection<RocksDatabase.ColumnFamily> cfFamilies =
           rdbStore.getColumnFamilies();
```
Expand Down
```diff
@@ -88,7 +88,7 @@ public static RDBStore newRDBStore(File dbFile, ManagedDBOptions options,
       long maxDbUpdatesSizeThreshold)
       throws IOException {
     return new RDBStore(dbFile, options, null, new ManagedWriteOptions(), families,
-        CodecRegistry.newBuilder().build(), false, null, false,
+        false, null, false,
         maxDbUpdatesSizeThreshold, true, null, true);
   }
```
```diff
@@ -47,6 +47,7 @@
 import org.apache.hadoop.hdds.utils.TransactionInfo;
 import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
 import org.apache.hadoop.hdds.utils.db.DBStore;
+import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.junit.jupiter.api.AfterAll;
@@ -130,8 +131,7 @@ public void testInstallCheckPoint() throws Exception {
     assertNotNull(parent);
     Path fileName = location.getFileName();
     assertNotNull(fileName);
-    final DBStore db = HAUtils.loadDB(conf, parent.toFile(),
-        fileName.toString(), SCMDBDefinition.get());
+    final DBStore db = DBStoreBuilder.newBuilder(conf, SCMDBDefinition.get(), location.toFile()).build();
     // Hack the transaction index in the checkpoint so as to ensure the
     // checkpointed transaction index is higher than when it was downloaded
     // from.
```
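
Note the File-based overload in use here: location points at the checkpoint db directory, so its name and parent path supply the dbName/dbDir pair that was previously passed to loadDB as separate arguments.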