Skip to content

Commit dd74eee

Browse files
authored
HDDS-12430. Document in ozone-default.xml the config keys moved from DFSConfigKeysLegacy (apache#7987)
1 parent 34041ca commit dd74eee

File tree

13 files changed

+146
-36
lines changed

13 files changed

+146
-36
lines changed

hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java

-2
Original file line numberDiff line numberDiff line change
@@ -412,8 +412,6 @@ private HddsConfigKeys() {
412412
"hdds.datanode.dns.nameserver";
413413
public static final String HDDS_DATANODE_HOST_NAME_KEY =
414414
"hdds.datanode.hostname";
415-
public static final String HDDS_DATANODE_DATA_DIR_KEY =
416-
"hdds.datanode.data.dir";
417415
public static final String HDDS_DATANODE_USE_DN_HOSTNAME =
418416
"hdds.datanode.use.datanode.hostname";
419417
public static final boolean HDDS_DATANODE_USE_DN_HOSTNAME_DEFAULT = false;

hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -387,7 +387,7 @@ private static void addDeprecatedKeys() {
387387
new DeprecationDelta("dfs.datanode.hostname",
388388
HddsConfigKeys.HDDS_DATANODE_HOST_NAME_KEY),
389389
new DeprecationDelta("dfs.datanode.data.dir",
390-
HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY),
390+
ScmConfigKeys.HDDS_DATANODE_DIR_KEY),
391391
new DeprecationDelta("dfs.datanode.use.datanode.hostname",
392392
HddsConfigKeys.HDDS_DATANODE_USE_DN_HOSTNAME),
393393
new DeprecationDelta("dfs.xframe.enabled",

hadoop-hdds/common/src/main/resources/ozone-default.xml

+123-2
Original file line numberDiff line numberDiff line change
@@ -171,8 +171,7 @@
171171
<name>hdds.datanode.dir</name>
172172
<value/>
173173
<tag>OZONE, CONTAINER, STORAGE, MANAGEMENT</tag>
174-
<description>Determines where on the local filesystem HDDS data will be
175-
stored. Defaults to hdds.datanode.data.dir if not specified.
174+
<description>Determines where on the local filesystem HDDS data will be stored.
176175
The directories should be tagged with corresponding storage types
177176
([SSD]/[DISK]/[ARCHIVE]/[RAM_DISK]) for storage policies. The default
178177
storage type will be DISK if the directory does not have a storage type
@@ -4553,4 +4552,126 @@
45534552
allowing for better identification and analysis of performance issues.
45544553
</description>
45554554
</property>
4555+
4556+
<property>
4557+
<name>hdds.datanode.dns.interface</name>
4558+
<value>default</value>
4559+
<tag>OZONE, DATANODE</tag>
4560+
<description>
4561+
The name of the Network Interface from which a Datanode should
4562+
report its IP address. e.g. eth2. This setting may be required for some
4563+
multi-homed nodes where the Datanodes are assigned multiple hostnames
4564+
and it is desirable for the Datanodes to use a non-default hostname.
4565+
</description>
4566+
</property>
4567+
<property>
4568+
<name>hdds.datanode.dns.nameserver</name>
4569+
<value>default</value>
4570+
<tag>OZONE, DATANODE</tag>
4571+
<description>
4572+
The host name or IP address of the name server (DNS) which a Datanode
4573+
should use to determine its own host name.
4574+
</description>
4575+
</property>
4576+
<property>
4577+
<name>hdds.datanode.hostname</name>
4578+
<value/>
4579+
<tag>OZONE, DATANODE</tag>
4580+
<description>
4581+
Optional. The hostname for the Datanode containing this
4582+
configuration file. Will be different for each machine.
4583+
Defaults to current hostname.
4584+
</description>
4585+
</property>
4586+
<property>
4587+
<name>hdds.datanode.use.datanode.hostname</name>
4588+
<value>false</value>
4589+
<tag>OZONE, DATANODE</tag>
4590+
<description>
4591+
Whether Datanodes should use Datanode hostnames when
4592+
connecting to other Datanodes for data transfer.
4593+
</description>
4594+
</property>
4595+
<property>
4596+
<name>hdds.xframe.enabled</name>
4597+
<value>true</value>
4598+
<tag>OZONE, HDDS</tag>
4599+
<description>
4600+
If true, then enables protection against clickjacking by returning
4601+
X_FRAME_OPTIONS header value set to SAMEORIGIN.
4602+
Clickjacking protection prevents an attacker from using transparent or
4603+
opaque layers to trick a user into clicking on a button
4604+
or link on another page.
4605+
</description>
4606+
</property>
4607+
<property>
4608+
<name>hdds.xframe.value</name>
4609+
<value>SAMEORIGIN</value>
4610+
<tag>OZONE, HDDS</tag>
4611+
<description>
4612+
This configuration value allows the user to specify the value for the
4613+
X-FRAME-OPTIONS. The possible values for this field are
4614+
DENY, SAMEORIGIN and ALLOW-FROM. Any other value will throw an
4615+
exception when Datanodes are starting up.
4616+
</description>
4617+
</property>
4618+
<property>
4619+
<name>hdds.metrics.session-id</name>
4620+
<value/>
4621+
<tag>OZONE, HDDS</tag>
4622+
<description>
4623+
Get the user-specified session identifier. The default is the empty string.
4624+
The session identifier is used to tag metric data that is reported to some
4625+
performance metrics system via the org.apache.hadoop.metrics API. The
4626+
session identifier is intended, in particular, for use by Hadoop-On-Demand
4627+
(HOD) which allocates a virtual Hadoop cluster dynamically and transiently.
4628+
HOD will set the session identifier by modifying the mapred-site.xml file
4629+
before starting the cluster.
4630+
When not running under HOD, this identifier is expected to remain set to
4631+
the empty string.
4632+
</description>
4633+
</property>
4634+
<property>
4635+
<name>hdds.datanode.kerberos.principal</name>
4636+
<value/>
4637+
<tag>OZONE, DATANODE</tag>
4638+
<description>
4639+
The Datanode service principal. This is typically set to
4640+
dn/_HOST@REALM.TLD. Each Datanode will substitute _HOST with its
4641+
own fully qualified hostname at startup. The _HOST placeholder
4642+
allows using the same configuration setting on all Datanodes.
4643+
</description>
4644+
</property>
4645+
<property>
4646+
<name>hdds.datanode.kerberos.keytab.file</name>
4647+
<value/>
4648+
<tag>OZONE, DATANODE</tag>
4649+
<description>
4650+
The keytab file used by each Datanode daemon to login as its
4651+
service principal. The principal name is configured with
4652+
hdds.datanode.kerberos.principal.
4653+
</description>
4654+
</property>
4655+
<property>
4656+
<name>hdds.metrics.percentiles.intervals</name>
4657+
<value></value>
4658+
<tag>OZONE, DATANODE</tag>
4659+
<description>
4660+
Comma-delimited set of integers denoting the desired rollover intervals
4661+
(in seconds) for percentile latency metrics on the Datanode.
4662+
By default, percentile latency metrics are disabled.
4663+
</description>
4664+
</property>
4665+
4666+
<property>
4667+
<name>net.topology.node.switch.mapping.impl</name>
4668+
<value>org.apache.hadoop.net.ScriptBasedMapping</value>
4669+
<tag>OZONE, SCM</tag>
4670+
<description>
4671+
The default implementation of the DNSToSwitchMapping. It
4672+
invokes a script specified in net.topology.script.file.name to resolve
4673+
node names. If the value for net.topology.script.file.name is not set, the
4674+
default value of DEFAULT_RACK is returned for all node names.
4675+
</description>
4676+
</property>
45564677
</configuration>

hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -98,7 +98,7 @@ public void setUp() throws IOException {
9898
conf.setBoolean(HDDS_CONTAINER_TOKEN_ENABLED, true);
9999

100100
String volumeDir = testDir + OZONE_URI_DELIMITER + "disk1";
101-
conf.set(HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY, volumeDir);
101+
conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, volumeDir);
102102
}
103103

104104
@ParameterizedTest

hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java

+2-1
Original file line numberDiff line numberDiff line change
@@ -53,6 +53,7 @@
5353
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
5454
import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos;
5555
import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB;
56+
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
5657
import org.apache.hadoop.hdds.security.SecurityConfig;
5758
import org.apache.hadoop.hdds.security.x509.certificate.client.DNCertificateClient;
5859
import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
@@ -97,7 +98,7 @@ public static void setUp() throws Exception {
9798
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getPath());
9899
//conf.set(ScmConfigKeys.OZONE_SCM_NAMES, "localhost");
99100
String volumeDir = testDir + "/disk1";
100-
conf.set(HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY, volumeDir);
101+
conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, volumeDir);
101102

102103
conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
103104
conf.setClass(OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY,

hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java

+2
Original file line numberDiff line numberDiff line change
@@ -55,6 +55,7 @@
5555
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatus.Status;
5656
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto.DeleteBlockTransactionResult;
5757
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
58+
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
5859
import org.apache.hadoop.ozone.container.ContainerTestHelper;
5960
import org.apache.hadoop.ozone.container.common.ContainerTestUtils;
6061
import org.apache.hadoop.ozone.container.common.helpers.BlockDeletingServiceMetrics;
@@ -301,6 +302,7 @@ public void testDeleteBlockCommandHandleWhenDeleteCommandQueuesFull()
301302
// Setting up the test environment
302303
OzoneConfiguration configuration = new OzoneConfiguration();
303304
configuration.set(HddsConfigKeys.OZONE_METADATA_DIRS, folder.toString());
305+
configuration.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, folder.toString());
304306
DatanodeDetails datanodeDetails = MockDatanodeDetails.randomDatanodeDetails();
305307
DatanodeConfiguration dnConf =
306308
configuration.getObject(DatanodeConfiguration.class);

hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -76,7 +76,7 @@ public void setup() throws Exception {
7676
String dataDirKey = volume1 + "," + volume2;
7777
volumes.add(volume1);
7878
volumes.add(volume2);
79-
conf.set(HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY, dataDirKey);
79+
conf.set(HDDS_DATANODE_DIR_KEY, dataDirKey);
8080
conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
8181
dataDirKey);
8282
initializeVolumeSet();

hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java

+3-3
Original file line numberDiff line numberDiff line change
@@ -90,7 +90,7 @@ public class TestVolumeSetDiskChecks {
9090
@AfterEach
9191
public void cleanup() {
9292
final Collection<String> dirs = conf.getTrimmedStringCollection(
93-
HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY);
93+
ScmConfigKeys.HDDS_DATANODE_DIR_KEY);
9494

9595
for (String d: dirs) {
9696
FileUtils.deleteQuietly(new File(d));
@@ -115,7 +115,7 @@ public void testOzoneDirsAreCreated() throws IOException {
115115

116116
// Verify that the Ozone dirs were created during initialization.
117117
Collection<String> dirs = conf.getTrimmedStringCollection(
118-
HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY);
118+
ScmConfigKeys.HDDS_DATANODE_DIR_KEY);
119119
for (String d : dirs) {
120120
assertTrue(new File(d).isDirectory());
121121
}
@@ -222,7 +222,7 @@ private OzoneConfiguration getConfWithDataNodeDirs(int numDirs) {
222222
for (int i = 0; i < numDirs; ++i) {
223223
dirs.add(new File(dir, randomAlphanumeric(10)).toString());
224224
}
225-
ozoneConf.set(HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY,
225+
ozoneConf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY,
226226
String.join(",", dirs));
227227

228228
final List<String> metaDirs = new ArrayList<>();

hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java

+2
Original file line numberDiff line numberDiff line change
@@ -43,6 +43,7 @@
4343
import org.apache.commons.io.IOUtils;
4444
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
4545
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
46+
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
4647
import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
4748
import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
4849
import org.apache.hadoop.ozone.container.common.impl.ContainerData;
@@ -74,6 +75,7 @@ class TestContainerImporter {
7475
@BeforeEach
7576
void setup() {
7677
conf = new OzoneConfiguration();
78+
conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, tempDir.getAbsolutePath());
7779
}
7880

7981
@Test

hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java

+7
Original file line numberDiff line numberDiff line change
@@ -24,8 +24,10 @@
2424
import static org.mockito.Mockito.doAnswer;
2525
import static org.mockito.Mockito.mock;
2626

27+
import java.io.File;
2728
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
2829
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
30+
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
2931
import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
3032
import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
3133
import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
@@ -38,17 +40,22 @@
3840
import org.apache.ratis.thirdparty.io.grpc.stub.StreamObserver;
3941
import org.junit.jupiter.api.BeforeEach;
4042
import org.junit.jupiter.api.Test;
43+
import org.junit.jupiter.api.io.TempDir;
4144

4245
/**
4346
* Test for {@link SendContainerRequestHandler}.
4447
*/
4548
class TestSendContainerRequestHandler {
4649

50+
@TempDir
51+
private File tempDir;
52+
4753
private OzoneConfiguration conf;
4854

4955
@BeforeEach
5056
void setup() {
5157
conf = new OzoneConfiguration();
58+
conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, tempDir.getAbsolutePath());
5259
}
5360

5461
@Test

hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java

+3-10
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,6 @@
1717

1818
package org.apache.hadoop.hdds.utils;
1919

20-
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY;
2120
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
2221
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL_DEFAULT;
2322
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_RECON_HEARTBEAT_INTERVAL;
@@ -404,16 +403,10 @@ public static Collection<String> getOzoneDatanodeRatisDirectory(
404403
return rawLocations;
405404
}
406405

407-
public static Collection<String> getDatanodeStorageDirs(
408-
ConfigurationSource conf) {
409-
Collection<String> rawLocations = conf.getTrimmedStringCollection(
410-
HDDS_DATANODE_DIR_KEY);
411-
if (rawLocations.isEmpty()) {
412-
rawLocations = conf.getTrimmedStringCollection(HDDS_DATANODE_DATA_DIR_KEY);
413-
}
406+
public static Collection<String> getDatanodeStorageDirs(ConfigurationSource conf) {
407+
Collection<String> rawLocations = conf.getTrimmedStringCollection(HDDS_DATANODE_DIR_KEY);
414408
if (rawLocations.isEmpty()) {
415-
throw new IllegalArgumentException("No location configured in either "
416-
+ HDDS_DATANODE_DIR_KEY + " or " + HDDS_DATANODE_DATA_DIR_KEY);
409+
throw new IllegalArgumentException("No location configured in " + HDDS_DATANODE_DIR_KEY);
417410
}
418411
return rawLocations;
419412
}

hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java

+1-13
Original file line numberDiff line numberDiff line change
@@ -132,19 +132,7 @@ private void addPropertiesNotInXml() {
132132
HddsConfigKeys.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT,
133133
OzoneConfigKeys.HDDS_SCM_CLIENT_RPC_TIME_OUT,
134134
OzoneConfigKeys.HDDS_SCM_CLIENT_MAX_RETRY_TIMEOUT,
135-
OzoneConfigKeys.HDDS_SCM_CLIENT_FAILOVER_MAX_RETRY,
136-
HddsConfigKeys.HDDS_DATANODE_DNS_INTERFACE_KEY,
137-
HddsConfigKeys.HDDS_DATANODE_DNS_NAMESERVER_KEY,
138-
HddsConfigKeys.HDDS_DATANODE_HOST_NAME_KEY,
139-
HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY,
140-
HddsConfigKeys.HDDS_DATANODE_USE_DN_HOSTNAME,
141-
HddsConfigKeys.HDDS_XFRAME_OPTION_ENABLED,
142-
HddsConfigKeys.HDDS_XFRAME_OPTION_VALUE,
143-
HddsConfigKeys.HDDS_METRICS_SESSION_ID_KEY,
144-
ScmConfigKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
145-
HddsConfigKeys.HDDS_DATANODE_KERBEROS_PRINCIPAL_KEY,
146-
HddsConfigKeys.HDDS_DATANODE_KERBEROS_KEYTAB_FILE_KEY,
147-
HddsConfigKeys.HDDS_METRICS_PERCENTILES_INTERVALS_KEY
135+
OzoneConfigKeys.HDDS_SCM_CLIENT_FAILOVER_MAX_RETRY
148136
));
149137
}
150138
}

hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java

-2
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,6 @@
1818
package org.apache.hadoop.ozone;
1919

2020
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_CLIENT_ADDRESS_KEY;
21-
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY;
2221
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_HTTP_ADDRESS_KEY;
2322
import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
2423
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED;
@@ -96,7 +95,6 @@ public OzoneConfiguration apply(OzoneConfiguration conf) throws IOException {
9695
}
9796
String reservedSpaceString = String.join(",", reservedSpaceList);
9897
String listOfDirs = String.join(",", dataDirs);
99-
dnConf.set(HDDS_DATANODE_DATA_DIR_KEY, listOfDirs);
10098
dnConf.set(HDDS_DATANODE_DIR_KEY, listOfDirs);
10199
dnConf.set(HDDS_DATANODE_DIR_DU_RESERVED, reservedSpaceString);
102100

0 commit comments

Comments
 (0)