Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -1877,7 +1877,11 @@ private synchronized void setDataNodeStorageCapacities(
final DataNode curDn,
long[][] storageCapacities) throws IOException {

if (storageCapacities == null || storageCapacities.length == 0) {
// Check for null/empty array AND ensure index is within bounds.
// DataNodes added without explicit storageCapacities won't have
// an entry in the storageCap list.
if (storageCapacities == null || storageCapacities.length == 0
|| curDnIdx >= storageCapacities.length) {
return;
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assumptions.assumeTrue;

import java.io.File;
Expand Down Expand Up @@ -417,4 +418,40 @@ public void testStartStopWithPorts() throws Exception {
}
}

/**
 * Test that restarting a DataNode that was added without explicit
 * storage capacities does not throw ArrayIndexOutOfBoundsException.
 *
 * @see <a href="https://issues.apache.org/jira/browse/HDFS-17870">HDFS-17870</a>
 */
@Test
@Timeout(value = 100)
public void testRestartDataNodeWithoutStorageCapacities() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  // Two storage volumes of 100 MB each for the initial DataNode.
  final long volumeCapacity = 1024 * 1024 * 100;
  final long[] initialCapacities = new long[] {volumeCapacity, volumeCapacity};

  // Build a cluster whose single initial DataNode DOES have explicit
  // storage capacities configured.
  try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(1)
      .storageCapacities(initialCapacities)
      .storageTypes(new StorageType[]{StorageType.DISK, StorageType.DISK})
      .storagesPerDatanode(2)
      .build()) {
    cluster.waitActive();

    // Bring up a second DataNode with no storage capacities supplied;
    // it therefore has no entry in the cluster's storage-capacity list.
    cluster.startDataNodes(conf, 1, true, null, null);
    cluster.waitActive();

    assertEquals(2, cluster.getDataNodes().size());

    // Restarting the capacity-less DataNode must succeed rather than
    // fail with ArrayIndexOutOfBoundsException (HDFS-17870).
    assertTrue(cluster.restartDataNode(1));
    cluster.waitActive();

    assertEquals(2, cluster.getDataNodes().size());
  }
}

}