diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
index 87707f75dc4..4d630243e51 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -401,4 +401,7 @@ private HddsConfigKeys() {
       "hdds.datanode.slow.op.warning.threshold";
   public static final String HDDS_DATANODE_SLOW_OP_WARNING_THRESHOLD_DEFAULT =
       "500ms";
+
+  public static final String OZONE_DATANODE_IO_METRICS_PERCENTILES_INTERVALS_SECONDS_KEY =
+      "ozone.volume.io.percentiles.intervals.seconds";
 }
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index bc90a87b11e..f3e45c47eef 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -4544,4 +4544,16 @@
       maximum number of buckets across all volumes.
     </description>
   </property>
+  <property>
+    <name>ozone.volume.io.percentiles.intervals.seconds</name>
+    <value>60</value>
+    <tag>OZONE, DATANODE</tag>
+    <description>
+      This setting specifies the rolling window (in seconds) over which
+      percentile performance metrics are computed. It helps track the read
+      and write performance of DataNodes in real time, allowing better
+      identification and analysis of performance issues.
+    </description>
+  </property>
 </configuration>
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBean.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBean.java
index d36fcdb6fc7..9c077a8e27b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBean.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBean.java
@@ -26,4 +26,32 @@
  */
 @InterfaceAudience.Private
 public interface DNMXBean extends ServiceRuntimeInfo {
+
+  /**
+   * Gets the datanode hostname.
+   *
+   * @return the hostname of the datanode
+   */
+  String getHostname();
+
+  /**
+   * Gets the client rpc port.
+   *
+   * @return the client rpc port
+   */
+  String getClientRpcPort();
+
+  /**
+   * Gets the http port.
+   *
+   * @return the http port
+   */
+  String getHttpPort();
+
+  /**
+   * Gets the https port.
+   *
+   * @return the https port
+   */
+  String getHttpsPort();
 }
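For illustration, a minimal sketch of how the new key is consumed; `OzoneConfiguration#getInts` is the same call `HddsVolume` makes later in this patch, while the `"60,300"` override is an assumed example value rather than the shipped default:

```java
// Hedged example: reading the percentile windows from configuration.
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_DATANODE_IO_METRICS_PERCENTILES_INTERVALS_SECONDS_KEY;

public final class PercentileIntervalsExample {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Hypothetical override: track latency percentiles over rolling
    // 60s and 300s windows instead of the single default 60s window.
    conf.set(OZONE_DATANODE_IO_METRICS_PERCENTILES_INTERVALS_SECONDS_KEY, "60,300");
    int[] intervals = conf.getInts(OZONE_DATANODE_IO_METRICS_PERCENTILES_INTERVALS_SECONDS_KEY);
    for (int interval : intervals) {
      System.out.println("percentile window: " + interval + "s");
    }
  }
}
```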
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBeanImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBeanImpl.java
index f7b484c6bb3..5a0a4556636 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBeanImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBeanImpl.java
@@ -25,8 +25,53 @@
  * This is the JMX management class for DN information.
  */
 public class DNMXBeanImpl extends ServiceRuntimeInfoImpl implements DNMXBean {
-  public DNMXBeanImpl(
-      VersionInfo versionInfo) {
+
+  private String hostName;
+  private String clientRpcPort;
+  private String httpPort;
+  private String httpsPort;
+
+  public DNMXBeanImpl(VersionInfo versionInfo) {
     super(versionInfo);
   }
+
+  @Override
+  public String getHostname() {
+    return hostName;
+  }
+
+  @Override
+  public String getClientRpcPort() {
+    return clientRpcPort;
+  }
+
+  @Override
+  public String getHttpPort() {
+    return httpPort;
+  }
+
+  @Override
+  public String getHttpsPort() {
+    return httpsPort;
+  }
+
+  public void setHttpPort(String httpPort) {
+    this.httpPort = httpPort;
+  }
+
+  public void setHostName(String hostName) {
+    this.hostName = hostName;
+  }
+
+  public void setClientRpcPort(String rpcPort) {
+    this.clientRpcPort = rpcPort;
+  }
+
+  public String getHostName() {
+    return hostName;
+  }
+
+  public void setHttpsPort(String httpsPort) {
+    this.httpsPort = httpsPort;
+  }
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
index 55aeb466e7f..de21e37503a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
@@ -228,6 +228,7 @@ public String getNamespace() {
       String ip = InetAddress.getByName(hostname).getHostAddress();
       datanodeDetails = initializeDatanodeDetails();
       datanodeDetails.setHostName(hostname);
+      serviceRuntimeInfo.setHostName(hostname);
       datanodeDetails.setIpAddress(ip);
       datanodeDetails.setVersion(
           HddsVersionInfo.HDDS_VERSION_INFO.getVersion());
@@ -300,23 +301,30 @@ public String getNamespace() {
       httpServer = new HddsDatanodeHttpServer(conf);
       httpServer.start();
       HttpConfig.Policy policy = HttpConfig.getHttpPolicy(conf);
+
       if (policy.isHttpEnabled()) {
-        datanodeDetails.setPort(DatanodeDetails.newPort(HTTP,
-            httpServer.getHttpAddress().getPort()));
+        int httpPort = httpServer.getHttpAddress().getPort();
+        datanodeDetails.setPort(DatanodeDetails.newPort(HTTP, httpPort));
+        serviceRuntimeInfo.setHttpPort(String.valueOf(httpPort));
       }
+
       if (policy.isHttpsEnabled()) {
-        datanodeDetails.setPort(DatanodeDetails.newPort(HTTPS,
-            httpServer.getHttpsAddress().getPort()));
+        int httpsPort = httpServer.getHttpsAddress().getPort();
+        datanodeDetails.setPort(DatanodeDetails.newPort(HTTPS, httpsPort));
+        serviceRuntimeInfo.setHttpsPort(String.valueOf(httpsPort));
       }
+
     } catch (Exception ex) {
       LOG.error("HttpServer failed to start.", ex);
     }
-
     clientProtocolServer = new HddsDatanodeClientProtocolServer(
         datanodeDetails, conf, HddsVersionInfo.HDDS_VERSION_INFO,
         reconfigurationHandler);
+
+    int clientRpcPort = clientProtocolServer.getClientRpcAddress().getPort();
+    serviceRuntimeInfo.setClientRpcPort(String.valueOf(clientRpcPort));
+
     // Get admin list
     String starterUser =
         UserGroupInformation.getCurrentUser().getShortUserName();
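To show how the populated bean surfaces, here is a hedged JMX client sketch; the RMI service URL, the port, and the exact ObjectName are assumptions (Hadoop metrics typically register beans under a `Hadoop:service=...,name=...` pattern), not values confirmed by this patch:

```java
// Hypothetical reader for the new DNMXBean attributes over a JMX connector.
import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public final class DnMXBeanReader {
  public static void main(String[] args) throws Exception {
    // Assumed connector address; the datanode must be started with a JMX port.
    JMXServiceURL url = new JMXServiceURL(
        "service:jmx:rmi:///jndi/rmi://localhost:9011/jmxrmi");
    try (JMXConnector jmxc = JMXConnectorFactory.connect(url)) {
      MBeanServerConnection conn = jmxc.getMBeanServerConnection();
      // Assumed ObjectName; verify against the actual bean registration.
      ObjectName name =
          new ObjectName("Hadoop:service=HddsDatanode,name=DNMXBeanImpl");
      System.out.println("Hostname      = " + conn.getAttribute(name, "Hostname"));
      System.out.println("ClientRpcPort = " + conn.getAttribute(name, "ClientRpcPort"));
      System.out.println("HttpPort      = " + conn.getAttribute(name, "HttpPort"));
      System.out.println("HttpsPort     = " + conn.getAttribute(name, "HttpsPort"));
    }
  }
}
```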
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
index 15cc6245ddb..5335021da9e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
@@ -251,6 +251,21 @@ public Iterator<Container<?>> getContainerIterator(HddsVolume volume) {
         .iterator();
   }
 
+  /**
+   * Get the number of containers based on the given volume.
+   *
+   * @param volume hdds volume.
+   * @return number of containers
+   */
+  public long containerCount(HddsVolume volume) {
+    Preconditions.checkNotNull(volume);
+    Preconditions.checkNotNull(volume.getStorageID());
+    String volumeUuid = volume.getStorageID();
+    return containerMap.values().stream()
+        .filter(x -> volumeUuid.equals(x.getContainerData().getVolume()
+            .getStorageID())).count();
+  }
+
   /**
    * Return an containerMap iterator over {@link ContainerSet#containerMap}.
    * @return containerMap Iterator
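A short usage sketch of the new `containerCount` API follows; it assumes the datanode's `MutableVolumeSet` and `ContainerSet` are in scope, as they are in `OzoneContainer`, and is an illustration rather than code from this patch:

```java
// Hedged sketch: log the container count per HDDS volume.
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

final class VolumeContainerCountLogger {
  private static final Logger LOG =
      LoggerFactory.getLogger(VolumeContainerCountLogger.class);

  static void logContainerCounts(MutableVolumeSet volumeSet,
      ContainerSet containerSet) {
    for (StorageVolume v : volumeSet.getVolumesList()) {
      if (v instanceof HddsVolume) {
        HddsVolume hddsVolume = (HddsVolume) v;
        LOG.info("Volume {} hosts {} containers",
            hddsVolume.getStorageDir(),
            containerSet.containerCount(hddsVolume));
      }
    }
  }
}
```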
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
index c58aab2e5ba..5fced0e39b3 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
@@ -29,6 +29,7 @@
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.hdds.annotation.InterfaceStability;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
 import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
@@ -36,6 +37,7 @@
 import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
 import org.apache.hadoop.ozone.container.common.utils.RawDB;
 import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
+import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
 import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures;
 import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures.SchemaV3;
 import org.apache.hadoop.util.Time;
@@ -44,6 +46,7 @@
 
 import jakarta.annotation.Nullable;
 
+import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_DATANODE_IO_METRICS_PERCENTILES_INTERVALS_SECONDS_KEY;
 import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_NAME;
 import static org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil.initPerDiskDBStore;
 
@@ -80,6 +83,8 @@ public class HddsVolume extends StorageVolume {
   private final VolumeIOStats volumeIOStats;
   private final VolumeInfoMetrics volumeInfoMetrics;
 
+  private ContainerController controller;
+
   private final AtomicLong committedBytes = new AtomicLong(); // till Open containers become full
 
   // Mentions the type of volume
@@ -119,8 +124,10 @@ private HddsVolume(Builder b) throws IOException {
 
     if (!b.getFailedVolume() && getVolumeInfo().isPresent()) {
       this.setState(VolumeState.NOT_INITIALIZED);
+      ConfigurationSource conf = getConf();
+      int[] intervals = conf.getInts(OZONE_DATANODE_IO_METRICS_PERCENTILES_INTERVALS_SECONDS_KEY);
       this.volumeIOStats = new VolumeIOStats(b.getVolumeRootStr(),
-          this.getStorageDir().toString());
+          this.getStorageDir().toString(), intervals);
       this.volumeInfoMetrics =
           new VolumeInfoMetrics(b.getVolumeRootStr(), this);
 
@@ -382,6 +389,17 @@ public void loadDbStore(boolean readOnly) throws IOException {
         getStorageID());
   }
 
+  public void setController(ContainerController controller) {
+    this.controller = controller;
+  }
+
+  public long getContainers() {
+    if (controller != null) {
+      return controller.getContainerCount(this);
+    }
+    return 0;
+  }
+
   /**
    * Pick a DbVolume for HddsVolume and init db instance.
    * Use the HddsVolume directly if no DbVolume found.
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeIOStats.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeIOStats.java
index e22addd354f..2ce19c3bf19 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeIOStats.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeIOStats.java
@@ -21,7 +21,10 @@
 import org.apache.hadoop.metrics2.MetricsSystem;
 import org.apache.hadoop.metrics2.annotation.Metric;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MetricsRegistry;
 import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.metrics2.lib.MutableQuantiles;
+import org.apache.hadoop.metrics2.lib.MutableRate;
 
 /**
  * This class is used to track Volume IO stats for each HDDS Volume.
@@ -29,12 +32,23 @@
 public class VolumeIOStats {
   private String metricsSourceName = VolumeIOStats.class.getSimpleName();
   private String storageDirectory;
-  private @Metric MutableCounterLong readBytes;
-  private @Metric MutableCounterLong readOpCount;
-  private @Metric MutableCounterLong writeBytes;
-  private @Metric MutableCounterLong writeOpCount;
-  private @Metric MutableCounterLong readTime;
-  private @Metric MutableCounterLong writeTime;
+  private final MetricsRegistry registry = new MetricsRegistry("VolumeIOStats");
+  @Metric
+  private MutableCounterLong readBytes;
+  @Metric
+  private MutableCounterLong readOpCount;
+  @Metric
+  private MutableCounterLong writeBytes;
+  @Metric
+  private MutableCounterLong writeOpCount;
+  @Metric
+  private MutableRate readTime;
+  @Metric
+  private MutableQuantiles[] readLatencyQuantiles;
+  @Metric
+  private MutableRate writeTime;
+  @Metric
+  private MutableQuantiles[] writeLatencyQuantiles;
 
   @Deprecated
   public VolumeIOStats() {
@@ -44,9 +58,24 @@ public VolumeIOStats() {
   /**
    * @param identifier Typically, path to volume root. e.g. /data/hdds
    */
-  public VolumeIOStats(String identifier, String storageDirectory) {
+  public VolumeIOStats(String identifier, String storageDirectory, int[] intervals) {
     this.metricsSourceName += '-' + identifier;
     this.storageDirectory = storageDirectory;
+
+    // Try initializing `readLatencyQuantiles` and `writeLatencyQuantiles`
+    if (intervals != null && intervals.length > 0) {
+      final int length = intervals.length;
+      readLatencyQuantiles = new MutableQuantiles[intervals.length];
+      writeLatencyQuantiles = new MutableQuantiles[intervals.length];
+      for (int i = 0; i < length; i++) {
+        readLatencyQuantiles[i] = registry.newQuantiles(
+            "readLatency" + intervals[i] + "s",
+            "Read Data File Io Latency in ms", "ops", "latency", intervals[i]);
+        writeLatencyQuantiles[i] = registry.newQuantiles(
+            "writeLatency" + intervals[i] + "s",
+            "Write Data File Io Latency in ms", "ops", "latency", intervals[i]);
+      }
+    }
     init();
   }
 
@@ -99,7 +128,12 @@ public void incWriteOpCount() {
    * @param time
    */
  public void incReadTime(long time) {
-    readTime.incr(time);
+    readTime.add(time);
+    if (readLatencyQuantiles != null) {
+      for (MutableQuantiles q : readLatencyQuantiles) {
+        q.add(time);
+      }
+    }
   }
 
   /**
@@ -107,7 +141,12 @@ public void incReadTime(long time) {
    * @param time
    */
   public void incWriteTime(long time) {
-    writeTime.incr(time);
+    writeTime.add(time);
+    if (writeLatencyQuantiles != null) {
+      for (MutableQuantiles q : writeLatencyQuantiles) {
+        q.add(time);
+      }
+    }
   }
 
   /**
@@ -147,7 +186,7 @@ public long getWriteOpCount() {
    * @return long
    */
   public long getReadTime() {
-    return readTime.value();
+    return (long) readTime.lastStat().total();
   }
 
   /**
@@ -155,7 +194,7 @@ public long getReadTime() {
    * @return long
    */
   public long getWriteTime() {
-    return writeTime.value();
+    return (long) writeTime.lastStat().total();
   }
 
   @Metric
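For intuition, a hedged sketch of what the new constructor produces: with windows of 60 and 300 seconds, `MetricsRegistry.newQuantiles` creates quantile sources named `readLatency60s`, `readLatency300s`, and so on, whose percentile gauges surface over JMX under names like `ReadLatency60s90thPercentileLatency` (the attribute names the new iostatus page binds to). The interval values below are arbitrary examples:

```java
// Hedged illustration; VolumeIOStats is the class patched above.
int[] intervals = {60, 300};
VolumeIOStats stats = new VolumeIOStats("vol1", "/data/hdds", intervals);
stats.incReadTime(12);   // feeds the readTime MutableRate and each rolling quantile
stats.incWriteTime(25);  // likewise for the write-side metrics
```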
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java
index 68140600db9..cd31b8063d3 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java
@@ -37,6 +37,7 @@ public class VolumeInfoMetrics {
   private final HddsVolume volume;
   @Metric("Returns the RocksDB compact times of the Volume")
   private MutableRate dbCompactLatency;
+  private long containers;
 
   /**
    * @param identifier Typically, path to volume root. E.g. /data/hdds
@@ -153,4 +154,11 @@ public void dbCompactTimesNanoSecondsIncr(long time) {
     dbCompactLatency.add(time);
   }
 
+  /**
+   * Return the Container Count of the Volume.
+   */
+  @Metric("Returns the Container Count of the Volume")
+  public long getContainers() {
+    return volume.getContainers();
+  }
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/BackgroundContainerDataScanner.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/BackgroundContainerDataScanner.java
index 327f0192243..8ff2e30876b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/BackgroundContainerDataScanner.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/BackgroundContainerDataScanner.java
@@ -63,6 +63,7 @@ public BackgroundContainerDataScanner(ContainerScannerConfiguration conf,
     throttler = new HddsDataTransferThrottler(conf.getBandwidthPerVolume());
     canceler = new Canceler();
     this.metrics = ContainerDataScannerMetrics.create(volume.toString());
+    this.metrics.setStorageDirectory(volume.toString());
     this.minScanGap = conf.getContainerScanMinGap();
   }
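The method-level `@Metric` getter used in `VolumeInfoMetrics` is worth a brief aside: the metrics system polls the annotated getter at each snapshot, so the gauge is computed live rather than stored in a counter. A hedged, self-contained sketch of that pattern (all names here are illustrative, not from the patch):

```java
// Hedged illustration of a live-computed gauge via a method-level @Metric.
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

@Metrics(about = "Example live-computed gauge", context = "dfs")
public class LiveGaugeExample {
  private final long[] data = {1, 2, 3};

  @Metric("Number of tracked entries")
  public long getEntryCount() {
    return data.length;  // recomputed at every metrics snapshot
  }

  public static void main(String[] args) {
    DefaultMetricsSystem.initialize("Example");
    DefaultMetricsSystem.instance().register(
        "LiveGaugeExample", "Example live gauge", new LiveGaugeExample());
  }
}
```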
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java
index 0db98a01d82..567741a98d8 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java
@@ -235,6 +235,16 @@ public Iterator<Container<?>> getContainers(HddsVolume volume) {
     return containerSet.getContainerIterator(volume);
   }
 
+  /**
+   * Get the number of containers based on the given volume.
+   *
+   * @param volume hdds volume.
+   * @return number of containers.
+   */
+  public long getContainerCount(HddsVolume volume) {
+    return containerSet.containerCount(volume);
+  }
+
   void updateDataScanTimestamp(long containerId, Instant timestamp)
       throws IOException {
     Container container = containerSet.getContainer(containerId);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScannerMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScannerMetrics.java
index a3f71d34ba1..76e71312aed 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScannerMetrics.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScannerMetrics.java
@@ -37,6 +37,8 @@ public final class ContainerDataScannerMetrics
   @Metric("disk bandwidth used by the container data scanner per volume")
   private MutableRate numBytesScanned;
 
+  private String storageDirectory;
+
   public double getNumBytesScannedMean() {
     return numBytesScanned.lastStat().mean();
   }
@@ -66,4 +68,13 @@ public static ContainerDataScannerMetrics create(final String volumeName) {
     return ms.register(name, null,
         new ContainerDataScannerMetrics(name, ms));
   }
+
+  @Metric("Returns the Directory name for the volume")
+  public String getStorageDirectory() {
+    return storageDirectory;
+  }
+
+  public void setStorageDirectory(final String volumeName) {
+    this.storageDirectory = volumeName;
+  }
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index 62196cdd87f..56c42338366 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -388,6 +388,18 @@ private void initContainerScanner(ContainerScannerConfiguration c) {
     }
   }
 
+  /**
+   * Injects the ContainerController into each HddsVolume, because the
+   * per-volume container count is obtained through the controller.
+   */
+  private void initHddsVolumeContainer() {
+    for (StorageVolume v : volumeSet.getVolumesList()) {
+      HddsVolume hddsVolume = (HddsVolume) v;
+      hddsVolume.setController(controller);
+    }
+  }
+
   private void initMetadataScanner(ContainerScannerConfiguration c) {
     if (this.metadataScanner == null) {
       this.metadataScanner =
@@ -486,6 +498,8 @@ public void start(String clusterId) throws IOException {
     blockDeletingService.start();
     recoveringContainerScrubbingService.start();
 
+    initHddsVolumeContainer();
+
     // mark OzoneContainer as INITIALIZED.
     initializingStatus.set(InitializingStatus.INITIALIZED);
   }
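The web pages added below consume these beans as JSON through the datanode's `/jmx` servlet, using the same queries that appear in dn.js. The query can equally be scripted, as in this hedged sketch; the `9882` HTTP port is the usual datanode default but an assumption here:

```java
// Hedged sketch: fetch the per-volume IO beans that dn.js queries.
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public final class VolumeIoStatsFetcher {
  public static void main(String[] args) throws Exception {
    HttpClient client = HttpClient.newHttpClient();
    HttpRequest request = HttpRequest.newBuilder(URI.create(
            "http://localhost:9882/jmx?qry=Hadoop:service=HddsDatanode,name=VolumeIOStats*"))
        .build();
    HttpResponse<String> response =
        client.send(request, HttpResponse.BodyHandlers.ofString());
    System.out.println(response.body()); // JSON: one bean per volume
  }
}
```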

diff --git a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn-overview.html b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn-overview.html
index fd3d7407d23..4f51b423e8a 100644
--- a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn-overview.html
+++ b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn-overview.html
@@ -22,8 +22,32 @@
 
+<h2>HeartBeat Information</h2>
+<table class="table table-bordered table-striped">
+  <thead>
+  <tr>
+    <th>Address</th>
+    <th>Last Successful HeartBeat</th>
+    <th>Missed Count</th>
+    <th>State</th>
+    <th>Type</th>
+    <th>Version Number</th>
+  </tr>
+  </thead>
+  <tbody ng-repeat="scms in $ctrl.heartbeatmetrics">
+  <tr ng-repeat="scm in scms.SCMServers">
+    <td>{{scm.addressString}}</td>
+    <td>{{scm.lastSuccessfulHeartbeat}}</td>
+    <td>{{scm.missedCount}}</td>
+    <td>{{scm.state}}</td>
+    <td>{{scm.type}}</td>
+    <td>{{scm.versionNumber}}</td>
+  </tr>
+  </tbody>
+</table>
+
 <h2>Volume Information</h2>
 <table class="table table-bordered table-striped">
@@ -33,6 +57,7 @@
     <th>Available Space</th>
     <th>Reserved</th>
     <th>Total Capacity</th>
+    <th>Containers</th>
     <th>State</th>
   </tr>
   </thead>
@@ -45,6 +70,7 @@
     <td>{{volumeInfo.Available}}</td>
     <td>{{volumeInfo.Reserved}}</td>
     <td>{{volumeInfo.TotalCapacity}}</td>
+    <td>{{volumeInfo.Containers}}</td>
    <td>{{volumeInfo["tag.VolumeState"]}}</td>
   </tr>
   </tbody>
diff --git a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn-scanner.html b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn-scanner.html
new file mode 100644
index 00000000000..5c54a2aa0a7
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn-scanner.html
@@ -0,0 +1,47 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<!DOCTYPE html>
+<html lang="en">
+<head>
+  <title>DataNode Scanner Status</title>
+</head>
+<body>
+<h2>DataNode Scanner Information</h2>
+<table class="table table-bordered table-striped">
+  <thead>
+  <tr>
+    <th>Directory</th>
+    <th>NumBytesScannedNumOps</th>
+    <th>NumBytesScannedAvgTime</th>
+    <th>NumContainersScanned</th>
+    <th>NumScanIterations</th>
+    <th>NumUnHealthyContainers</th>
+  </tr>
+  </thead>
+  <tbody>
+  <tr ng-repeat="scanner in scannerStatusCtrl.dnscanner">
+    <td>{{scanner["tag.StorageDirectory"]}}</td>
+    <td>{{scanner.NumBytesScannedNumOps}}</td>
+    <td>{{scanner.NumBytesScannedAvgTime | millisecondsToMinutes}}</td>
+    <td>{{scanner.NumContainersScanned}}</td>
+    <td>{{scanner.NumScanIterations}}</td>
+    <td>{{scanner.NumUnHealthyContainers}}</td>
+  </tr>
+  </tbody>
+</table>
+</body>
+</html>
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn.js b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn.js
index adc507acce9..547e566ef8a 100644
--- a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn.js
+++ b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn.js
@@ -36,20 +36,104 @@
           volume.TotalCapacity = transform(volume.TotalCapacity);
         })
       });
+
+      $http.get("jmx?qry=Hadoop:service=HddsDatanode,name=SCMConnectionManager")
+        .then(function (result) {
+          ctrl.heartbeatmetrics = result.data.beans;
+          ctrl.heartbeatmetrics.forEach(scm => {
+            var scmServers = scm.SCMServers;
+            scmServers.forEach(scmServer => {
+              scmServer.lastSuccessfulHeartbeat = convertTimestampToDate(scmServer.lastSuccessfulHeartbeat)
+            })
+          })
+        });
     }
   });
-  function transform(v) {
-    var UNITS = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'ZB'];
-    var prev = 0, i = 0;
-    while (Math.floor(v) > 0 && i < UNITS.length) {
+
+  // Register ioStatus Controller
+  angular.module('ozone').config(function ($routeProvider) {
+    $routeProvider.when('/iostatus', {
+      templateUrl: 'iostatus.html',
+      controller: 'IOStatusController as ioStatusCtrl',
+    });
+  });
+
+  angular.module('ozone')
+    .controller('IOStatusController', function ($http) {
+      var ctrl = this;
+      $http.get("jmx?qry=Hadoop:service=HddsDatanode,name=VolumeIOStats*")
+        .then(function (result) {
+          ctrl.dniostatus = result.data.beans;
+        });
+    });
+
+  // Register Scanner Controller
+  angular.module('ozone').config(function ($routeProvider) {
+    $routeProvider.when('/dn-scanner', {
+      templateUrl: 'dn-scanner.html',
+      controller: 'DNScannerController as scannerStatusCtrl',
+    });
+  });
+
+  angular.module('ozone')
+    .controller('DNScannerController', function ($http) {
+      var ctrl = this;
+      $http.get("jmx?qry=Hadoop:service=HddsDatanode,name=ContainerDataScannerMetrics*")
+        .then(function (result) {
+          ctrl.dnscanner = result.data.beans;
+        });
+    });
+
+  angular.module('ozone')
+    .filter('millisecondsToMinutes', function() {
+      return function(milliseconds) {
+        if (isNaN(milliseconds)) {
+          return 'Invalid input';
+        }
+        var minutes = Math.floor(milliseconds / 60000); // 1 minute = 60000 milliseconds
+        var seconds = Math.floor((milliseconds % 60000) / 1000);
+        return minutes + ' mins ' + seconds + ' secs';
+      };
+    });
+
+  angular.module('ozone')
+    .filter('twoDecimalPlaces', function() {
+      return function(input) {
+        if (isNaN(input)) {
+          return 'Invalid input';
+        }
+        return parseFloat(input).toFixed(2);
+      };
+    });
+
+  function transform(v) {
+    var UNITS = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'ZB'];
+    var prev = 0, i = 0;
+    while (Math.floor(v) > 0 && i < UNITS.length) {
       prev = v;
       v /= 1024;
       i += 1;
-    }
-    if (i > 0 && i < UNITS.length) {
+    }
+    if (i > 0 && i < UNITS.length) {
       v = prev;
       i -= 1;
-    }
-    return Math.round(v * 100) / 100 + ' ' + UNITS[i];
-  }
+    }
+    return Math.round(v * 100) / 100 + ' ' + UNITS[i];
+  }
+
+  function convertTimestampToDate(timestamp) {
+    if (!timestamp) return '';
+    var milliseconds = timestamp * 1000;
+
+    var date = new Date(milliseconds);
+
+    var year = date.getFullYear();
+    var month = date.getMonth() + 1;
+    var day = date.getDate();
+    var hours = date.getHours();
+    var minutes = date.getMinutes();
+    var seconds = date.getSeconds();
+
+    return `${year}-${month.toString().padStart(2, '0')}-${day.toString().padStart(2, '0')} ${hours.toString().padStart(2, '0')}:${minutes.toString().padStart(2, '0')}:${seconds.toString().padStart(2, '0')}`;
+  }
 })();
diff --git a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/index.html b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/index.html
index 1c32fe64e0e..0e1cbf21a00 100644
--- a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/index.html
+++ b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/index.html
@@ -49,11 +49,10 @@
     <title>HDDS Datanode Service</title>
     [navmenu markup updated here to link the new "#!/iostatus" and "#!/dn-scanner" routes; the original tag bodies were lost in extraction and are not reconstructed]
diff --git a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/iostatus.html b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/iostatus.html
new file mode 100644
index 00000000000..94916821bd8
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/iostatus.html
@@ -0,0 +1,76 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<!DOCTYPE html>
+<html lang="en">
+<head>
+  <title>DataNode IO Status</title>
+</head>
+<body>
+
+<h2>Read Performance</h2>
+<table class="table table-bordered table-striped">
+  <thead>
+  <tr>
+    <th>Directory</th>
+    <th>ReadBytes</th>
+    <th>ReadOpCount</th>
+    <th>ReadAvgTime</th>
+    <th>ReadLatency60s(P90)</th>
+    <th>ReadLatency60s(P95)</th>
+    <th>ReadLatency60s(P99)</th>
+  </tr>
+  </thead>
+  <tbody>
+  <tr ng-repeat="volumeInfo in ioStatusCtrl.dniostatus">
+    <td>{{volumeInfo["tag.StorageDirectory"]}}</td>
+    <td>{{volumeInfo.ReadBytes}}</td>
+    <td>{{volumeInfo.ReadOpCount}}</td>
+    <td>{{volumeInfo.ReadTimeAvgTime | twoDecimalPlaces}} ms</td>
+    <td>{{volumeInfo.ReadLatency60s90thPercentileLatency | twoDecimalPlaces}} ms</td>
+    <td>{{volumeInfo.ReadLatency60s95thPercentileLatency | twoDecimalPlaces}} ms</td>
+    <td>{{volumeInfo.ReadLatency60s99thPercentileLatency | twoDecimalPlaces}} ms</td>
+  </tr>
+  </tbody>
+</table>
+
+<h2>Write Performance</h2>
+<table class="table table-bordered table-striped">
+  <thead>
+  <tr>
+    <th>Directory</th>
+    <th>WriteBytes</th>
+    <th>WriteOpCount</th>
+    <th>WriteAvgTime</th>
+    <th>WriteLatency60s(P90)</th>
+    <th>WriteLatency60s(P95)</th>
+    <th>WriteLatency60s(P99)</th>
+  </tr>
+  </thead>
+  <tbody>
+  <tr ng-repeat="volumeInfo in ioStatusCtrl.dniostatus">
+    <td>{{volumeInfo["tag.StorageDirectory"]}}</td>
+    <td>{{volumeInfo.WriteBytes}}</td>
+    <td>{{volumeInfo.WriteOpCount}}</td>
+    <td>{{volumeInfo.WriteTimeAvgTime | twoDecimalPlaces}} ms</td>
+    <td>{{volumeInfo.WriteLatency60s90thPercentileLatency | twoDecimalPlaces}} ms</td>
+    <td>{{volumeInfo.WriteLatency60s95thPercentileLatency | twoDecimalPlaces}} ms</td>
+    <td>{{volumeInfo.WriteLatency60s99thPercentileLatency | twoDecimalPlaces}} ms</td>
+  </tr>
+  </tbody>
+</table>
+</body>
+</html>
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeIOStatsWithPrometheusSink.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeIOStatsWithPrometheusSink.java
index c8934bab416..1df886098ab 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeIOStatsWithPrometheusSink.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeIOStatsWithPrometheusSink.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.ozone.container.common.volume;
 
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.server.http.PrometheusMetricsSink;
 import org.apache.hadoop.metrics2.MetricsSystem;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
@@ -30,6 +31,7 @@
 
 import static org.assertj.core.api.Assertions.assertThat;
 import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_DATANODE_IO_METRICS_PERCENTILES_INTERVALS_SECONDS_KEY;
 
 /**
  * Test PrometheusMetricSink regarding VolumeIOStats.
@@ -54,11 +56,14 @@ public void tearDown() {
 
   @Test
   public void testMultipleVolumeIOMetricsExist() throws IOException {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    int[] intervals = conf.getInts(OZONE_DATANODE_IO_METRICS_PERCENTILES_INTERVALS_SECONDS_KEY);
+
     //GIVEN
     VolumeIOStats volumeIOStats1 = new VolumeIOStats("VolumeIOStat1",
-        "vol1/dir");
+        "vol1/dir", intervals);
     VolumeIOStats volumeIOStat2 = new VolumeIOStats("VolumeIOStat2",
-        "vol2/dir");
+        "vol2/dir", intervals);
 
     //WHEN
     String writtenMetrics = publishMetricsAndGetOutput();
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.css b/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.css
index 389d9d78f21..4988cc8eeb1 100644
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.css
+++ b/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.css
@@ -94,4 +94,23 @@ body {
 
 .scm-roles-background {
     background-color: #dcfbcd!important;
-}
\ No newline at end of file
+}
+.toggle-btn {
+    background: transparent; /* No background color */
+    color: #007bff; /* Button text color */
+    border: none; /* No border */
+    font-size: 12px; /* Font size for better readability */
+    cursor: pointer; /* Pointer cursor on hover */
+    padding: 5px 10px; /* Padding around the text */
+    margin-bottom: 5px; /* Space below the button */
+    transition: color 0.3s, transform 0.3s; /* Smooth transition for color and transform */
+}
+
+.toggle-btn:hover {
+    color: #0056b3; /* Darker color on hover */
+    transform: scale(1.1); /* Slightly scale up the button on hover */
+}
+
+.toggle-btn:focus {
+    outline: none; /* Remove default focus outline */
+}
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.js b/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.js
index a31078cfd7b..7bb93106284 100644
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.js
+++ b/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.js
@@ -48,8 +48,14 @@
     });
     angular.module('ozone').component('jvmParameters', {
         templateUrl: 'static/templates/jvm.html',
-        controller: function($http) {
+        controller: function($http, $scope) {
            var ctrl = this;
+
+            $scope.contentVisible = false;
+            $scope.toggleContent = function() {
+                $scope.contentVisible = !$scope.contentVisible;
+            };
+
             $http.get("jmx?qry=java.lang:type=Runtime")
                 .then(function(result) {
                     ctrl.jmx = result.data.beans[0];
@@ -245,7 +251,11 @@
     angular.module('ozone').component('navmenu', {
         bindings: {
-            metrics: '<'
+            metrics: '<',
+            iostatus: '<',
+            ioLinkHref: '@',
+            scanner: '<',
+            scannerLinkHref: '@',
         },
         templateUrl: 'static/templates/menu.html',
         controller: function($http) {
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html
index 9706ebdf6b3..c562ae7d9a2 100644
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html
+++ b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html
@@ -20,7 +20,16 @@
   <tr>
     <td>{{$ctrl.jmx.SystemProperties.java_vm_name}} {{$ctrl.jmx.SystemProperties.java_vm_version}}</td>
   </tr>
-  <td>Input arguments:</td>
-  <td><pre>{{$ctrl.jmx.InputArguments.join('\n')}}</pre></td>
+  <td>
+    Input arguments:
+    <button class="toggle-btn" ng-click="toggleContent()">
+      {{contentVisible ? 'Hide' : 'Show'}}
+    </button>
+  </td>
+  <td>
+    <pre ng-show="contentVisible">
+      {{$ctrl.jmx.InputArguments.join('\n')}}
+    </pre>
+  </td>
   </tr>
 </table>
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/menu.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/menu.html
index 95f1b4842f1..9a14f356d7a 100644
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/menu.html
+++ b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/menu.html
@@ -56,5 +56,7 @@
                 aria-hidden="true"></span></a>
         </li>
+        <li><a ng-href="{{$ctrl.ioLinkHref}}">IO Status</a></li>
+        <li><a ng-href="{{$ctrl.scannerLinkHref}}">Data Scanner</a></li>
     </ul>
 </nav>
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/overview.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/overview.html
index 7ff118b330e..2811e8c36a5 100644
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/overview.html
+++ b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/overview.html
@@ -14,7 +14,7 @@
   See the License for the specific language governing permissions and
   limitations under the License.
 -->
-<h1>Overview</h1>
+<h1>Overview ({{$ctrl.jmx.Hostname}})</h1>