adjust after testing new sosreports
thomas danan committed Dec 27, 2024
1 parent 18669b3 commit 6c6c857
Showing 3 changed files with 47 additions and 22 deletions.
MongoDataStore.py: 4 changes (3 additions, 1 deletion)
@@ -29,9 +29,11 @@ def __init__(self, mongodbDataStoreFiles):
                 if (re.search(r"\}\n\{", jsonStr)):
                     jsonStr = re.sub(r"\}\n\{", "},{", jsonStr)
                 jsonStr = f"[{jsonStr}]"
+                jsonStr = jsonStr.replace("Type \"it\" for more","")
                 dsStats = json.loads(jsonStr)
                 self.parseJsonArray(dsStats)
-            except:
+            except Exception as e:
+                # print(e)
                 continue
 
     def parseJsonArray(self, dsStats):
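
The new jsonStr.replace call strips the mongo shell's pagination prompt ("Type "it" for more") so json.loads no longer fails on paginated dumps, and the bare except now names the exception. A minimal standalone sketch of the repaired parsing step; parse_datastore_dump and the sample input are illustrative, not part of the repository:

import json
import re

def parse_datastore_dump(raw):
    # Join consecutive shell documents ("}\n{") with commas, then wrap them in a JSON array.
    if re.search(r"\}\n\{", raw):
        raw = re.sub(r"\}\n\{", "},{", raw)
    raw = f"[{raw}]"
    # Drop the mongo shell pagination prompt that would otherwise break json.loads.
    raw = raw.replace("Type \"it\" for more", "")
    return json.loads(raw)

# parse_datastore_dump('{"ns": "db.a"}\n{"ns": "db.b"}')
# -> [{'ns': 'db.a'}, {'ns': 'db.b'}]
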
PromStat.py: 33 changes (28 additions, 5 deletions)
@@ -26,7 +26,7 @@ def getInstances(self, filterKey, filterValue):
                 metricInstances.append(metric["metric"][filterKey])
         return metricInstances
 
-    def extractFilteredMetric(self, filterKey, filterValue, timeAggrOp, instanceAggrOp):
+    def extractMetricFilteredOnKey(self, filterKey, filterValue, timeAggrOp, instanceAggrOp):
         if self.fromJson is not None:
             metricValues = []
             for metric in self.fromJson["data"]["result"]:
@@ -37,12 +37,35 @@ def extractFilteredMetric(self, filterKey, filterValue, timeAggrOp, instanceAggrOp):
         else:
             return float('nan')
 
-    def extractMetric(self, timeAggrOp, instanceAggrOp):
+    def extractMetricFilteredOnKey(self, filterKey, filterValue, timeAggrOp, instanceAggrOp):
         if self.fromJson is not None:
-            metricValues = []
+            metricValues = dict()
+            for metric in self.fromJson["data"]["result"]:
+                if(re.search(filterValue, metric["metric"][filterKey])):
+                    groupByValue = metric["metric"][filterKey]
+                    value = self.aggregate(self.promValuesToNumArray(metric["values"]), timeAggrOp)
+                    if groupByValue not in metricValues.keys() or value > metricValues[groupByValue]:
+                        metricValues[groupByValue] = self.aggregate(self.promValuesToNumArray(metric["values"]), timeAggrOp)
+            metricValuesArray = []
+            for metricValue in metricValues:
+                metricValuesArray.append(metricValues[metricValue])
+            metricValue = self.aggregate(metricValuesArray, instanceAggrOp)
+            return float(metricValue)
+        else:
+            return float('nan')
+
+    def extractMetricGroupedByKey(self, timeAggrOp, instanceAggrOp, groupByKey):
+        if self.fromJson is not None:
+            metricValues = dict()
             for metric in self.fromJson["data"]["result"]:
-                metricValues.append(self.aggregate(self.promValuesToNumArray(metric["values"]), timeAggrOp))
-            metricValue= self.aggregate(metricValues, instanceAggrOp)
+                groupByValue = metric["metric"][groupByKey]
+                value = self.aggregate(self.promValuesToNumArray(metric["values"]), timeAggrOp)
+                if groupByValue not in metricValues.keys() or value > metricValues[groupByValue]:
+                    metricValues[groupByValue] = self.aggregate(self.promValuesToNumArray(metric["values"]), timeAggrOp)
+            metricValuesArray = []
+            for metricValue in metricValues:
+                metricValuesArray.append(metricValues[metricValue])
+            metricValue = self.aggregate(metricValuesArray, instanceAggrOp)
             return float(metricValue)
         else:
             return float('nan')
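
Both added methods follow the same pattern: aggregate each matching series over time, keep only the largest time-aggregated value per label value, then aggregate across label values. A self-contained sketch of that pattern, assuming the standard Prometheus range-query layout (data.result entries with metric labels and [timestamp, value] pairs); aggregate here is a simplified stand-in for PromStat.aggregate, and the string op names replace the AGGR enum used in the repository:

def aggregate(values, op):
    # Illustrative stand-in for PromStat.aggregate.
    return {"max": max, "min": min, "sum": sum}[op](values)

def group_and_reduce(result, group_by_key, time_op, instance_op):
    per_label = {}
    for series in result:
        label = series["metric"][group_by_key]
        value = aggregate([float(v) for _, v in series["values"]], time_op)
        # Keep the largest time-aggregated value seen for each label value.
        if label not in per_label or value > per_label[label]:
            per_label[label] = value
    return float(aggregate(list(per_label.values()), instance_op))

result = [
    {"metric": {"instance": "node-a"}, "values": [[0, "1"], [60, "3"]]},
    {"metric": {"instance": "node-b"}, "values": [[0, "2"], [60, "5"]]},
]
print(group_and_reduce(result, "instance", "max", "sum"))  # 8.0
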
SosUsage.py: 32 changes (16 additions, 16 deletions)
@@ -36,8 +36,8 @@ def extractVolumesAlmostFull(self, available, capacity, filterValue):
         volumes = available.getInstances( "persistentvolumeclaim", filterValue)
         almostFullVolumes = dict()
         for volume in volumes:
-            avail = available.extractFilteredMetric("persistentvolumeclaim", volume, AGGR.MAX, AGGR.MAX)
-            capa = capacity.extractFilteredMetric("persistentvolumeclaim", volume, AGGR.MAX, AGGR.MAX)
+            avail = available.extractMetricFilteredOnKey("persistentvolumeclaim", volume, AGGR.MAX, AGGR.MAX)
+            capa = capacity.extractMetricFilteredOnKey("persistentvolumeclaim", volume, AGGR.MAX, AGGR.MAX)
             usagePercent = (capa-avail)/capa
             if usagePercent > 0.80:
                 almostFullVolumes[volume] = '{:.1f}%'.format(usagePercent*100)
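
Only the call sites change here; the 0.80 threshold check is untouched. For reference, a tiny sketch of the per-volume check, with an illustrative helper name and sample byte counts:

def almost_full_label(avail, capa, threshold=0.80):
    # Usage ratio of one volume; return the formatted percentage when it crosses the threshold.
    usage_percent = (capa - avail) / capa
    return '{:.1f}%'.format(usage_percent * 100) if usage_percent > threshold else None

print(almost_full_label(10 * 2**30, 100 * 2**30))  # 90.0%
print(almost_full_label(50 * 2**30, 100 * 2**30))  # None
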
@@ -57,33 +57,33 @@ def extractUsage(self):
         xcore_bytes_net = PromStat(self.sosmetrics+'/hyperdrive_http_bytes_net.json')
         xcore_reclaimable = PromStat(self.sosmetrics+'/hyperdrive_http_bytes_reclaimable_free_space.json')
 
-        mongodb_capa = capacity.extractFilteredMetric("persistentvolumeclaim", 'datadir-data-db-mongodb-sharded-shard[0-9]-data-[0-9]', AGGR.MAX, AGGR.MAX)
-        mongodb_used = mongodb_capa - available.extractFilteredMetric("persistentvolumeclaim", 'datadir-data-db-mongodb-sharded-shard[0-9]-data-[0-9]', AGGR.MAX, AGGR.MAX)
+        mongodb_capa = capacity.extractMetricFilteredOnKey("persistentvolumeclaim", 'datadir-data-db-mongodb-sharded-shard[0-9]-data-[0-9]', AGGR.MAX, AGGR.MAX)
+        mongodb_used = mongodb_capa - available.extractMetricFilteredOnKey("persistentvolumeclaim", 'datadir-data-db-mongodb-sharded-shard[0-9]-data-[0-9]', AGGR.MAX, AGGR.MIN)
 
-        xcore_index_capa = capacity.extractFilteredMetric("persistentvolumeclaim", 'artesca-storage-service-(.*)-index', AGGR.MAX, AGGR.MAX)
-        xcore_index_used = xcore_index_capa - available.extractFilteredMetric("persistentvolumeclaim", 'artesca-storage-service-(.*)-index', AGGR.MAX, AGGR.MAX)
+        xcore_index_capa = capacity.extractMetricFilteredOnKey("persistentvolumeclaim", 'artesca-storage-service-(.*)-index', AGGR.MAX, AGGR.MAX)
+        xcore_index_used = xcore_index_capa - available.extractMetricFilteredOnKey("persistentvolumeclaim", 'artesca-storage-service-(.*)-index', AGGR.MAX, AGGR.MIN)
 
-        xcore_data_capa = capacity.extractFilteredMetric("persistentvolumeclaim", 'artesca-storage-service-(.*)-data(.*)', AGGR.MAX, AGGR.SUM)
-        xcore_data_used = xcore_data_capa - available.extractFilteredMetric("persistentvolumeclaim", 'artesca-storage-service-(.*)-data(.*)', AGGR.MAX, AGGR.SUM)
+        xcore_data_capa = capacity.extractMetricFilteredOnKey("persistentvolumeclaim", 'artesca-storage-service-(.*)-data(.*)', AGGR.MAX, AGGR.SUM)
+        xcore_data_used = xcore_data_capa - available.extractMetricFilteredOnKey("persistentvolumeclaim", 'artesca-storage-service-(.*)-data(.*)', AGGR.MAX, AGGR.SUM)
 
 
-        xcore_objects = xcore_objects_net.extractMetric(AGGR.MAX, AGGR.SUM)
-        xcore_protected = xcore_bytes_net.extractMetric(AGGR.MAX, AGGR.SUM)
-        xcore_reclaimable = xcore_reclaimable.extractMetric(AGGR.MAX, AGGR.MAX)
+        xcore_objects = xcore_objects_net.extractMetricGroupedByKey(AGGR.MAX, AGGR.SUM, "xcore_scality_com_node_name")
+        xcore_protected = xcore_bytes_net.extractMetricGroupedByKey(AGGR.MAX, AGGR.SUM, "xcore_scality_com_node_name")
+        xcore_reclaimable = xcore_reclaimable.extractMetricGroupedByKey(AGGR.MAX, AGGR.SUM, "xcore_scality_com_node_name")
 
         mongo_data_store = MongoDataStore(self.datastore)
 
         print("======================= USAGE =======================")
-        print("mongodb used = " + '{:.1f}'.format(mongodb_used/1024/1024/1024) + " GiB")
         print("mongodb capa = " + '{:.1f}'.format(mongodb_capa/1024/1024/1024) + " GiB")
-        print("mongodb usage = " + '{:.1f}%'.format(mongodb_used/mongodb_capa*100))
+        print("max mongodb used = " + '{:.1f}'.format(mongodb_used/1024/1024/1024) + " GiB")
+        print("max mongodb usage = " + '{:.1f}%'.format(mongodb_used/mongodb_capa*100))
 
-        print("xcore index used = " + '{:.1f}'.format(xcore_index_used/1024/1024/1024) + " GiB")
         print("xcore index capa = " + '{:.1f}'.format(xcore_index_capa/1024/1024/1024) + " GiB")
-        print("xcore index usage = " + '{:.1f}%'.format(xcore_index_used/xcore_index_capa*100))
+        print("max xcore index used = " + '{:.1f}'.format(xcore_index_used/1024/1024/1024) + " GiB")
+        print("max xcore index usage = " + '{:.1f}%'.format(xcore_index_used/xcore_index_capa*100))
 
-        print("xcore data used = " + '{:.1f}'.format(xcore_data_used/1024/1024/1024/1024) + " TiB")
         print("xcore data capa = " + '{:.1f}'.format(xcore_data_capa/1024/1024/1024/1024) + " TiB")
+        print("xcore data used = " + '{:.1f}'.format(xcore_data_used/1024/1024/1024/1024) + " TiB")
         print("xcore data usage = " + '{:.1f}%'.format(xcore_data_used/xcore_data_capa*100))
 
         print("xcore objects = " + '{:.0f}'.format(xcore_objects))