Commit 590d1796 authored by Andrey Vertiprahov's avatar Andrey Vertiprahov Committed by Алексей Широких
Browse files

Migrate WEB Monitor to SelfMon.

parent d74728a8
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# FM Collector
# ----------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
from __future__ import absolute_import
import time
# NOC modules
from .base import BaseCollector
from noc.lib.nosql import get_db
from noc.main.models.pool import Pool
from noc.sa.models.managedobject import ManagedObject
from noc.fm.models.ttsystem import TTSystem
class FMObjectCollector(BaseCollector):
    """
    Fault-management self-monitoring collector.

    Yields counts of active events/alarms, per-pool alarm breakdowns and
    escalation queue lags. Metric keys are tuples of
    (metric_name, (label, value), ...) as expected by BaseCollector.
    """
    name = "fm"
    # Aggregation over adm_path; kept for external users of this attribute
    pipeline = [
        {"$unwind": "$adm_path"},
        {"$group": {"_id": {"adm_path": "$adm_path", "root": "$root"},
                    "tags": {"$sum": 1}}},
    ]
    # (pool name, set of ManagedObject ids in the pool).
    # NOTE(review): evaluated once at import time, so pool membership
    # changes are not picked up until restart — confirm this is intended.
    pool_mappings = [
        (p.name,
         set(ManagedObject.objects.filter(pool=p).values_list("id", flat=True)))
        for p in Pool.objects.filter()
    ]

    @staticmethod
    def calc_lag(ts, now):
        """
        Return the non-negative lag in seconds between *ts* and *now*.
        Returns 0 when *ts* is falsy or lies in the future.
        """
        if ts and ts < now:
            return now - ts
        return 0

    def iter_metrics(self):
        """Yield (key, value) pairs for all FM metrics."""
        db = get_db()
        now = time.time()
        yield ("fm_events_active_total",), db.noc.events.active.estimated_document_count()
        last_event = db.noc.events.active.find_one(sort=[("timestamp", -1)])
        if last_event:
            # Lag of the most recent active event
            yield ("fm_events_active_last_lag_seconds",), self.calc_lag(
                time.mktime(last_event["timestamp"].timetuple()), now)
        yield ("fm_alarms_active_total",), db.noc.alarms.active.estimated_document_count()
        # BUG FIX: previously counted db.noc.alarms.active again, so the
        # "archived" metric duplicated the active total.
        yield ("fm_alarms_archived_total",), db.noc.alarms.archived.estimated_document_count()
        last_alarm = db.noc.alarms.active.find_one(
            filter={"timestamp": {"$exists": True}},
            sort=[("timestamp", -1)])
        if last_alarm:
            # Lag of the most recent active alarm
            yield ("fm_alarms_active_last_lag_seconds",), self.calc_lag(
                time.mktime(last_alarm["timestamp"].timetuple()), now)
        alarms_rooted = set()     # MOs with at least one rooted (correlated) alarm
        alarms_nonrooted = set()  # MOs with at least one root-cause alarm
        broken_alarms = 0         # alarms lacking a managed_object reference
        for alarm in db.noc.alarms.active.find({}):
            if "managed_object" not in alarm:
                broken_alarms += 1
                continue
            if "root" in alarm:
                alarms_rooted.add(alarm["managed_object"])
            else:
                alarms_nonrooted.add(alarm["managed_object"])
        alarms_all = alarms_rooted | alarms_nonrooted
        for pool_name, pool_mos in self.pool_mappings:
            yield ("fm_alarms_active_pool_count",
                   ("pool", pool_name)), len(pool_mos & alarms_all)
            yield ("fm_alarms_active_rooted_pool_count",
                   ("pool", pool_name)), len(pool_mos & alarms_rooted)
            yield ("fm_alarms_active_nonrooted_pool_count",
                   ("pool", pool_name)), len(pool_mos & alarms_nonrooted)
        # NOTE(review): missing the "fm_" prefix the other metrics carry;
        # name kept unchanged to avoid breaking existing dashboards.
        yield ("alarms_active_broken_count",), broken_alarms
        for shard in set(TTSystem.objects.filter(is_active=True).values_list("shard_name")):
            # NOTE(review): count uses "...escalation.%s" while find_one uses
            # "...escalator.%s" — one of the two collection names is likely a
            # typo; verify against the scheduler's actual collection naming.
            yield ("fm_escalation_pool_count",
                   ("shard", shard)), db["noc.scheduler.escalation.%s" % shard].estimated_document_count()
            # NOTE(review): "first" is fetched with descending sort (newest
            # document) and "last" with ascending (oldest) — the names and
            # sort directions look swapped; behavior preserved pending review.
            first_escalation = db["noc.scheduler.escalator.%s" % shard].find_one(sort=[("ts", -1)])
            if first_escalation:
                yield ("fm_escalation_first_lag_seconds",
                       ("shard", shard)), self.calc_lag(
                    time.mktime(first_escalation["ts"].timetuple()), now)
            last_escalation = db["noc.scheduler.escalator.%s" % shard].find_one(sort=[("ts", 1)])
            if last_escalation:
                yield ("fm_escalation_lag_seconds",
                       ("shard", shard)), self.calc_lag(
                    time.mktime(last_escalation["ts"].timetuple()), now)
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Inventory Collector
# ----------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
from __future__ import absolute_import
# NOC modules
from .base import BaseCollector
from noc.inv.models.interface import Interface
from noc.inv.models.link import Link
from noc.inv.models.subinterface import SubInterface
class InventoryObjectCollector(BaseCollector):
    """
    Inventory self-monitoring collector.

    Yields total counts of interfaces, links and subinterfaces.
    """
    name = "inventory"

    def iter_metrics(self):
        """Yield (key, value) pairs for inventory totals."""
        # @todo by POOL / Adm Domain
        # BUG FIX: the keys were written as ("name") — plain strings, since
        # parentheses without a trailing comma do not make a tuple. Every
        # other collector yields 1-tuples, so the trailing commas are added
        # for a consistent key format.
        yield ("inventory_iface_count",), Interface.objects.filter().count()
        yield ("inventory_link_count",), Link.objects.filter().count()
        yield ("inventory_subinterface_count",), SubInterface.objects.filter().count()
......@@ -29,6 +29,6 @@ class ManagedObjectCollector(BaseCollector):
pool = Pool.get_by_id(pool_id)
if not pool:
continue
yield ("managedobject_managed", ("pool", pool.name)), pool_managed
yield ("managedobject_unmanaged", ("pool", pool.name)), pool_managed
yield ("managedobject_total", ("pool", pool.name)), pool_managed + pool_unmanaged
yield ("inventory_managedobject_managed", ("pool", pool.name)), pool_managed
yield ("inventory_managedobject_unmanaged", ("pool", pool.name)), pool_managed
yield ("inventory_managedobject_total", ("pool", pool.name)), pool_managed + pool_unmanaged
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Task Collector
# ----------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
from __future__ import absolute_import
import datetime
import six
from collections import OrderedDict
# NOC modules
from .base import BaseCollector
from noc.lib.nosql import get_db
from noc.main.models.pool import Pool
from noc.core.scheduler.job import Job
class TaskObjectCollector(BaseCollector):
    """
    Scheduler task self-monitoring collector.

    Reports per-scheduler (and per-pool shard for discovery) totals,
    exception/running/late task counts, queue lag and average task
    durations.
    """
    name = "task"
    # NOTE(review): class-level handle is created once at import time
    db = get_db()
    # Schedulers as (name, shards); discovery is sharded per pool
    schedulers = [
        ("scheduler", []),
        ("discovery", Pool.objects.all().order_by("name").values_list("name")),
    ]

    def __init__(self, service):
        # Resolve scheduler collection names once per collector instance
        self.schedulers_list = self.load_discovery()
        super(TaskObjectCollector, self).__init__(service)

    def load_discovery(self):
        """
        Map scheduler collection names ("noc.schedules.<name>[.<shard>]")
        to {"name": ..., "shard": ...} descriptors, preserving order.
        """
        r = OrderedDict()
        for name, shards in self.schedulers:
            if shards:
                for shard in shards:
                    r["noc.schedules.%s.%s" % (name, shard)] = {"name": name, "shard": shard}
            else:
                r["noc.schedules.%s" % name] = {"name": name}
        return r

    def iter_metrics(self):
        """Yield (key, value) pairs for every known scheduler collection."""
        # 5 s grace period so tasks due "right now" are not flagged late
        now = datetime.datetime.now() + datetime.timedelta(seconds=5)
        late_q = {
            Job.ATTR_STATUS: Job.S_WAIT,
            Job.ATTR_TS: {"$lt": now},
        }
        exp_q = {
            Job.ATTR_LAST_STATUS: Job.E_EXCEPTION,
        }
        for scheduler_name, data in six.iteritems(self.schedulers_list):
            sc = self.db[scheduler_name]
            # Oldest waiting-and-overdue task determines the queue lag
            t0 = sc.find_one(late_q, limit=1, sort=[("ts", 1)])
            # Average last-run duration per job class, sorted by class name.
            # NOTE(review): indices 0/1 below assume the box job class sorts
            # before the periodic one — confirm against actual jcls values.
            ldur = list(sc.aggregate([
                {"$group": {"_id": "$jcls", "avg": {"$avg": "$ldur"}}},
                {"$sort": {"_id": 1}},
            ]))
            if t0 and t0["ts"] < now:
                lag = (now - t0["ts"]).total_seconds()
            else:
                lag = 0
            late_count = sc.count_documents(late_q)
            labels = (("scheduler_name", data["name"]),
                      ("pool", data.get("shard", "")))
            yield ("task_pool_total",) + labels, sc.estimated_document_count()
            yield ("task_exception_count",) + labels, sc.count_documents(exp_q)
            yield ("task_running_count",) + labels, sc.count_documents({Job.ATTR_STATUS: Job.S_RUN})
            yield ("task_late_count",) + labels, late_count
            # CONSISTENCY FIX: lag/box metrics previously used
            # data.get("shard") (None) while the others used "" — unified
            # so the "pool" label is always a string.
            yield ("task_lag_seconds",) + labels, lag
            yield ("task_box_time_avg_seconds",) + labels, \
                ldur[0]["avg"] if ldur and ldur[0]["avg"] is not None else 0
            # BUG FIX: the guard checked ldur[0]["avg"] while reading
            # ldur[1]["avg"]; the wrong element's null-check could let a
            # None through (or suppress a valid value).
            yield ("task_periodic_time_avg_seconds",) + labels, \
                ldur[1]["avg"] if len(ldur) > 1 and ldur[1]["avg"] is not None else 0
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment