Commit e5212c47 authored by Dmitry Volodin's avatar Dmitry Volodin
Browse files

#843 pymongo 3.7.1, mongoengine 0.15.3

parent c5c7cecb
......@@ -18,7 +18,7 @@ class Command(BaseCommand):
from noc.lib.nosql import get_db
db = get_db()
collections = set(db.collection_names())
collections = set(db.list_collection_names())
for model_id in iter_model_id():
model = get_model(model_id)
if not model:
......
......@@ -167,7 +167,7 @@ class DataStream(object):
Return total amount of items in datastream
:return:
"""
return cls.get_collection().count()
return cls.get_collection().count_documents({})
@classmethod
def clean_change_id(cls, change_id):
......
......@@ -32,7 +32,7 @@ class Migration:
"sync_id": str(sync_id),
"model_id": "dns.DNSZone",
"object_id": str(zone_id)
}).count():
}).count_documents():
sc.insert({
"uuid": str(uuid.uuid4()),
"model_id": "dns.DNSZone",
......
......@@ -15,7 +15,7 @@ def fix():
for p in Pool.objects.all():
s = Scheduler("discovery", pool=p.name)
c = s.get_collection()
if not c.count():
if not c.count_documents({}):
continue
# Remove unused schedules fields
c.update_many({
......
......@@ -11,7 +11,7 @@ class Migration:
def forwards(self):
areas = get_db().noc.gis.areas
if not areas.find({"name": "World"}).count():
if not areas.count_documents({"name": "World"}):
areas.insert({
"name": "World",
"is_active": True,
......
......@@ -12,7 +12,7 @@ DEFAULT_NAME = "default"
class Migration:
def forwards(self):
c = get_db().noc.interface_profiles
if not c.find({"name": DEFAULT_NAME}).count():
if not c.count_documents({"name": DEFAULT_NAME}):
c.insert({
"name": DEFAULT_NAME,
"description": "Fallback interface profile.\n"
......
......@@ -16,7 +16,7 @@ class Migration(object):
# Initialize container models
collection = db.noc.networksegments
if collection.count() == 0:
if collection.count_documents({}) == 0:
print " Create default network segment"
collection.insert({
"name": "ALL",
......
......@@ -17,7 +17,7 @@ logger = logging.getLogger(__name__)
class Migration(object):
def forwards(self):
db = get_db()
for c in db.collection_names():
for c in db.list_collection_names():
if c.startswith("noc.schedules."):
db[c].drop_indexes()
......
......@@ -305,7 +305,7 @@ class DocInline(object):
if ordering:
data = data.order_by(*ordering)
if format == "ext":
total = data.count()
total = data.count()
if start is not None and limit is not None:
data = data[int(start):int(start) + int(limit)]
out = [formatter(o, fields=only) for o in data]
......
......@@ -11,7 +11,7 @@ from noc.lib.nosql import get_db
class Migration:
def forwards(self):
s = get_db().noc.stomp_access
if not s.count():
if not s.count_documents({}):
s.insert({"user": "noc", "password": "noc", "is_active": True})
def backwards(self):
......
......@@ -26,7 +26,7 @@ class WhoisCache(object):
"""
db = nosql.get_db()
collection = db.noc.whois.asset.members
return bool(collection.count())
return bool(collection.count_documents({}))
@classmethod
def has_origin_routes(cls):
......@@ -36,7 +36,7 @@ class WhoisCache(object):
"""
db = nosql.get_db()
collection = db.noc.whois.origin.route
return bool(collection.count_documents({}))
return bool(collection.count_documents())
@classmethod
def has_asset(cls, as_set):
......
......@@ -6,7 +6,7 @@ class Migration(object):
def forwards(self):
db = get_db()
coll = db["noc.dialplans"]
if not coll.count():
if not coll.count_documents({}):
coll.insert({
"name": "E.164",
"description": "E.164 numbering plan",
......
......@@ -5,7 +5,7 @@ from noc.lib.nosql import get_db
class Migration:
def forwards(self):
db = get_db()
scount = db.noc.pm.storages.count()
scount = db.noc.pm.storages.count_documents({})
if scount == 0:
db.noc.pm.storages.insert({
"name": "default",
......
......@@ -13,7 +13,7 @@ class Migration:
PROBEUSER = "noc-probe"
mdb = get_db()
# Check probe has no storage and credentials
if mdb.noc.pm.probe.count() != 1:
if mdb.noc.pm.probe.count_documents({}) != 1:
return
p = mdb.noc.pm.probe.find_one({})
if p.get("storage") or p.get("user"):
......
......@@ -2,7 +2,7 @@ atomiclong==0.1.1
csiphash==0.0.5
http-parser==0.8.3
psycopg2==2.7.3.2
pymongo==3.5.1
pymongo==3.7.1
geopy==0.97
geojson==1.0.9
Cython>=0.24
......@@ -21,7 +21,7 @@ python-consul==0.7.0
python-dateutil==2.4.0
manhole>=1.3.0
mistune==0.5
mongoengine==0.13.0
mongoengine==0.15.3
networkx==1.11
numpy==1.9.2
tornado==4.5
......
......@@ -19,7 +19,7 @@ class Migration:
db = get_db()
caps = db["noc.sa.objectcapabilities"]
if not caps.count():
if not caps.count_documents({}):
return
caps.rename("noc.sa.objectcapabilities_old", dropTarget=True)
old_caps = db["noc.sa.objectcapabilities_old"]
......
......@@ -116,12 +116,12 @@ def escalate(alarm_id, escalation_id, escalation_delay,
"escalation_ts": {
"$gte": ets
}
}).count()
}).count_documents()
ae += ArchivedAlarm._get_collection().find({
"escalation_ts": {
"$gte": ets
}
}).count()
ae += ArchivedAlarm._get_collection().count_documents({
"escalation_ts": {
"$gte": ets
}
})
if ae >= config.escalator.tt_escalation_limit:
logger.error(
"Escalation limit exceeded (%s/%s). Skipping",
......
......@@ -82,8 +82,8 @@ class FMMonitorApplication(ExtApplication):
r = []
now = datetime.datetime.now()
# Classifier section
new_events = db.noc.events.new.count()
failed_events = db.noc.events.failed.count()
new_events = db.noc.events.new.count_documents({})
failed_events = db.noc.events.failed.count_documents({})
first_new_event = db.noc.events.new.find_one(sort=[("timestamp", 1)])
if first_new_event:
classification_lag = humanize_timedelta(now - first_new_event["timestamp"])
......@@ -96,23 +96,23 @@ class FMMonitorApplication(ExtApplication):
]
# Correlator section
sc = db.noc.schedules.fm.correlator
dispose = sc.find({"jcls": "dispose"}).count()
dispose = sc.count_documents({"jcls": "dispose"})
if dispose:
f = sc.find_one({"jcls": "dispose"}, sort=[("ts", 1)])
dispose_lag = humanize_timedelta(now - f["ts"])
else:
dispose_lag = "-"
c_jobs = sc.find({"jcls": {"$ne": "dispose"}}).count()
c_jobs = sc.count_documents({"jcls": {"$ne": "dispose"}})
r += [
("correlator", "dispose", dispose),
("correlator", "dispose_lag", dispose_lag),
("correlator", "jobs", c_jobs)
]
# Stats
active_events = db.noc.events.active.count()
archived_events = db.noc.events.archive.count()
active_alarms = db.noc.alarms.active.count()
archived_alarms = db.noc.alarms.archived.count()
active_events = db.noc.events.active.count_documents({})
archived_events = db.noc.events.archive.count_documents({})
active_alarms = db.noc.alarms.active.count_documents({})
archived_alarms = db.noc.alarms.archived.count_documents({})
r += [
("events", "new_events", new_events),
("events", "active_events", active_events),
......@@ -143,8 +143,8 @@ class FMMonitorApplication(ExtApplication):
}
now = datetime.datetime.now()
# Classifier section
new_events = db.noc.events.new.count()
failed_events = db.noc.events.failed.count()
new_events = db.noc.events.new.count_documents({})
failed_events = db.noc.events.failed.count_documents({})
first_new_event = db.noc.events.new.find_one(sort=[("timestamp", 1)])
if first_new_event:
classification_lag = humanize_timedelta(now - first_new_event["timestamp"])
......@@ -163,17 +163,17 @@ class FMMonitorApplication(ExtApplication):
dispose_lag = humanize_timedelta(now - f["ts"])
else:
dispose_lag = "-"
c_jobs = sc.find({"jcls": {"$ne": "dispose"}}).count()
c_jobs = sc.count_documents({"jcls": {"$ne": "dispose"}})
r["correlator"] = {
"dispose": dispose,
"dispose_lag": dispose_lag,
"jobs": c_jobs
}
# Stats
active_events = db.noc.events.active.count()
archived_events = db.noc.events.archive.count()
active_alarms = db.noc.alarms.active.count()
archived_alarms = db.noc.alarms.archived.count()
active_events = db.noc.events.active.count_documents({})
archived_events = db.noc.events.archive.count_documents({})
active_alarms = db.noc.alarms.active.count_documents({})
archived_alarms = db.noc.alarms.archived.count_documents({})
r["events"] = {
"new_events": new_events,
"active_events": active_events,
......
......@@ -59,13 +59,13 @@ class InvMonitorApplication(ExtApplication):
lag = (now - t0["ts"]).total_seconds()
else:
lag = 0
late_count = sc.find(late_q).count()
late_count = sc.count_documents(late_q)
#
r[p.name.lower()] = {
"pool": p.name.lower(),
"total_tasks": sc.count(),
"exception_tasks": sc.find(exp_q).count(),
"running_tasks": sc.find({Job.ATTR_STATUS: Job.S_RUN}).count(),
"total_tasks": sc.count_documents({}),
"exception_tasks": sc.count_documents(exp_q),
"running_tasks": sc.count_documents({Job.ATTR_STATUS: Job.S_RUN}),
"late_tasks": late_count,
"lag": lag,
"avg_box_tasks": ldur[0]["avg"] if ldur else 0,
......
......@@ -42,7 +42,7 @@ class ReportFilterApplication(SimpleReport):
for mo in mos_list:
q = Object._get_collection().find({"data.management.managed_object": {"$in": [mo.id]}})
if q.count() == 0:
if Object._get_collection().count_documents({"data.management.managed_object": {"$in": [mo.id]}}) == 0:
data += [[mo.name,
mo.address,
mo.vendor or None,
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment