PageRenderTime 54ms CodeModel.GetById 20ms RepoModel.GetById 0ms app.codeStats 0ms

/gnss_collector/engine/engine.py

https://gitlab.com/pineiden/collector
Python | 1356 lines | 1275 code | 39 blank | 42 comment | 9 complexity | 339f14a4ab663143b60dc3424590f3cd MD5 | raw file
# stdlib python
  2. import asyncio
  3. import concurrent.futures
  4. import datetime
  5. import functools
  6. import itertools
  7. import logging
  8. import multiprocessing
  9. import multiprocessing as mp
  10. import os
  11. import socket
  12. import sys
  13. import time
  14. import traceback
  15. from asyncio import shield, wait_for
  16. from datetime import timedelta, datetime
  17. from dataclasses import (field, dataclass)
  18. from functools import reduce
  19. from ipaddress import ip_address
  20. from pathlib import Path
  21. from tasktools.taskloop import TaskLoop
  22. from typing import List, Dict, Union, overload, Any, Tuple
  23. from collections.abc import Iterable
  24. # contrib
  25. from rethinkdb import RethinkDB
  26. from rich import print
  27. # contrib: share exceptions as message
  28. from tblib import pickling_support
  29. pickling_support.install()
  30. # contrib @dpineda
  31. from networktools.time import timestamp, now
  32. from networktools.colorprint import gprint, bprint, rprint
  33. from networktools.library import my_random_string
  34. from networktools.library import check_type
  35. from networktools.library import (pattern_value,
  36. fill_pattern, context_split,
  37. gns_loads, gns_dumps)
  38. from networktools.messages import MSGException, MessageLog
  39. from networktools.time import gps_time, now
  40. from basic_queuetools.queue import read_queue_gen
  41. from basic_logtools.filelog import LogFile as FileLog
  42. from crb import RingBuffer, Data
  43. from dataprotocols import BaseProtocol, Gsof, Eryo
  44. from data_rdb import Rethink_DBS
  45. from orm_collector.manager import SessionCollector, object_as_dict
  46. # Tasktools
  47. from tasktools.taskloop import coromask, renew, simple_fargs, simple_fargs_out
  48. from tasktools.scheduler import TaskScheduler
  49. # GSOF Protocol
  50. from .steps import (CollectSteps as CSteps, DBSteps, Logger, ORMSteps,
  51. ControlActions)
  52. # DBS Rethinkdb
  53. # same module
  54. from .subscribe import SubscribeData
  55. from .message import MessageManager
  56. # from .async_mongo import AsyncMongoDB
  57. rdb = RethinkDB()
  58. def rdbnow():
  59. return rdb.iso8601(now().isoformat())
  60. # base settings
  61. try:
  62. from .conf.settings import COMMANDS, groups, dirs
  63. except:
  64. from conf.settings import COMMANDS, groups, dirs
  65. DATA_KEYS = {"DT_GEN":"dt_gen","DELTA_TIME":"latency"}
  66. import time
  67. def load_stations(server_name, datadb, log_path='~/log'):
  68. print("Obteniendo estaciones....", server_name, datadb)
  69. dbmanager = SessionCollector(
  70. log_path=log_path,
  71. active='true',
  72. server=server_name,
  73. **datadb)
  74. u = [st for st in dbmanager.get_station_data(server=server_name)]
  75. print("STATIONS READED")
  76. print(u)
  77. dbmanager.close()
  78. return u
  79. def load_databases(datadb, log_path='~/log'):
  80. print("Obteniendo datadb lista")
  81. dbmanager = SessionCollector(
  82. log_path=log_path,
  83. **datadb)
  84. u = dbmanager.get_dbdata_data()
  85. print("Resultado...", u)
  86. dbmanager.close()
  87. return u
  88. def active_server(server_name, datadb, log_path='~/log'):
  89. print("Activando server", server_name, datadb)
  90. dbmanager = SessionCollector(
  91. log_path=log_path,
  92. **datadb)
  93. u = dbmanager.get_server_id(server_name)
  94. if u:
  95. dbmanager.active_server(u)
  96. dbmanager.close()
  97. return u
  98. def deactive_server(server_name, datadb, log_path='~/log'):
  99. dbmanager = SessionCollector(log_path=log_path, **datadb)
  100. u = dbmanager.get_server_id(server_name)
  101. if u:
  102. dbmanager.deactive_server(u)
  103. dbmanager.close()
  104. return u
  105. """
  106. Engine basic for collector
  107. """
  108. class Aux:
  109. async def stop(self):
  110. pass
  111. Value = Union[str, int, float, datetime]
@dataclass
class Engine(TaskScheduler):
    """
    Data-acquisition engine: receives messages from the manager and
    saves the collected station data to the destination database.
    """
    # ARGS: ordered and obligatory
    set_queue: List[mp.Queue]     # [rq, wq, process, ans_process, db, ..., log]
    sleep_time: int               # base sleep between cycles
    est_by_proc: int              # stations handled per worker process
    stations: Dict[str, Dict[str, Value]]    # ids -> station descriptor
    dbtype: Dict[str, Dict[str, Value]]
    protocol: Dict[str, Dict[str, Value]]
    status_sta: Dict[str, bool]   # ok — ids -> station active flag
    db_instances_sta: Dict[str, str]  # ok
    status_conn: Dict[str, bool]  # ok — ids -> connection-open flag
    db_data: Dict[str, Dict[str, str]]  # can be better — idd -> db descriptor
    dump_list: Dict[str, str]
    proc_tasks: Dict[str, str]
    assigned_tasks: Dict[str, str]    # ipt -> {ico: ids} (see stop/process_data)
    free_ids: Dict[str, str]
    wait: Dict[str, str]
    inc_msg: Dict[str, str]
    ids: List[str]                # station ids
    idd: List[str]                # database ids
    ipt: List[str]                # process-task ids
    idm: List[str]                # message ids
    ico: List[str]                # collect-task ids
    changes: Dict[str, str]
    gsof_timeout: int             # socket timeout handed to protocol instances
    sta_init: Dict[str, str]
    db_init: Dict[str, bool]
    db_connect: Dict[str, bool]
    status_tasks: Dict[str, Dict[str, str]]
    nproc: int
    idc: Dict[str, str]
    # KWARGS
    rdb_address: str = field(default_factory=lambda: "localhost")
    uin: int = 6                  # length of the generated random ids
    args: List[Any] = field(default_factory=list)
    kwargs: Dict[str, Any] = field(default_factory=dict)
    log_path: Path = Path.home() / "collector_log"
    server: str = "atlas"
    dt_criteria: int = 4
    raise_timeout: bool = False
    timeout: int = 15
    dbdata: Dict[str, Any] = field(default_factory=dict)
    # protocol name (upper-case) -> protocol class used to read station data
    collect_objects: Dict[str, BaseProtocol] = field(default_factory=lambda: dict(
        GSOF=Gsof,
        ERYO=Eryo
    ))
    # database type name -> client class used to persist data
    database_objects: Dict[str, Any] = field(default_factory=lambda: dict(
        RethinkDB=Rethink_DBS,
        #Mongo=AsyncMongoDB
    ))
    folder: str = 'data'
    sep: str = '|'                # field separator for dumps
    rethinkdb: Dict[str, Any] = field(default_factory=dict)
    log_manager: Dict[str, str] = field(default_factory=dict)
    def __post_init__(self):
        """
        Finish initialization after the dataclass fields are set: bind
        queues, read settings from self.kwargs, call the TaskScheduler
        constructor, and create the manager/subscriber helpers.
        """
        self.set_queue_elems()
        self.set_stats_params()
        args = []  # NOTE(review): unused local, kept as-is
        # extra shared state forwarded to the TaskScheduler base
        kwargs_extra = {
            'ipt': self.ipt,
            'ico': self.ico,
            'assigned_tasks': self.assigned_tasks,
            'nproc': self.nproc,
            'sta_init': self.sta_init
        }
        print("ARGS", self.args)
        print("KWARGS", self.kwargs)
        # settings come from self.kwargs, overriding the field defaults
        self.server_name = self.kwargs.get('server', "atlas")
        self.log_path = Path(self.kwargs.get('log_path', '~/log'))
        self.timeout = self.kwargs.get("timeout", 5)
        self.datadb = self.kwargs.get("dbdata", {})
        self.raise_timeout = self.kwargs.get("raise_timeout", False)
        print("datadb",self.datadb)
        self.kwargs.update(kwargs_extra)
        super().__init__(*self.args, **self.kwargs)
        #
        coros_callback_dict = {
            'run_task': self.process_data,
        }
        self.set_new_run_task(**coros_callback_dict)
        self.instances = dict()
        self.db_instances = dict()
        # more processing: nproc field is overridden with the real CPU count
        self.nproc = mp.cpu_count()
        # list of objects
        # LOAD DATA TO STATIONS
        self.tasks = dict()
        self.first_time = dict()
        # set the main task
        # must be in every new process ATTENTION!
        self.message_manager = MessageManager(self)
        self.subscribe_data = SubscribeData(
            'collector_subscribe', self.queue_t2n)
        self.LOG_STA = check_type(os.environ.get('LOG_STA', False))
        ###############################################
        # mark this server active in the collector DB; keeps its id
        self.server = active_server(self.server_name, self.datadb)
        # table status
        self.stats = "STATIONS_STATS"
  215. def set_queue_elems(self):
  216. self.rq = self.set_queue[0]
  217. self.wq = self.set_queue[1]
  218. self.queue_process = self.set_queue[2]
  219. self.queue_ans_process = self.set_queue[3]
  220. self.queue_db = self.set_queue[4]
  221. self.queue_log = self.set_queue[-1]
  222. self.queue_n2t = self.rq
  223. self.queue_t2n = self.wq
    async def send_log(self, coroname, level, message, exc):
        """
        Wrap a log record as a MessageLog and enqueue its RethinkDB form
        on `queue_log`. *exc* is an exc_info-style tuple re-packed by
        MSGException.
        """
        msg = MessageLog(rdbnow(), coroname, level, message, MSGException(*exc))
        self.queue_log.put({"log":[msg.rdb]})
  227. def set_stats_params(self):
  228. # set ring buffer control
  229. self.mu = 0.5
  230. self.factor = 0.8
  231. self.sigma = 0.3
  232. self.buffer_size = 120*60
  233. self.u_base = self.mu + self.factor * self.sigma
  234. self.acc = 15
    def set_datafolder(self, folder):
        """
        Set another, different, folder to save data.
        """
        self.folder = folder
  240. def set_id(self, lista):
  241. """
  242. Defines a new id for stations, check if exists
  243. """
  244. ids = my_random_string(self.uin)
  245. while True:
  246. if ids not in lista:
  247. lista.append(ids)
  248. break
  249. else:
  250. ids = my_random_string(self.uin)
  251. return ids
    def set_ids(self):
        """
        Create and register a fresh unique station id.
        """
        return self.set_id(self.ids)
    def set_idd(self):
        """
        Create and register a fresh unique database-instance id.
        """
        return self.set_id(self.idd)
  262. def set_ipt(self, ipt=""):
  263. """
  264. Defines a new id for relation process-collect_task, check if exists
  265. """
  266. if ipt:
  267. self.ipt.append(ipt)
  268. else:
  269. ipt = self.set_id(self.ipt)
  270. return ipt
  271. def set_ico(self, ico):
  272. """
  273. Defines a new id for task related to collect data insice a worker, check if exists
  274. """
  275. if ico:
  276. self.ipt.append(ico)
  277. else:
  278. ico = self.set_id(self.ipt)
  279. return ico
    def set_idm(self):
        """
        Create and register a fresh unique incoming-message id.
        """
        return self.set_id(self.idm)
  285. def load_stations(self):
  286. u = load_stations(self.server_name, self.datadb, log_path=self.log_path/"orm") # ok
  287. for m in u:
  288. print(m)
  289. keys = ['id', 'code', 'db', 'dblist', 'ECEF_X', 'ECEF_Y', 'protocol_host',
  290. 'ECEF_Z', 'port', 'protocol', 'host', 'dbname']
  291. try:
  292. station = dict(
  293. id=m['id'],
  294. code=m['st_code'],
  295. name=m['st_name'],
  296. ECEF_X=m['ecef_x'],
  297. ECEF_Y=m['ecef_y'],
  298. ECEF_Z=m['ecef_z'],
  299. db=m['db_code'],
  300. dblist=m['db_list'],
  301. port=m['st_port'],
  302. protocol=m['prt_name'],
  303. protocol_host=m['protocol_host'],
  304. host=m['st_host'],
  305. on_db=True
  306. )
  307. (ids, sta) = self.add_station(**station)
  308. # print(station)
  309. except Exception as exc:
  310. raise exc
  311. def add_station(self, **sta):
  312. """
  313. Add station to list for data adquisition
  314. """
  315. try:
  316. keys = ['id',
  317. 'code',
  318. 'name',
  319. 'ECEF_X',
  320. 'ECEF_Y',
  321. 'ECEF_Z',
  322. 'host',
  323. 'port',
  324. 'interface_port',
  325. 'db',
  326. 'dblist',
  327. 'protocol',
  328. 'protocol_host',
  329. 'on_db',
  330. 'ipt']
  331. ids = self.set_ids()
  332. # if ids in self.enqueued:
  333. # self.enqueued.remove(ids)
  334. # self.enqueued.add(ids)
  335. station = dict(ids=ids)
  336. for k in keys:
  337. if k in sta.keys():
  338. if k == 'protocol':
  339. station[k] = sta.get(k, 'None').upper()
  340. else:
  341. station[k] = sta.get(k, None)
  342. else:
  343. if k == 'host':
  344. station[k] = 'localhost'
  345. elif k == 'port' or k == 'interface_port':
  346. station[k] = 0
  347. elif k in [f'ECEF_{v}' for v in ("X", "Y", "Z")]:
  348. station[k] = 0
  349. else:
  350. station[k] = None
  351. self.stations.update({ids: station})
  352. self.status_sta.update({ids: False})
  353. self.first_time.update({ids: True})
  354. return (ids, sta)
  355. except Exception as ex:
  356. raise ex
  357. def update_station(self, ids, **sta):
  358. """
  359. Add station to list for data adquisition
  360. """
  361. try:
  362. keys = ['id',
  363. 'code',
  364. 'name',
  365. 'ECEF_X',
  366. 'ECEF_Y',
  367. 'ECEF_Z',
  368. 'host',
  369. 'port',
  370. 'interface_port',
  371. 'db',
  372. 'dblist',
  373. 'protocol',
  374. 'protocol_host',
  375. 'on_db',
  376. 'ipt']
  377. station = dict(ids=ids)
  378. for k in keys:
  379. if k in sta.keys():
  380. if k == 'protocol':
  381. station[k] = sta.get(k, 'None').upper()
  382. else:
  383. station[k] = sta.get(k, None)
  384. else:
  385. if k == 'host':
  386. station[k] = 'localhost'
  387. elif k == 'port' or k == 'interface_port':
  388. station[k] = 0
  389. elif k in [f'ECEF_{v}' for v in ("X", "Y", "Z")]:
  390. station[k] = 0
  391. else:
  392. station[k] = None
  393. self.stations.update({ids: station})
  394. self.status_sta.update({ids: False})
  395. self.first_time.update({ids: True})
  396. return (ids, sta)
  397. except Exception as ex:
  398. raise ex
    def get_stations_keys(self):
        """Return the registered station ids as a list."""
        return list(self.stations.keys())
    def load_databases(self):
        """
        Load destination-database rows from the ORM, group them by
        (host, port) so each destination is registered once, and hand
        each group to new_datadb.
        """
        u = load_databases(self.datadb, log_path=self.log_path/"orm")
        # ok
        groups = {}
        for m in u:
            # NOTE(review): dbtype keeps the value from the LAST row only;
            # it is used for every group below — confirm all rows share
            # one type_name.
            dbtype = m['type_name']
            kwargs = dict(
                id=m['id'],
                code=m['code'],
                path=m['path'],
                host=m['host'],
                port=m['port'],
                user=m['user'],
                passw=m['passw'],
                info=m['info'],
                type_name=m['type_name'],
                type_db=m['type_db'],
                url=m['url'],
                data_list=m['data_list'],
                dbname=m["dbname"].rstrip(),
                address=(m['host'], m['port']),
                log_path=self.log_path/"rdb",
                on_db=True)
            # later rows with the same (host, port) overwrite earlier ones
            groups[(m["host"], m["port"])] = kwargs
        #print("Different db destinies", len(groups), groups.keys())
        for opts in groups.values():
            self.new_datadb(dbtype, **opts)
  428. def new_datadb(self, dbtype, **kwargs):
  429. """
  430. Here you give the argument for every type engine for store data colected
  431. and instantiate the db for enable query on that
  432. """
  433. # generate a idd= database instance identifier
  434. try:
  435. keys = [
  436. 'id',
  437. 'user',
  438. 'passw',
  439. 'code',
  440. 'host',
  441. 'port',
  442. 'name',
  443. 'path',
  444. 'data_list',
  445. 'type_name',
  446. 'dbname',
  447. 'type_db,'
  448. 'url',
  449. 'info',
  450. 'address',
  451. 'on_db',
  452. 'log_path']
  453. uin = 4
  454. idd = self.set_idd()
  455. # create namedtuple/dataclass
  456. db_data = dict(idb=idd, name=dbtype, args={})
  457. for k in keys:
  458. if k in keys:
  459. if k in kwargs.keys():
  460. db_data['args'][k] = kwargs[k]
  461. else:
  462. if k == 'localhost':
  463. db_data['args'][k] = 'localhost'
  464. elif k == 'port':
  465. db_data['args'][k] = 0
  466. else:
  467. db_data['args'][k] = ''
  468. self.db_data[idd] = db_data
  469. return idd, db_data
  470. except Exception as ex:
  471. raise ex
  472. def mod_station(self, ids, key, value):
  473. """
  474. Modify some value in station info
  475. """
  476. if key in self.stations.get(ids).keys():
  477. self.stations[ids][key] = value
  478. def del_station(self, ids):
  479. """
  480. Delete a station from list
  481. """
  482. del self.stations[ids]
  483. del self.status_sta[ids]
  484. del self.status_conn[ids]
  485. del self.instances[ids]
  486. k = self.ids.index(ids)
  487. del self.ids[k]
  488. def save_db(self, dbmanager, tname, args):
  489. """
  490. Save data to tname with args
  491. """
  492. # TODO: actualizar la lista de campos port table
  493. # TODO: añadir serverinstance
  494. input_args = dict(
  495. station=[
  496. 'code',
  497. 'name',
  498. 'position_x',
  499. 'position_y',
  500. 'position_z',
  501. 'host',
  502. 'port',
  503. 'interface_port',
  504. 'db',
  505. 'protocol'],
  506. dbdata=[
  507. 'code',
  508. 'path',
  509. 'host',
  510. 'port',
  511. 'user',
  512. 'passw',
  513. 'info',
  514. 'dbtype'],
  515. dbtype=['typedb', 'name', 'url', 'data_list'],
  516. protocol=['name', 'red_url', 'class_name', 'git_url']
  517. )
  518. name_args = input_args[tname]
  519. my_args = []
  520. id_instance = None
  521. if dbmanager == None:
  522. dbmanager = SessionCollector()
  523. instance = object
  524. if tname == 'station':
  525. instance = dbmanager.station(**args)
  526. elif tname == 'dbdata':
  527. instance = dbmanager.dbdata(**args)
  528. elif tname == 'dbtype':
  529. instance = dbmanager.dbtype(**args)
  530. elif tname == 'protocol':
  531. instance = dbmanager.protocol(**args)
  532. id_instance = instance.id
  533. return id_instance
    def save_station(self, ids):
        """
        Save station to database. (Not implemented yet.)
        """
        # check if exists
        # if exist get data and compare
        # then update
        # if not, save
        pass
    def drop_station(self, ids):
        """
        Delete station from database. (Not implemented yet.)
        """
        # get id from station ids
        # delete on database
        pass
    def del_db(self, varlist):
        """
        Delete element from database identified by idx in varlist.
        (Not implemented yet.)
        """
        pass
  555. ###############
    def add_sta_instance(self, ids, loop):
        """
        Create the protocol instance that reads the station's data
        through a socket.

        Returns (instance, table_name) on success, (None, None) when the
        ids is unknown.
        """
        station = self.stations.get(ids)
        if station:
            protocol = self.stations[ids]['protocol']
            # NOTE: kwargs aliases the stored station dict — every update
            # below is persisted into self.stations[ids].
            kwargs = self.stations[ids]
            self.stations[ids].update({'on_collector': True})
            kwargs['code'] = self.stations[ids]['code']
            # connect to protocol_host, not the station's st_host
            kwargs['host'] = self.stations[ids]['protocol_host']
            kwargs['port'] = self.stations[ids]['port']
            kwargs['sock'] = None
            kwargs['timeout'] = self.gsof_timeout
            kwargs["raise_timeout"] = False
            kwargs['loop'] = loop
            kwargs['log_path'] = self.log_path/"protocols"
            # protocol keys into collect_objects (e.g. GSOF, ERYO)
            instance = self.collect_objects[protocol](**kwargs)
            code = kwargs["code"]
            table_name = f"{code}_{protocol}"
            return instance, table_name
        else:
            print("No station")
            return (None, None)
    def set_status_sta(self, ids: str, value: bool) -> None:
        """Set the station-status flag for *ids*; ignores non-bool values."""
        if isinstance(value, bool):
            self.status_sta[ids] = value
    def set_status_conn(self, ids: str, value: bool) -> None:
        """Set the connection-status flag for *ids*; ignores non-bool values."""
        if isinstance(value, bool):
            self.status_conn[ids] = value
  587. def del_sta(self, ids:str):
  588. del self.instances[ids]
  589. del self.status_sta[ids]
  590. del self.status_conn[ids]
  591. del self.first_time[ids]
  592. # del self.db_instances[ids]
  593. del self.ids
  594. def get_tname(self, varname):
  595. assert isinstance(varname, str)
  596. if varname == 'STA' or varname == 'STATION':
  597. return 'station'
  598. elif varname == 'DB' or varname == 'DBDATA':
  599. return 'database'
  600. elif varname == 'PROT' or varname == 'PROTOCOL':
  601. return 'protocol'
  602. elif varname == 'DBTYPE':
  603. return 'dbtype'
  604. else:
  605. return None
  606. def get_id_by_code(self, varname, code):
  607. if varname == 'STATIONS':
  608. this_var = self.stations
  609. for k in this_var.keys():
  610. if this_var[k]['code'] == code:
  611. return k
  612. elif varname == 'DBDATA':
  613. this_var = self.db_data
  614. # variable in function dbtype
  615. for k in this_var.keys():
  616. # code_r=''
  617. try:
  618. if this_var[k]['args']['code'] == code:
  619. return k
  620. except Exception as ex:
  621. raise ex
  622. def get_var(self, varname):
  623. varin = ''
  624. if varname == 'STA':
  625. varin = self.stations
  626. elif varname == 'DB':
  627. varin = self.db_data
  628. else:
  629. varin = None
  630. return varin
    async def connect(self, ids):
        """
        Open the protocol connection for station *ids*.

        Only runs when the station flag is up; on success the connection
        flag is raised and both the station flag and first-time marker
        are cleared — the ordering of these updates is deliberate.
        """
        if self.status_sta[ids]:
            await self.instances[ids].connect()
            self.set_status_conn(ids, True)
            self.set_status_sta(ids, False)
            self.first_time[ids] = False
  637. async def stop(self, ipt, ids):
  638. if self.status_sta[ids]:
  639. icos = [ico_dict for ipt, ico_dict in self.assigned_tasks.items()]
  640. ico_list = []
  641. for ico_dict in icos:
  642. ico_list += [ico for ico, _ids in ico_dict.items()
  643. if _ids == ids]
  644. for ico in ico_list:
  645. self.unset_sta_assigned(ipt, ico, ids)
  646. instance_obj = self.instances.get(ids, Aux())
  647. await instance_obj.stop()
  648. self.set_status_conn(ids, False)
  649. self.set_status_sta(ids, False)
  650. async def reset_station_conn(self, sta_insta, ids, idc):
  651. self.set_status_sta(ids, False)
  652. self.set_status_conn(ids, False)
  653. self.first_time[ids] = True
  654. v = 1
  655. message = ""
  656. if idc:
  657. try:
  658. await sta_insta.close(idc)
  659. message = f"Station {sta_insta.station} closed at {idc}"
  660. except Exception as e:
  661. print("sta insta yet closed")
  662. except asyncio.TimeoutError as te:
  663. print("sta insta yet closed")
  664. return message, logging.INFO
    def connect_to_sta(self, ids):
        """True when *ids* is initialized, not connected, and on its first pass."""
        return self.sta_init[ids] and not self.status_conn[ids] and self.first_time[ids]
    def is_connected(self, ids):
        """True when *ids* is initialized, connected, and past its first pass."""
        return self.sta_init[ids] and self.status_conn[ids] and not self.first_time[ids]
    def add_db_instance(self, ipt):
        """
        Create a new client instance for the destination database used to
        save the raw data, register it under *ipt*, and return it.

        Returns None when no destination database is registered.
        """
        try:
            if self.db_data:
                # NOTE(review): only ONE destination is used — the last key
                # of self.db_data (pop from the key list). Confirm multiple
                # destinations are not expected here.
                rdbs_destinies = [key for key in self.db_data.keys()]
                key_data = rdbs_destinies.pop()
                data = self.db_data.get(key_data)
                name_db = data['name']
                object_db = self.database_objects[name_db]
                # lift nested args up to the top level expected by the client
                data.update({
                    "dbname": data["args"]["dbname"],
                    'address': data["args"]["address"],
                    'hostname': 'atlas'})
                db_insta = object_db(**data)
                self.rethinkdb[ipt] = False
                self.db_init[ipt] = True
                self.db_connect[ipt] = True
                if data['name'] == 'RethinkDB':
                    self.rethinkdb[ipt] = True
                self.db_instances[ipt] = db_insta
                return db_insta
            else:
                print("Ipt not in DB_DATA")
                return None
        except Exception as ex:
            print("Error creando instancia database %s" % format(self.db_data))
            raise ex
    def db_task(self):
        """
        Build and launch the database-saving TaskLoop plus its log-join
        companion, then run the event loop forever (if not running).
        """
        # enable db task
        # Queue log join
        queue_log = asyncio.Queue()
        loop = asyncio.get_event_loop()
        queue_db = self.queue_db
        # 24 independent db "slots", all starting at the CREATE step
        control = {f"DB_{i}":DBSteps.CREATE for i in range(24)}
        counter = {}
        task_name = f"db_task"
        ipt = "DB_PROCESS"
        flags = {key:True for key in control}
        db_instances = {}
        assigned = {key:{} for key in control}
        backup = {key:{} for key in control}
        # positional state threaded through db_work on every loop turn
        db_args = [ipt, control, queue_db, db_instances,
                   counter, now(), now(), flags, assigned, backup, queue_log]
        db_task = TaskLoop(self.db_work, db_args, {"last_dataset":{}},
                           name=task_name)
        db_task.set_name(f"db_task_{ipt}")
        db_task.create()
        # Queue log join
        args = ["db_task_log", queue_log,]
        task_name = f"queue_log_join:{ipt}"
        task_db = TaskLoop(
            self.queue_log_join,
            args,
            {},
            **{"name": task_name})
        task_db.set_name(task_name)
        task_db.create()
        # db task to receive and send to pool rdb
        if not loop.is_running():
            loop.run_forever()
    async def db_work(self, ipt, control, queue_db,
                      db_instances, counter,
                      last_data, last_time, flags, assigned, backup,
                      queue_log, **kwargs):
        """
        One turn of the database worker loop (driven by TaskLoop).

        Phases, in order: (1) reap finished/stuck save futures per db slot,
        re-queueing timed-out datasets; (2) drop stale instances marked
        CREATE; (3) create missing instances; (4) connect instances in the
        CONNECT step; (5) drain queue_db and launch save_data tasks for
        slots in the SAVE step; (6) flush accumulated log messages.

        TODO: Control exceptions
        """
        # task_name = asyncio.Task.current_task()
        level = Logger.INFO
        messages = []
        message = ""
        kwargs["dataset"] = []
        last_dataset = kwargs["last_dataset"]
        now_check = now()
        task_name = f"db_task_{ipt}"
        loop = asyncio.get_event_loop()
        coroname = "db_work"
        exc = MSGException()
        control_changes = {}
        cnow = now()
        free = set()
        # --- phase 1: check previously assigned save futures -------------
        for key, futures in assigned.items():
            db_insta = db_instances.get(key)
            task_group = all(map(lambda f: f.done(), futures.values()))
            falses = {t: f for t, f in futures.items() if not f.done()}
            if task_group:
                # everything finished: slot may take new work
                flags[key] = True
                free.add(key)
            elif falses:
                tosend = {}
                for table_name, future in falses.items():
                    bk = backup.get(key, {}).get(table_name)
                    time = bk.get("time")
                    dataset = bk.get("dataset")
                    # a save stuck for >15s is cancelled and its dataset
                    # re-queued for a fresh attempt
                    if (not future.done()) and (cnow >= time + timedelta(seconds=15)):
                        await db_insta.close()
                        future.cancel()
                        tosend[table_name] = dataset
                        #await queue_db.put([])
                        # TODO
                        control[key] = DBSteps.CONNECT
                        exc = MSGException()
                        try:
                            future.exception()
                        except Exception as e:
                            exc = MSGException(*sys.exc_info())
                        message = f"Task cancelled for {key}->{table_name}"
                        level = Logger.ERROR
                        messages.append(MessageLog(rdbnow(), coroname, level, message, exc))
                if tosend:
                    queue_db.put(tosend)
        for key in free:
            assigned[key] = {}
        # --- phase 2: drop instances that were flagged for re-creation ---
        for key, dbcontrol in control.items():
            db_insta = db_instances.get(key)
            if dbcontrol == DBSteps.CREATE and db_insta:
                if db_insta:
                    await db_insta.close()
                    del db_insta
                    db_insta = None
                    message = f"Deleted weird db instance at ipt {ipt}, db {key}"
                    level = Logger.WARNING
                    messages.append(MessageLog(rdbnow(), coroname, level, message, exc))
        # --- phase 3: (re)create missing instances ------------------------
        for key, dbcontrol in control.items():
            #print("KEY", key,"CONTROL", dbcontrol)
            db_insta = db_instances.get(key)
            if dbcontrol == DBSteps.CREATE or not db_insta:
                db_insta = self.add_db_instance(ipt)
                db_instances[key] = db_insta
                kwargs["instance"] = db_insta
                if db_insta:
                    control_changes[key] = DBSteps.CONNECT
                    message = f"RDB {db_insta} at {ipt} created and passed to connect, db {key}"
                else:
                    message = f"RDB {db_insta} at {ipt} can't created and try to recreate, db {key}"
                    level = Logger.WARNING
                    rprint("cannot create db object")
                #messages.append((level, message, {}))
                messages.append(MessageLog(rdbnow(), coroname, level, message, exc))
        control.update(control_changes)
        #print({k:db.active for k, db in db_instances.items()})
        # --- phase 4: connect instances in the CONNECT step ---------------
        for key, dbcontrol in control.items():
            db_insta = db_instances.get(key)
            if db_insta and dbcontrol == DBSteps.CONNECT:
                if not db_insta.active:
                    exc = MSGException()
                    try:
                        address = db_insta.client_address
                        if db_insta.active:
                            await db_insta.close()
                            db_insta.clean_client()
                        future = asyncio.create_task(db_insta.async_connect())
                        stage = "connect"
                        # await queue_control.put((
                        #     task_name,
                        #     now(),
                        #     stage,
                        #     future))
                        # shield keeps the connect alive if this turn is
                        # cancelled; wait_for bounds it to 20s
                        coro = await wait_for(
                            shield(future),
                            timeout=20)
                        await asyncio.shield(db_insta.list_dbs())
                        await asyncio.shield(db_insta.create_db(db_insta.default_db))
                        await asyncio.shield(db_insta.list_tables())
                        message = f"RDB {db_insta} at {ipt} was connected, then passed to save data, db {key}"
                        level = Logger.INFO
                        control_changes[key] = DBSteps.SAVE
                    except asyncio.CancelledError as e:
                        exc = MSGException(*sys.exc_info())
                        message = f"RDB {db_insta} at {ipt} has canceled task, but protected by shield"
                        level = Logger.ERROR
                        control_changes[key] = DBSteps.CONNECT
                        gprint(f"Reconnect to db IPT -> {ipt}")
                        await db_insta.close()
                        messages.append(MessageLog(rdbnow(), coroname, level, message, exc))
                    except Exception as e:
                        exc = MSGException(*sys.exc_info())
                        message = f"RDB {db_insta} at {ipt} has an exception {e}"
                        level = Logger.CRITICAL
                        control_changes[key] = DBSteps.CONNECT
                        gprint(f"Exception connecting to db IPT -> {ipt}, {e}")
                        await asyncio.sleep(3)
                        await db_insta.close()
                        messages.append(MessageLog(rdbnow(), coroname, level, message, exc))
                    #print(now(),f"{ipt} Rethinkdb connection", db_insta.client_address)
                    #messages.append((level, message, exc))
                    messages.append(MessageLog(rdbnow(), coroname, level, message, exc))
                else:
                    exc = MSGException()
                    message = f"At {ipt} tried to connect but active {db_insta.active}"
                    level = Logger.WARNING
                    #messages.append((level, message, exc))
                    messages.append(MessageLog(rdbnow(), coroname, level, message, exc))
                    control_changes[key] = DBSteps.SAVE
        control.update(control_changes)
        # --- phase 5: drain queue_db and launch save tasks -----------------
        tasks = []
        for key, dbcontrol in control.items():
            db_insta = db_instances.get(key)
            db_flag = flags.get(key, True)
            opts = {}
            #print(now(), f"Saving data db {key}, flag {db_flag}")
            if db_insta.active and dbcontrol == DBSteps.SAVE and (not queue_db.empty()) and db_flag:
                # Read the queue: items are {table_name: [rows]} dicts;
                # merge them per table before saving.
                dataset = {}
                for i in range(queue_db.qsize()):
                    item = queue_db.get()
                    for t, array in item.items():
                        if t not in dataset:
                            dataset[t] = []
                        dataset[t] += array
                    queue_db.task_done()
                i = 0
                # maybe group by table_name and then save as bulk
                flags[key] = False
                opts[key] = True
                assigned[key] = {}
                for table_name, items in dataset.items():
                    # lazily create the table and its DT_GEN index
                    if table_name not in db_insta.tables:
                        create = await db_insta.create_table(table_name)
                        await db_insta.create_index(
                            table_name,
                            index='DT_GEN')
                    message = ""
                    dataset = items
                    if table_name:
                        exc = MSGException()
                        try:
                            # drop rows not newer than the last DT_GEN saved
                            # for this table (de-duplication window)
                            last_dt_gen = last_dataset.get(table_name, rdbnow() - timedelta(seconds=5))
                            dt_gens = [elem.get("DT_GEN") for elem in
                                       dataset if
                                       elem.get("DT_GEN") > last_dt_gen]
                            last_dt = max(dt_gens)
                            filtered_dataset = [d for d in dataset if
                                                d["DT_GEN"] in dt_gens]
                            future = asyncio.create_task(
                                db_insta.save_data(
                                    table_name,
                                    filtered_dataset),
                                name=f"save_data_{key}_{table_name}_{len(dataset)}")
                            for d in filtered_dataset:
                                print("SAVING",
                                      my_random_string(),
                                      f"{table_name} {d.get('TRACE', -1)} {d['DT_GEN']}")
                            last_dataset[table_name] = last_dt
                            tasks.append(future)
                            assigned[key][table_name] = future
                            backup[key][table_name] = {
                                "time": now(),
                                "dataset": dataset}
                            if table_name in counter:
                                counter[table_name] += len(dataset)
                            else:
                                counter[table_name] = 0
                            if counter[table_name] == 60:
                                message = f"At ipt {ipt} saved successfully last {counter[table_name]}" +\
                                    f" messages for {table_name}, last " +\
                                    f"result"
                                level = Logger.INFO
                                counter[table_name] = 0
                            last_data = now()
                        except asyncio.CancelledError as e:
                            message = f"RDB {db_insta} at {ipt} has canceled task, but protected by shield"
                            level = Logger.ERROR
                            exc = MSGException(*sys.exc_info())
                            messages.append(MessageLog(rdbnow(), coroname, level, message, exc))
                            control_changes[key] = DBSteps.CONNECT
                            gprint(f"Reconnect to db IPT -> {ipt}")
                            await db_insta.close()
                            break
                        except Exception as e:
                            message = f"RDB {db_insta} at {ipt} has an exception {e}"
                            level = Logger.CRITICAL
                            exc = MSGException(*sys.exc_info())
                            messages.append(MessageLog(rdbnow(), coroname, level, message, exc))
                            control_changes[key] = DBSteps.CONNECT
                            gprint(f"Exception connecting to db {db_insta.client_address} IPT -> {ipt}, {e}")
                            await db_insta.close()
                            break
                        if message:
                            messages.append(MessageLog(rdbnow(), coroname, level, message, exc))
        control.update(control_changes)
        # fire-and-forget: results are reaped via `assigned` next turn
        asyncio.gather(*tasks, return_exceptions=True)
        # stage = "free"
        # await queue_control.put((
        #     task_name,
        #     now(),
        #     stage,
        #     {}))
        #gprint(f"No data on queue, db_insta {db_insta}")
        if queue_db.empty():
            await asyncio.sleep(1)
        # --- phase 6: flush accumulated log messages -----------------------
        if messages:
            queue_messages = {"log": [msg.rdb for msg in messages]}
            queue_db.put(queue_messages)
        # back off when the turn ended in a warning/error state
        if level not in {logging.INFO, logging.DEBUG}:
            await asyncio.sleep(5)
        # TaskLoop contract: return the next turn's (args, kwargs)
        return [ipt, control, queue_db,
                db_instances, counter,
                last_data, last_time,
                flags, assigned, backup,
                queue_log], kwargs
  980. async def process_data(self,
  981. ipt:str,
  982. ico:str,
  983. control:CSteps,
  984. sta_insta:Gsof,
  985. last_data:datetime,
  986. last_time:datetime,
  987. counter:int,
  988. queue_control:asyncio.Queue,
  989. queue_log:asyncio.Queue,
  990. *args, **kwargs
  991. ) -> Tuple[Tuple[
  992. str,
  993. str,
  994. CSteps,
  995. datetime,
  996. datetime,
  997. int,
  998. asyncio.Queue,
  999. asyncio.Queue], Dict[str, Any]]:
  1000. loop = asyncio.get_event_loop()
  1001. ids = self.assigned_tasks.get(ipt, {}).get(ico, None)
  1002. assigned_tasks = self.assigned_tasks.get(ipt, {})
  1003. ids = assigned_tasks.get(ico)
  1004. level = Logger.INFO
  1005. messages = []
  1006. message = ""
  1007. task_name = f"process_sta_task:{ipt}:{ico}"
  1008. coroname = "process_data"
  1009. connected = kwargs["connected"]
  1010. created = kwargs["created"]
  1011. # print(now(), task_name, control, f"IDS {ids}")
  1012. if now() >= last_time + timedelta(seconds=5):
  1013. stage = "update"
  1014. # await queue_control.put((task_name, now(), stage, sta_insta))
  1015. # counter["DB_WORK"] = 0
  1016. if ids:
  1017. if self.changes.get(ids, False):
  1018. """
  1019. Linked to db_loop, if there are a new change then
  1020. create new instance,
  1021. """
  1022. del sta_insta
  1023. sta_insta = None
  1024. control = CSteps.CREATE
  1025. code_db = self.stations.get(ids, {}).get('db')
  1026. code = self.stations.get(ids, {}).get('code')
  1027. idd = self.get_id_by_code('DBDATA', code_db)
  1028. idc = self.idc.get(ids)
  1029. if idc and sta_insta:
  1030. if idc not in sta_insta.clients:
  1031. del sta_insta
  1032. sta_insta = None
  1033. control = CSteps.CREATE
  1034. #############
  1035. # For some actions that modify status of
  1036. # the variables on this coroutine
  1037. # self.free_ids[ids] = False
  1038. # while self.wait.get(ids, False):
  1039. # await asyncio.sleep(.01)
  1040. # if not self.status_sta[ids]:
  1041. # v = 1
  1042. ##############
  1043. """
  1044. Si no se ha creado instancia de conexion a estación
  1045. se crea
  1046. sta_init un diccionario {ids:bool}
  1047. indice si la estación fue inicializada
  1048. """
  1049. check_0 = now()
  1050. if control == CSteps.CREATE:
  1051. # step 0 initialize the objects, source and end
  1052. exc = MSGException()
  1053. try:
  1054. sta_insta, table_name = self.add_sta_instance(
  1055. ids, loop)
  1056. # print(now(), f"STA INSTA {table_name} created")
  1057. try:
  1058. ring_buffer = kwargs.get("ring_buffer")
  1059. if not ring_buffer:
  1060. ring_buffer = RingBuffer(name=table_name,
  1061. size=self.buffer_size)
  1062. kwargs["ring_buffer"] = ring_buffer
  1063. else:
  1064. ring_buffer.clear()
  1065. except Exception as e:
  1066. print("Error al crear ring buffer", e)
  1067. message = f"RingBuffer for {table_name} can't be created because {e}"
  1068. level = Logger.ERROR
  1069. exc = MSGException(*sys.exc_info())
  1070. messages.append(MessageLog(rdbnow(), coroname, level, message, exc))
  1071. kwargs["table_name"] = table_name
  1072. message = f"Station instance {sta_insta} created "+\
  1073. f"for {table_name}, control {control.value}"
  1074. level = Logger.INFO
  1075. if sta_insta:
  1076. control = CSteps.CONNECT
  1077. self.changes[ids] = False
  1078. except Exception as ex:
  1079. exc = MSGException(*sys.exc_info())
  1080. message = f"PD_00: Conexión de estación con falla-> {ids}:{code}"
  1081. level = Logger.ERROR
  1082. idc = self.idc.get(ids, None)
  1083. msg, close_level = await self.reset_station_conn(sta_insta, ids, idc)
  1084. control = CSteps.CREATE
  1085. kwargs["origin_exception"] = f"PD_00 + {code}"
  1086. if message:
  1087. messages.append(MessageLog(rdbnow(), coroname, level, message, exc))
  1088. """
  1089. Si no se ha creado la instanca de database:
  1090. se crea la db instancia
  1091. """
  1092. """
  1093. En caso que instancia de collect a estacion se haya iniciado
  1094. 1° conectar
  1095. 2° extraer datos
  1096. """
  1097. else:
  1098. await asyncio.sleep(.1)
  1099. exc = MSGException()
  1100. message = ""
  1101. check_1 = now()
  1102. if sta_insta:
  1103. queue_db = kwargs.get("queue_db")
  1104. table_name = kwargs.get("table_name")
  1105. if control == CSteps.CONNECT:
  1106. # step 1
  1107. # si es primera vez de acceso
  1108. # conectar al socket correspondiente
  1109. # step 1.a connect and set flags to run data
  1110. code = sta_insta.station
  1111. idc = None
  1112. exc = MSGException()
  1113. try:
  1114. future = asyncio.create_task(sta_insta.connect())
  1115. stage = "connect"
  1116. # print(now(), f"STA INSTA {table_name} connected")
  1117. # drop control
  1118. # await queue_control.put((
  1119. # task_name,
  1120. # now(),
  1121. # stage,
  1122. # future))
  1123. idc = await wait_for(
  1124. shield(future),
  1125. timeout=20)
  1126. check_1 = now()
  1127. kwargs["connected"] = True
  1128. if idc:
  1129. self.idc[ids] = idc
  1130. self.set_status_sta(ids, True)
  1131. self.set_status_conn(ids, True)
  1132. self.first_time[ids] = False
  1133. check_a = now()
  1134. control = CSteps.COLLECT
  1135. message = f"Station {sta_insta} connected at"+\
  1136. f" {ipt} "+\
  1137. f" to address {sta_insta.address}"
  1138. level = Logger.INFO
  1139. else:
  1140. control = CSteps.CONNECT
  1141. message = f"Station {sta_insta} not connected at"+\
  1142. f" {ipt} "+\
  1143. f" to address {sta_insta.address}"
  1144. level = Logger.WARNING
  1145. messages.append(MessageLog(rdbnow(), coroname, level, message, exc))
  1146. except asyncio.TimeoutError as e:
  1147. exc = MSGException(*sys.exc_info())
  1148. message = f"Tiempo fuera para conectar instancia " +\
  1149. f"de estación {sta_insta} en ipt {ipt}, idc <{idc}>"
  1150. level = Logger.ERROR
  1151. control = CSteps.CONNECT
  1152. msg, lvl = await self.reset_station_conn(
  1153. sta_insta,
  1154. ids,
  1155. idc)
  1156. control = CSteps.CONNECT
  1157. kwargs["connected"] = False
  1158. messages.append(MessageLog(rdbnow(), coroname, level, message, exc))
  1159. except Exception as ex:
  1160. exc = MSGException(*sys.exc_info())
  1161. message = f"PD_02: Error al conectar estación {sta_insta}, ids {ids}, ipt {ipt}, {ex}"
  1162. level = Logger.ERROR
  1163. control = CSteps.CONNECT
  1164. msg, lvl = await self.reset_station_conn(
  1165. sta_insta,
  1166. ids,
  1167. idc)
  1168. control = CSteps.CONNECT
  1169. kwargs["connected"] = False
  1170. messages.append(MessageLog(rdbnow(), coroname, level, message, exc))
  1171. # si ya esta conectado :), obtener dato
  1172. """
  1173. Si ya está inicializado y conectad
  1174. proceder a obtener datos
  1175. """
  1176. sta_dict = {}
  1177. # print(now(), f"STA INSTA {table_name} pre-collect", control, table_name)
  1178. if control == CSteps.COLLECT and table_name:
  1179. check_2 = now()
  1180. # if connected:
  1181. # print(f"Table {table_name}", f"Check connect {check_1}", f"{check_2}", "Connected", connected)
  1182. # print(f"From connect to collect first {(check_2-check_1)}")
  1183. # connected = False
  1184. ring_buffer = kwargs.get("ring_buffer")
  1185. code = sta_insta.station
  1186. idc = self.idc.get(ids)
  1187. exc = MSGException()
  1188. # just for checking
  1189. # step 1.b collect data and process to save the raw data
  1190. try:
  1191. pre_get = now()
                    async def get_records():
                        """Read one record from the station protocol and enqueue it for DB storage.

                        Closure over ``process_data`` locals (``sta_insta``, ``idc``, ``ipt``,
                        ``ids``, ``table_name``, ``ring_buffer``, ``queue_db``).

                        Returns:
                            tuple: (delta, last_data) where ``delta`` is the age of the
                            record in seconds (local clock minus GPS generation time) and
                            ``last_data`` is the reception timestamp (``rdbnow()``).
                        """
                        # Protocol requires reading the message header before the records.
                        # NOTE(review): ``set_header`` and ``done`` are never used afterwards;
                        # presumably the calls are kept for their protocol side effects — confirm.
                        set_header = await sta_insta.get_message_header(idc)
                        done, sta_dict = await sta_insta.get_records()
                        # Derive the data-generation datetime from the GPS fields of the record.
                        dt0, source = gps_time(sta_dict, sta_insta.tipo)
                        # RethinkDB-native timestamp for the generation time.
                        dt_iso = rdb.iso8601(dt0.isoformat())
                        rnow = now()
                        recv_now = rdbnow()
                        # print(rnow)
                        # Latency: seconds elapsed between generation and local reception.
                        # NOTE(review): assumes now() and dt0 share the same timezone basis — confirm.
                        delta = (rnow - dt0).total_seconds()
                        # Annotate the record with provenance (TRACE) and timing metadata
                        # before it is persisted.
                        sta_dict.update({
                            "TRACE": (ipt, ids, idc),
                            'DT_GEN': dt_iso,
                            'DT_RECV': recv_now,
                            "DELTA_TIME": delta})
                        # DATA_KEYS maps record keys -> Data field names; missing keys become None.
                        data = Data(**{v:sta_dict.get(key) for key,v in DATA_KEYS.items()})
                        # Keep the most recent records available in the in-memory ring buffer.
                        ring_buffer.add(data)
                        last_data = recv_now
                        # Hand the annotated record to the database writer task.
                        await queue_db.put((table_name, sta_dict))
                        return delta, last_data
  1211. # Control criteria
  1212. # queue_db.pu