
/paasmaker/common/configuration/configuration.py

https://bitbucket.org/paasmaker/paasmaker
#
# Paasmaker - Platform as a Service
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
import unittest
import os
import signal
import logging
import tempfile
import uuid
import shutil
import hashlib
import subprocess
import socket
import datetime
import time
import copy
import traceback
from distutils.spawn import find_executable

import paasmaker
from paasmaker.util.configurationhelper import InvalidConfigurationParameterException
from paasmaker.util.configurationhelper import InvalidConfigurationFormatException
from paasmaker.util.configurationhelper import NoConfigurationFileException
from paasmaker.util.configurationhelper import StrictAboutExtraKeysColanderMappingSchema
from paasmaker.common.core import constants

from pubsub import pub
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy import func
import sqlalchemy
import colander
from paasmaker.thirdparty.pika import TornadoConnection
from paasmaker.thirdparty.tornadoredis import Client as TornadoRedisClient
import pika
import yaml

# For parsing command line options.
from tornado.options import define, options
import tornado.testing

# Set up logging for this module.
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())

# Set up command line options.
define("debug", type=int, default=0, help="Enable Tornado debug mode. Also prevents Paasmaker from forking into the background.")
define("configfile", type=str, default="", help="Override configuration file.")

# Default ports.
DEFAULT_API_PORT = 42500

DEFAULT_ROUTER_REDIS_MASTER = 42510
DEFAULT_ROUTER_REDIS_SLAVE = 42511
DEFAULT_ROUTER_REDIS_STATS = 42512
DEFAULT_REDIS_JOBS = 42513

DEFAULT_NGINX_DIRECT = 42530
DEFAULT_NGINX_PORT80 = 42531
DEFAULT_NGINX_PORT443 = 42532

DEFAULT_APPLICATION_MIN = 42600
DEFAULT_APPLICATION_MAX = 42699
# The Configuration Schema.
class PluginSchema(StrictAboutExtraKeysColanderMappingSchema):
    name = colander.SchemaNode(colander.String(),
        title="Symbolic name",
        description="The symbolic name for this plugin, used to match it up in application configuration")
    klass = colander.SchemaNode(colander.String(),
        name="class",
        title="Plugin class",
        description="The class used to provide this plugin")
    title = colander.SchemaNode(colander.String(),
        title="Friendly name",
        description="The friendly name for this plugin")
    parameters = colander.SchemaNode(colander.Mapping(unknown='preserve'),
        title="Plugin Parameters",
        description="Parameters for this particular plugin",
        missing={},
        default={})

class PluginsSchema(colander.SequenceSchema):
    plugin = PluginSchema()

class PeriodicSchema(StrictAboutExtraKeysColanderMappingSchema):
    plugin = colander.SchemaNode(colander.String(),
        title="Periodic Plugin",
        description="The periodic plugin to run.")
    interval = colander.SchemaNode(colander.Integer(),
        title="Periodic Interval",
        description="How often to run this periodic plugin.")

class PeriodicsSchema(colander.SequenceSchema):
    periodic = PeriodicSchema()

class PeriodicsOnlySchema(StrictAboutExtraKeysColanderMappingSchema):
    periodics = PeriodicsSchema()

class ScmListerSchema(StrictAboutExtraKeysColanderMappingSchema):
    for_name = colander.SchemaNode(colander.String(),
        name="for",
        title="SCM name",
        description="The SCM that this list of listers is for.")
    plugins = colander.SchemaNode(colander.Sequence(),
        colander.SchemaNode(colander.String()),
        title="Plugins List",
        description="A list of plugin names that can list repositories for this SCM.",
        default=[],
        missing=[])

class ScmListersSchema(colander.SequenceSchema):
    lister = ScmListerSchema()

class HealthPluginSchema(StrictAboutExtraKeysColanderMappingSchema):
    plugin = colander.SchemaNode(colander.String(),
        title="Plugin name",
        description="Plugin name for this particular health check.")
    order = colander.SchemaNode(colander.Integer(),
        title="Plugin order",
        description="The order of execution for this particular plugin. Plugins with the same order are run at the same time. Plugins with lower order numbers are run first. Order numbers do not need to be consecutive.")
    parameters = colander.SchemaNode(colander.Mapping(unknown='preserve'),
        title="Plugin Parameters",
        description="Parameters for this particular plugin",
        missing={},
        default={})

class HealthPluginsSchema(colander.SequenceSchema):
    health = HealthPluginSchema()

    @staticmethod
    def default():
        return {'plugins': []}

class HealthGroupSchema(StrictAboutExtraKeysColanderMappingSchema):
    name = colander.SchemaNode(colander.String(),
        title="Symbolic name",
        description="The symbolic name for this health check group.")
    title = colander.SchemaNode(colander.String(),
        title="Friendly name",
        description="The friendly name for this health check group.")
    period = colander.SchemaNode(colander.Integer(),
        title="Group recheck period",
        description="How often to run this health check group, in seconds.")
    plugins = HealthPluginsSchema(
        title="Plugins",
        description="A set of plugins and their run order.",
        default=HealthPluginsSchema.default(),
        missing=HealthPluginsSchema.default()
    )

class HealthGroupsSchema(colander.SequenceSchema):
    group = HealthGroupSchema()

class HealthGroupsOnlySchema(StrictAboutExtraKeysColanderMappingSchema):
    groups = HealthGroupsSchema()

class HealthCombinedSchema(StrictAboutExtraKeysColanderMappingSchema):
    groups = HealthGroupsSchema(
        missing=[],
        default=[]
    )
    enabled = colander.SchemaNode(colander.Boolean(),
        title="Run health checks",
        description="If true, run health checks on this node. If you have multiple pacemakers, you will only want to run this on one node. However, you could configure two pacemakers to perform different health checks.",
        missing=True,
        default=True)
    use_default_checks = colander.SchemaNode(colander.Boolean(),
        title="Include default health checks",
        description="Include default health checks. These are added to any groups. If you do enable this, you should not define a 'default' group. The default checks are in paasmaker/data/defaults/health.yml.",
        missing=True,
        default=True)

    @staticmethod
    def default():
        return {'enabled': True, 'groups': [], 'use_default_checks': True}
class PacemakerSchema(StrictAboutExtraKeysColanderMappingSchema):
    enabled = colander.SchemaNode(colander.Boolean(),
        title="Pacemaker enabled",
        description="Pacemaker is enabled for this node",
        missing=False,
        default=False)
    require_ssl = colander.SchemaNode(
        colander.Boolean(),
        title="Require SSL",
        description="If true, require SSL for access to the console and API.",
        missing=False,
        default=False
    )
    dsn = colander.SchemaNode(colander.String(),
        title="Database DSN",
        description="Database connection details for this pacemaker, in SQLAlchemy format")
    database_options = colander.SchemaNode(
        colander.Mapping(unknown='preserve'),
        title="Additional Database options",
        description="Additional Database options, passed as keyword arguments to create_engine. See http://docs.sqlalchemy.org/en/rel_0_7/core/engines.html#engine-creation-api for the available options. This can be used to tweak database connection details.",
        missing={},
        default={})
    login_age = colander.SchemaNode(colander.Integer(),
        title="Login age",
        description="The number of days to grant access when logging in, before requiring a new login.",
        default=7,
        missing=7)
    scmlisters = ScmListersSchema(
        title="SCM listers",
        description="A set of SCM listers and their matching SCMs.",
        missing=[],
        default=[])
    cluster_hostname = colander.SchemaNode(colander.String(),
        title="Cluster Hostname",
        description="The hostname postfix used to automatically generate hostnames when required. Eg, each application version gets a URL - N.<type>.<application>.cluster_hostname.")
    pacemaker_prefix = colander.SchemaNode(colander.String(),
        title="Pacemaker Prefix",
        description="The prefix added to the cluster hostname to make a hostname for the pacemakers.",
        missing="pacemaker",
        default="pacemaker")

    # TODO: Consider the security implications of this more.
    allow_supertoken = colander.SchemaNode(colander.Boolean(),
        title="Allow Super Token authentication",
        description="If true, enable super token authentication.",
        default=False,
        missing=False)
    super_token = colander.SchemaNode(colander.String(),
        title="Super authentication token",
        description="An authentication token that can be used to do anything, specifically designed to bootstrap the system. Also used for encrypting cookies.")
    run_crons = colander.SchemaNode(colander.Boolean(),
        title="Run cron tasks",
        description="If true, run the cron tasks on this node. If you have multiple pacemakers, you won't want to do this on more than one of them.",
        missing=True,
        default=True)
    allow_uploads = colander.SchemaNode(colander.Boolean(),
        title="Enable file uploads",
        description="Allow file uploads to this node.",
        missing=True,
        default=True)
    health = HealthCombinedSchema(
        title="Health Check configuration",
        description="Options for the health checking system.",
        missing=HealthCombinedSchema.default(),
        default=HealthCombinedSchema.default()
    )
    frontend_domain_postfix = colander.SchemaNode(colander.String(),
        title="Frontend domain name postfix",
        description="In the web interface, append this string to any hostnames that the system generates for users. This is designed to add your router's port to the cluster domain name at display time.",
        default="",
        missing="")
    node_connectivity_check_timeout = colander.SchemaNode(
        colander.Float(),
        title="Node Connectivity Check timeout",
        description="The timeout when hitting the /info endpoint on a remote node before accepting its registration.",
        default=1.0,
        missing=1.0
    )

    @staticmethod
    def default():
        return {'enabled': False, 'scmlisters': [], 'health': HealthCombinedSchema.default()}
class HeartSchema(StrictAboutExtraKeysColanderMappingSchema):
    enabled = colander.SchemaNode(colander.Boolean(),
        title="Heart enabled",
        description="Heart is enabled for this node",
        missing=False,
        default=False)
    working_dir = colander.SchemaNode(colander.String(),
        title="Working directory",
        description="Directory where active application instance files are stored. By default, Paasmaker will choose a path inside the scratch directory. You can customize this if your heart node has a special storage location optimised for application instances.",
        # None here means to automatically figure out the path.
        missing=None,
        default=None)
    shutdown_on_exit = colander.SchemaNode(colander.Boolean(),
        title="Shutdown applications on exit",
        description="Shutdown all applications on exit, rather than leaving them running. This is designed for testing and development, and not for production.",
        default=False,
        missing=False)

    @staticmethod
    def default():
        return {'enabled': False}

class RedisConnectionSchema(StrictAboutExtraKeysColanderMappingSchema):
    host = colander.SchemaNode(colander.String(),
        title="Hostname",
        description="Redis Hostname")
    port = colander.SchemaNode(colander.Integer(),
        title="Port",
        description="Redis Port")
    password = colander.SchemaNode(colander.String(),
        title="Password",
        description="Redis Password",
        missing=None,
        default=None)
    managed = colander.SchemaNode(colander.Boolean(),
        title="Managed",
        description="If true, this is a managed redis instance. Paasmaker will create it on demand and manage storing its data.",
        default=False,
        missing=False)
    shutdown = colander.SchemaNode(colander.Boolean(),
        title="Shutdown with node",
        description="If true, this managed redis instance is shut down when the node is shut down.",
        default=False,
        missing=False)

    @staticmethod
    def default_router_table():
        return {'host': 'localhost', 'port': DEFAULT_ROUTER_REDIS_MASTER, 'password': None, 'managed': True, 'shutdown': False}

    @staticmethod
    def default_router_stats():
        return {'host': 'localhost', 'port': DEFAULT_ROUTER_REDIS_STATS, 'password': None, 'managed': True, 'shutdown': False}

    @staticmethod
    def default_jobs():
        return {'host': 'localhost', 'port': DEFAULT_REDIS_JOBS, 'password': None, 'managed': True, 'shutdown': False}

class RedisConnectionSlaveSchema(RedisConnectionSchema):
    enabled = colander.SchemaNode(colander.Boolean(),
        title="Enable automatic slaving",
        description="Enable automatic slaving of this router table to the supplied values.",
        missing=False,
        default=False)

    @staticmethod
    def default():
        return {'enabled': False}
class NginxSchema(StrictAboutExtraKeysColanderMappingSchema):
    managed = colander.SchemaNode(colander.Boolean(),
        title="Enable managed nginx",
        description="If enabled, a managed version of NGINX is started as appropriate, pointing to the correct resources for this node. Note that you must specify a port, and it must be >1024, as this node won't be run as root.",
        default=False,
        missing=False)
    port_direct = colander.SchemaNode(colander.Integer(),
        title="Managed NGINX port - direct connection",
        description="The port to run the managed NGINX on. This port sends X-Forwarded-Port: <port_direct> to applications.",
        default=DEFAULT_NGINX_DIRECT,
        missing=DEFAULT_NGINX_DIRECT)
    port_80 = colander.SchemaNode(colander.Integer(),
        title="Managed NGINX port - port 80",
        description="The port to run the managed NGINX on. This port sends X-Forwarded-Port: 80 to applications.",
        default=DEFAULT_NGINX_PORT80,
        missing=DEFAULT_NGINX_PORT80)
    port_443 = colander.SchemaNode(colander.Integer(),
        title="Managed NGINX port - port 443",
        description="The port to run the managed NGINX on. This port sends X-Forwarded-Port: 443 and X-Forwarded-Proto: https to applications.",
        default=DEFAULT_NGINX_PORT443,
        missing=DEFAULT_NGINX_PORT443)
    shutdown = colander.SchemaNode(colander.Boolean(),
        title="Shutdown with node",
        description="If true, this managed nginx instance is shut down when the node is shut down.",
        default=False,
        missing=False)

    @staticmethod
    def default():
        return {'managed': True, 'port_direct': DEFAULT_NGINX_DIRECT, 'port_80': DEFAULT_NGINX_PORT80, 'port_443': DEFAULT_NGINX_PORT443, 'shutdown': False}

class RouterSchema(StrictAboutExtraKeysColanderMappingSchema):
    enabled = colander.SchemaNode(colander.Boolean(),
        title="Router enabled",
        description="Router is enabled for this node",
        missing=False,
        default=False)
    process_stats = colander.SchemaNode(colander.Boolean(),
        title="Process Stats",
        description="If true, process the special JSON formatted nginx log file for statistics.",
        default=True,
        missing=True)
    stats_log = colander.SchemaNode(colander.String(),
        title="Stats log location",
        description="NGINX Paasmaker stats log file location. If Paasmaker is managing the nginx, it sets this location automatically.")
    stats_interval = colander.SchemaNode(colander.Integer(),
        title="Stats read interval",
        description="The interval between reading log files, in milliseconds.",
        default=500,
        missing=500)
    nginx = NginxSchema(
        title="Managed NGINX configuration",
        description="The configuration for a managed Nginx that Paasmaker can start for you.",
        missing=NginxSchema.default(),
        default=NginxSchema.default()
    )

    @staticmethod
    def default():
        return {
            'enabled': False,
            'nginx': NginxSchema.default()
        }

class RedisSchema(StrictAboutExtraKeysColanderMappingSchema):
    table = RedisConnectionSchema(default=RedisConnectionSchema.default_router_table(), missing=RedisConnectionSchema.default_router_table())
    stats = RedisConnectionSchema(default=RedisConnectionSchema.default_router_stats(), missing=RedisConnectionSchema.default_router_stats())
    slaveof = RedisConnectionSlaveSchema(default=RedisConnectionSlaveSchema.default(), missing=RedisConnectionSlaveSchema.default())
    jobs = RedisConnectionSchema(default=RedisConnectionSchema.default_jobs(), missing=RedisConnectionSchema.default_jobs())

    @staticmethod
    def default():
        return {
            'table': RedisConnectionSchema.default_router_table(),
            'stats': RedisConnectionSchema.default_router_stats(),
            'slaveof': RedisConnectionSlaveSchema.default(),
            'jobs': RedisConnectionSchema.default_jobs()
        }

class MiscPortsSchema(StrictAboutExtraKeysColanderMappingSchema):
    minimum = colander.SchemaNode(colander.Integer(),
        title="Minimum port",
        description="Lower end of the port range to search for free ports on.",
        missing=DEFAULT_APPLICATION_MIN,
        default=DEFAULT_APPLICATION_MIN)
    maximum = colander.SchemaNode(colander.Integer(),
        title="Maximum port",
        description="Upper end of the port range to search for free ports on.",
        missing=DEFAULT_APPLICATION_MAX,
        default=DEFAULT_APPLICATION_MAX)

    @staticmethod
    def default():
        return {'minimum': DEFAULT_APPLICATION_MIN, 'maximum': DEFAULT_APPLICATION_MAX}

class MasterSchema(StrictAboutExtraKeysColanderMappingSchema):
    host = colander.SchemaNode(colander.String(),
        title="Master Node",
        description="The master node for this cluster.")
    port = colander.SchemaNode(colander.Integer(),
        title="Master Node HTTP port",
        description="The master node HTTP port for API requests.",
        default=DEFAULT_API_PORT,
        missing=DEFAULT_API_PORT)

    @staticmethod
    def default():
        return {'host': 'localhost', 'port': DEFAULT_API_PORT, 'isitme': False}
class ConfigurationSchema(StrictAboutExtraKeysColanderMappingSchema):
    http_port = colander.SchemaNode(colander.Integer(),
        title="HTTP Port",
        description="The HTTP port that this node listens on for API requests",
        missing=DEFAULT_API_PORT,
        default=DEFAULT_API_PORT)
    https_port = colander.SchemaNode(
        colander.Integer(),
        title="HTTPS Port",
        description="The HTTPS port that this node listens on for API requests. To enable, you must set this to a port and also set ssl_key and ssl_cert options.",
        missing=None,
        default=None
    )
    ssl_options = colander.SchemaNode(
        colander.Mapping(unknown='preserve'),
        title="SSL options",
        description="The SSL options if SSL is enabled. You need at least `keyfile` and `certfile`. You can put any other options in here from http://docs.python.org/2/library/ssl.html#ssl.wrap_socket",
        missing={},
        default={}
    )
    misc_ports = MiscPortsSchema(
        title="Misc Ports",
        description="The range of ports allocated to application instances, or other cases where a free port is required.",
        default=MiscPortsSchema.default(),
        missing=MiscPortsSchema.default()
    )
    default_plugins = colander.SchemaNode(colander.Boolean(),
        title="Set up default plugins",
        description="If true, sets up a set of internal plugins for job handling and other tasks. If you turn this off, you will have full control over all plugins - and will need to include job plugins. The default plugins are read from paasmaker/data/defaults/plugins.yml.",
        missing=True,
        default=True)
    my_name = colander.SchemaNode(colander.String(),
        title="Node name",
        description="Friendly node name. If not supplied, Paasmaker will attempt to detect the hostname.",
        missing=None,
        default=None)
    my_route = colander.SchemaNode(colander.String(),
        title="Route to this node",
        description="The route (IP address or hostname) that should be used to contact this host. If not specified, it will be automatically determined.",
        missing=None,
        default=None)
    cluster_name = colander.SchemaNode(
        colander.String(),
        title="Friendly Cluster name",
        description="The friendly cluster name presented to users when they log in, and also in page titles to identify this cluster.",
        missing=None,
        default=None
    )
    node_token = colander.SchemaNode(colander.String(),
        title="Node Authentication Token",
        description="Token used by nodes to validate each other. All nodes should have the same token.")
    log_directory = colander.SchemaNode(colander.String(),
        title="Log Directory",
        description="Directory used to store log files. If set to None, it will choose a path automatically.",
        # None here means to automatically figure out/generate the path.
        default=None,
        missing=None)
    server_log_level = colander.SchemaNode(colander.String(),
        title="Server log level",
        description="The log level for the server log file. One of DEBUG, INFO, WARNING, ERROR, or CRITICAL. DEBUG is particularly noisy.",
        default="INFO",
        missing="INFO")
    scratch_directory = colander.SchemaNode(colander.String(),
        title="Scratch Directory",
        description="Directory used for data storage. Instances are stored here, as well as run time configuration, managed daemon files and data. This should be a persistent location.",
        default="scratch",
        missing="scratch")
    pid_path = colander.SchemaNode(colander.String(),
        title="PID path",
        description="The path at which to write the PID file.",
        default="paasmaker.pid",
        missing="paasmaker.pid")
    master = MasterSchema(
        title="Master Node details",
        description="Connection information for the master node.",
        default=MasterSchema.default(),
        missing=MasterSchema.default()
    )
    tags = colander.SchemaNode(colander.Mapping(unknown='preserve'),
        title="User tags",
        description="A generic set of tags or information stored for the node. Can be used to write custom placement filters, or find nodes. Applications are passed these tags as well, so you will want to be careful what you put in here.",
        missing={},
        default={})
    node_report_interval = colander.SchemaNode(colander.Integer(),
        title="Node report interval",
        description="How long in milliseconds between reports back to the master node. Default is 60 seconds.",
        default=60000,
        missing=60000)
    job_manager_check_interval = colander.SchemaNode(
        colander.Integer(),
        title="Job Manager check interval",
        description="How often, in milliseconds, between checks of the job manager backend to ensure it's still connected.",
        default=5000,
        missing=5000
    )
    pacemaker = PacemakerSchema(
        title="Pacemaker configuration",
        description="The configuration options for the Pacemaker, if enabled.",
        default=PacemakerSchema.default(),
        missing=PacemakerSchema.default()
    )
    heart = HeartSchema(
        title="Heart configuration",
        description="The configuration options for the heart, if enabled.",
        default=HeartSchema.default(),
        missing=HeartSchema.default()
    )
    router = RouterSchema(
        title="Router configuration",
        description="The configuration options for the router, if enabled.",
        default=RouterSchema.default(),
        missing=RouterSchema.default()
    )
    redis = RedisSchema(
        title="Redis configuration",
        description="A complete set of information on how to connect to Redis instances. If omitted, defaults to a fully managed single node configuration.",
        default=RedisSchema.default(),
        missing=RedisSchema.default()
    )
    plugins = PluginsSchema(
        title="Plugins",
        description="A list of plugins registered on this node. It's up to you to make sure they're applicable for this node type.",
        missing=[],
        default=[])
    periodics = PeriodicsSchema(
        title="Periodic tasks",
        description="A list of periodic tasks to run on this node.",
        missing=[],
        default=[]
    )
    default_periodics = colander.SchemaNode(colander.Boolean(),
        title="Include default periodics",
        description="If true, use the default periodics. These are merged with any periodics that you supply.",
        missing=True,
        default=True
    )

    # Server related configuration. This is for an Ubuntu server, set up as
    # per the installation instructions. Obviously, for other platforms
    # this will need to be altered.
    redis_binary = colander.SchemaNode(colander.String(),
        title="Redis server binary",
        description="The full path to the redis server binary.",
        default=find_executable("redis-server"),
        missing=find_executable("redis-server"))
    nginx_binary = colander.SchemaNode(colander.String(),
        title="nginx server binary",
        description="The full path to the nginx server binary.",
        default="/usr/local/openresty/nginx/sbin/nginx",
        missing="/usr/local/openresty/nginx/sbin/nginx")
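
# Illustrative sketch only (not part of the original file): how a raw mapping
# is validated against ConfigurationSchema. The option values below are
# hypothetical; in real use the file is loaded through load_from_file(), which
# is expected to run this validation and then post_load().
#
#   raw = yaml.safe_load("""
#   node_token: example-node-token
#   master:
#     host: localhost
#   """)
#   checked = ConfigurationSchema().deserialize(raw)
#   # Keys that are absent fall back to their schema 'missing' values, e.g.
#   # checked['http_port'] == DEFAULT_API_PORT, while unknown top-level keys
#   # are rejected because the schema is strict about extra keys.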
class PluginsOnlySchema(StrictAboutExtraKeysColanderMappingSchema):
    plugins = PluginsSchema(
        title="Plugins",
        description="The list of plugins.",
        missing=[],
        default=[])

class ImNotA(Exception):
    """
    Base exception thrown when a configuration method
    is called that is not supported by the node type.
    """
    pass

class ImNotAHeart(ImNotA):
    """
    Thrown when a heart-only method is called from a non-heart node.
    """
    pass

class ImNotAPacemaker(ImNotA):
    """
    Thrown when a pacemaker-only method is called from a non-pacemaker
    node.
    """
    pass

class ImNotARouter(ImNotA):
    """
    Thrown when a router-only method is called from a non-router node.
    """
    pass

class JobStatusMessage(object):
    """
    A job status message object that is passed to any subscribers to
    the internal job status notification.

    :arg str job_id: The job ID.
    :arg str state: One of the state constants for a job.
    :arg str source: The UUID of the node that originated this
        message.
    :arg str|None parent_id: The parent ID of this job. Typically
        only set when the job is NEW.
    :arg str|None summary: The summary of the job. Only set when
        the job enters a finished state.
    """
    def __init__(self, job_id, state, source, parent_id=None, summary=None):
        self.job_id = job_id
        self.state = state
        self.source = source
        self.parent_id = parent_id
        self.summary = summary

    def flatten(self):
        """
        Flatten the internal variables into a dict.
        """
        return {
            'job_id': self.job_id,
            'state': self.state,
            'source': self.source,
            'parent_id': self.parent_id,
            'summary': self.summary
        }
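
# Illustrative sketch only (not part of the original file): constructing a
# status message and flattening it for transport. The ID, state, and node
# values are hypothetical placeholders.
#
#   message = JobStatusMessage('job-1234', 'SUCCESS', 'node-uuid', summary='Done.')
#   message.flatten()
#   # {'job_id': 'job-1234', 'state': 'SUCCESS', 'source': 'node-uuid',
#   #  'parent_id': None, 'summary': 'Done.'}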
class ThreadedDatabaseSessionFetcher(paasmaker.util.threadcallback.ThreadCallback):
    """
    Helper class to grab a SQLAlchemy session in another thread, in
    case it blocks waiting on connection or external resources.
    """
    def _work(self, sessionmaker):
        logger.debug("Creating database session (or waiting for one).")
        session = sessionmaker()
        logger.debug("Got database session, returning to caller.")
        self._callback(session)
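
# Illustrative sketch only (not part of the original file): how this fetcher
# is driven. Configuration.get_database_session() below does exactly this;
# the callback names here are hypothetical placeholders.
#
#   fetcher = ThreadedDatabaseSessionFetcher(io_loop, on_session, on_error)
#   fetcher.work(session_maker)
#   # on_session(session) is called once the session has been checked out.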
class Configuration(paasmaker.util.configurationhelper.ConfigurationHelper):
    """
    The main configuration object for the Paasmaker system.

    This object contains the configuration and context for the entire
    application. Most components in the system accept an instance of
    this object, and use that to look up shared resources such as database
    sessions, Redis instances, or other information.

    This class also handles loading the configuration file and
    validating its contents. Additionally, it also handles plugins.

    Instance variables that are available for public use:

    * **plugins**: The plugin registry instance for the system. You
      can call this to instantiate plugins. For example::

          self.configuration.plugins.instantiate( ... )

    * **io_loop**: The tornado IO loop. Use the IO loop from this
      object directly wherever you need one. This is because the
      ``ConfigurationStub()`` class will have this set correctly,
      meaning your production code and unit test code are identical.
    * **job_manager**: You can access the job manager directly from
      here when needed.

    Other instance variables, whilst not prefixed with an underscore,
    should be considered protected. Only use the instance variables
    documented above in your code.

    To access the configuration options, you have two options:

    * Use the configuration object as a dict, checking for keys
      as necessary before trying to access keys that may or may not
      be present. For example::

          pacemaker = configuration['pacemaker']['enabled']

    * Use the ``get_flat()`` method with a path. For example::

          pacemaker = configuration.get_flat('pacemaker.enabled')
    """
    def __init__(self, io_loop=None, debug=False):
        super(Configuration, self).__init__(ConfigurationSchema())
        self.port_allocator = paasmaker.util.port.FreePortFinder()
        self.plugins = paasmaker.util.PluginRegistry(self)
        self.uuid = None
        self.job_watcher = None
        self.job_manager = paasmaker.common.job.manager.manager.JobManager(self)
        self.io_loop = io_loop or tornado.ioloop.IOLoop.instance()
        self.start_time = datetime.datetime.utcnow()
        self.node_logging_configured = False
        self.database_sessions_checked_out = 0
        self.database_sessions_checkout_pending = 0
        self.redis_scripts = {}

        # Debug flag handling.
        self.debug = debug
        if options.debug == 1:
            self.debug = True

    def uptime(self):
        """
        Calculate the uptime of this configuration object, and return
        a value in seconds.
        """
        return (datetime.datetime.utcnow() - self.start_time).total_seconds()
    def load_from_file(self, search_path):
        """
        Load the configuration from file. If a specific configuration
        file was specified on the command line, attempt to load
        from that file.
        """
        # If we were supplied a configuration file on the command line,
        # insert that into the search path.
        new_search_path = list(search_path)
        if options.configfile != "":
            new_search_path.insert(0, options.configfile)
        super(Configuration, self).load_from_file(new_search_path)
    def post_load(self):
        """
        Perform post configuration loading tasks.

        This creates directories as needed, determines things like
        the hostname and route (if required), and registers plugins,
        including default plugins.
        """
        if self['https_port'] is not None:
            # Make sure the keyfile and certfile are set.
            if not 'keyfile' in self['ssl_options'] or not 'certfile' in self['ssl_options']:
                raise InvalidConfigurationParameterException('keyfile and certfile must both be set if you enable listening on an SSL port. Set them inside the ``ssl_options`` mapping.')

        # Convert the scratch directory into a fully qualified path.
        self['scratch_directory'] = os.path.abspath(self['scratch_directory'])

        # Now make sure it exists.
        if not os.path.exists(self['scratch_directory']):
            # Attempt to create it.
            try:
                os.mkdir(self['scratch_directory'])
            except OSError, ex:
                raise InvalidConfigurationParameterException("Scratch directory %s does not exist, and we were unable to create it: %s" % (self['scratch_directory'], str(ex)))

        # Check the logs dir.
        if self['log_directory'] is None:
            self['log_directory'] = os.path.join(self['scratch_directory'], 'logs')
        if not os.path.exists(self['log_directory']):
            # Attempt to create it.
            try:
                os.mkdir(self['log_directory'])
            except OSError, ex:
                raise InvalidConfigurationParameterException("Logs directory %s does not exist, and we were unable to create it: %s" % (self['log_directory'], str(ex)))

        if self['heart']['enabled']:
            if self['heart']['working_dir'] is None:
                self['heart']['working_dir'] = os.path.join(self['scratch_directory'], 'instances')
            if not os.path.exists(self['heart']['working_dir']):
                # Attempt to create it.
                try:
                    os.mkdir(self['heart']['working_dir'])
                except OSError, ex:
                    raise InvalidConfigurationParameterException("Heart working directory %s does not exist, and we were unable to create it: %s" % (self['heart']['working_dir'], str(ex)))

        if self['my_name'] is None:
            self['my_name'] = os.uname()[1]
        if self['my_route'] is None:
            # TODO: improve this detection and use.
            self['my_route'] = socket.getfqdn()
            if '.in-addr.arpa' in self['my_route']:
                # Sometimes on OSX this gives the wrong address; and gives you
                # a reverse resolve. Use localhost instead.
                self['my_route'] = 'localhost'

        # Update the flat representation again before proceeding.
        self.update_flat()

        # Heart initialisation.
        if self.is_heart():
            # Instance manager.
            self.instances = paasmaker.heart.helper.instancemanager.InstanceManager(self)

            # Mark allocated ports as allocated.
            allocated_ports = self.instances.get_used_ports()
            self.port_allocator.add_allocated_port(allocated_ports)

        if self.get_flat('default_plugins'):
            # TODO: Split into Heart/Pacemaker/Router only jobs.
            default_path = os.path.normpath(os.path.dirname(__file__) + '/../../data/defaults')
            default_file = os.path.join(default_path, 'plugins.yml')
            default_plugins_raw = open(default_file, 'r').read()
            default_plugins_parsed = yaml.safe_load(default_plugins_raw)
            default_plugins_ready = PluginsOnlySchema().deserialize(default_plugins_parsed)
            self.load_plugins(self.plugins, default_plugins_ready['plugins'])

            if self.is_pacemaker() and self.get_flat('pacemaker.health.enabled'):
                # Load the default health groups. We merge these with the others - although
                # if you duplicate the default group in yours, the results will be undefined.
                default_file = os.path.join(default_path, 'health.yml')
                default_health_raw = open(default_file, 'r').read()
                default_health_parsed = yaml.safe_load(default_health_raw)
                default_health_ready = HealthGroupsOnlySchema().deserialize(default_health_parsed)
                self['pacemaker']['health']['groups'].extend(default_health_ready['groups'])

            if self.get_flat('default_periodics'):
                # Load the default periodics plugins. We merge these with the others.
                default_file = os.path.join(default_path, 'periodics.yml')
                default_periodics_raw = open(default_file, 'r').read()
                default_periodics_parsed = yaml.safe_load(default_periodics_raw)
                default_periodics_ready = PeriodicsOnlySchema().deserialize(default_periodics_parsed)
                self['periodics'].extend(default_periodics_ready['periodics'])

        # Plugins. Note that we load these after the defaults,
        # so you can re-register the defaults with different options.
        self.load_plugins(self.plugins, self['plugins'])

        # Check SCM lister entries.
        if self.is_pacemaker():
            for meta in self['pacemaker']['scmlisters']:
                if not self.plugins.exists(meta['for'], paasmaker.util.plugin.MODE.SCM_EXPORT):
                    raise InvalidConfigurationParameterException("SCM lister entry refers to SCM plugin %s that doesn't exist." % meta['for'])
                for listplugin in meta['plugins']:
                    if not self.plugins.exists(listplugin, paasmaker.util.plugin.MODE.SCM_LIST):
                        raise InvalidConfigurationParameterException("SCM lister plugin %s doesn't exist (in list for SCM %s)." % (listplugin, meta['for']))

        # Check that the health manager plugins exist,
        # and also that their parameters validate.
        if self.is_pacemaker():
            for group in self['pacemaker']['health']['groups']:
                for plugin in group['plugins']:
                    if not self.plugins.exists(plugin['plugin'], paasmaker.util.plugin.MODE.HEALTH_CHECK):
                        raise InvalidConfigurationParameterException("Health check plugin %s doesn't exist." % plugin['plugin'])

                    # Try to instantiate it with the parameters.
                    # This will raise an exception if it's wrong.
                    try:
                        instance = self.plugins.instantiate(
                            plugin['plugin'],
                            paasmaker.util.plugin.MODE.HEALTH_CHECK,
                            plugin['parameters']
                        )
                    except InvalidConfigurationFormatException, ex:
                        logger.error("Exception:", exc_info=ex)
                        raise InvalidConfigurationParameterException("Error in health check plugin %s parameters. See above." % plugin['plugin'])

        # Check that the periodic plugins exist.
        for periodic in self['periodics']:
            if not self.plugins.exists(periodic['plugin'], paasmaker.util.plugin.MODE.PERIODIC):
                raise InvalidConfigurationParameterException("Periodic plugin %s doesn't exist." % periodic['plugin'])

        # Make sure at least one stats plugin is registered.
        if len(self.plugins.plugins_for(paasmaker.util.plugin.MODE.NODE_STATS)) == 0:
            raise InvalidConfigurationParameterException("No NODE_STATS plugins are registered. Paasmaker can't calculate node scores without that.")
        # Make sure at least one score plugin is registered.
        if len(self.plugins.plugins_for(paasmaker.util.plugin.MODE.NODE_SCORE)) == 0:
            raise InvalidConfigurationParameterException("No NODE_SCORE plugins are registered. Paasmaker can't calculate node scores without that.")

        self.update_flat()
    def is_pacemaker(self):
        """
        Determine if this node is a pacemaker.
        """
        return self.get_flat('pacemaker.enabled')

    def is_heart(self):
        """
        Determine if this node is a heart.
        """
        return self.get_flat('heart.enabled')

    def is_router(self):
        """
        Determine if this node is a router.
        """
        return self.get_flat('router.enabled')
    def get_runtimes(self, callback):
        """
        Get a list of runtimes and their associated versions.

        Once the list is generated, it is cached for the lifetime
        of the server. Subsequent calls return the same list generated
        the first time.

        The callback is called with a dict. The keys are the runtime
        names, and the values are lists of versions that this node
        can run.
        """
        if not self.is_heart() and not self.is_pacemaker():
            raise ImNotAHeart("I'm not a heart or pacemaker, so I have no runtimes.")

        # Use a cached version if present.
        # The idea is that we don't do this expensive version determination each
        # time we re-register with the master.
        if hasattr(self, '_runtime_cache'):
            logger.debug("Using existing runtime cache.")
            callback(self._runtime_cache)
            return
        else:
            logger.info("Generating runtime list...")

        tags = {}
        runtime_plugins = self.plugins.plugins_for(paasmaker.util.plugin.MODE.RUNTIME_VERSIONS)

        def get_versions():
            try:
                def got_versions(versions):
                    if len(versions) > 0:
                        # Only report that we have this runtime at all if it
                        # has at least one version.
                        tags[plugin] = versions

                    # Move onto the next plugin.
                    get_versions()
                    # end of got_versions()

                plugin = runtime_plugins.pop()
                runtime = self.plugins.instantiate(
                    plugin,
                    paasmaker.util.plugin.MODE.RUNTIME_VERSIONS
                )
                runtime.get_versions(got_versions)
            except IndexError, ex:
                # No more to process.
                # Send back the tags.
                self._runtime_cache = tags
                callback(self._runtime_cache)
            # end of get_versions()

        # Kick off the process.
        get_versions()
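
    # Illustrative sketch only (not part of the original file): consuming the
    # runtime list. The structure shown is an example shape; actual runtime
    # plugin names and versions depend on this node's configuration.
    #
    #   def on_runtimes(runtimes):
    #       # e.g. runtimes == {'paasmaker.runtime.php': ['5.3', '5.4']}
    #       for name, versions in runtimes.items():
    #           logger.info("Runtime %s provides versions %s", name, ", ".join(versions))
    #
    #   configuration.get_runtimes(on_runtimes)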
    def get_dynamic_tags(self, callback):
        """
        Get a list of dynamic tags and their associated values.

        This reaches out to plugins to generate the tags. Once run
        the first time, this is not run again until the node restarts.
        """
        if hasattr(self, '_dynamic_tags_cache'):
            logger.debug("Using existing dynamic tags cache.")
            callback(self._dynamic_tags_cache)
            return
        else:
            logger.info("Generating dynamic tags...")

        dynamic_tags = copy.deepcopy(self['tags'])
        tags_plugins = self.plugins.plugins_for(
            paasmaker.util.plugin.MODE.NODE_DYNAMIC_TAGS
        )

        def get_tags():
            try:
                def got_tags(tags):
                    # Move onto the next plugin.
                    get_tags()
                    # end of got_tags()

                plugin = tags_plugins.pop()
                tagger = self.plugins.instantiate(
                    plugin,
                    paasmaker.util.plugin.MODE.NODE_DYNAMIC_TAGS
                )
                tagger.fetch(dynamic_tags, got_tags)
            except IndexError, ex:
                # No more to process.
                # Send back the tags.
                self._dynamic_tags_cache = dynamic_tags
                callback(self._dynamic_tags_cache)
            # end of get_tags()

        # Kick off the process.
        get_tags()

    def get_node_stats(self, callback):
        """
        Get a set of stats for the node. This can call out to
        plugins to generate the stats. Once complete, it will call
        the callback with the generated stats.
        """
        stats = {}
        stats_plugins = self.plugins.plugins_for(
            paasmaker.util.plugin.MODE.NODE_STATS
        )

        def get_stats():
            try:
                def got_stats(stats):
                    # Move onto the next plugin.
                    get_stats()
                    # end of got_stats()

                plugin = stats_plugins.pop()
                stat_collector = self.plugins.instantiate(
                    plugin,
                    paasmaker.util.plugin.MODE.NODE_STATS
                )
                stat_collector.stats(stats, got_stats)
            except IndexError, ex:
                # No more to process.
                callback(stats)
            # end of get_stats()

        # Kick off the process.
        get_stats()
    def get_node_score(self, stats):
        """
        Generate a score for this node. This can call out to a set of
        plugins, and the highest score from the plugins is used (as the
        order of execution of plugins is not defined).

        :arg dict stats: The node's stats.
        """
        scores = []
        score_plugins = self.plugins.plugins_for(
            paasmaker.util.plugin.MODE.NODE_SCORE
        )

        for plugin in score_plugins:
            instance = self.plugins.instantiate(
                plugin,
                paasmaker.util.plugin.MODE.NODE_SCORE
            )
            scores.append(instance.score(stats))

        if len(scores) == 0:
            # No score plugins.
            # Should not be possible, as it's checked on startup.
            # Give the node a high score, as it's misconfigured.
            scores.append(1.0)

        return max(scores)
    def setup_database(self):
        """
        Set up the database; creating tables on the first startup,
        or otherwise doing nothing on subsequent operations.
        """
        if not self.is_pacemaker():
            raise ImNotAPacemaker("I'm not a pacemaker, so I have no database.")

        # Connect.
        self.engine = create_engine(self.get_flat('pacemaker.dsn'), **self['pacemaker']['database_options'])
        self.session = sessionmaker(bind=self.engine)

        # Bind the session to the metadata.
        paasmaker.model.Base.metadata.bind = self.engine

        # Now see if we need to create tables. If so, do that,
        # and set the Alembic migrations starting point.
        check_session = None
        try:
            check_session = self.session()
            query = check_session.query(
                func.count(paasmaker.model.Workspace.id)
            )
            # This will throw an exception if the table
            # does not exist. We query Workspace because
            # it exists in the oldest versions of Paasmaker,
            # and additionally it shouldn't contain too
            # many entries.
            count = query.count()
            logger.info("Database already created. No action to take.")
        except sqlalchemy.exc.OperationalError, ex:
            # We need to create the tables. Do that now.
            logger.info("No database created. Creating tables now...")
            paasmaker.model.Base.metadata.create_all()

            # A hack to prevent alembic from altering the
            # Python logger settings.
            paasmaker.ALEMBIC_NO_LOGGING = True

            # And tell alembic to stamp this database at the head
            # migration, for future compatibility.
            from alembic.config import Config
            from alembic import command
            alembic_cfg = Config("alembic.ini")
            command.stamp(alembic_cfg, "head")

    def get_free_port(self):
        """
        Get a free TCP port in the misc ports range.
        """
        return self.port_allocator.free_in_range(self.get_flat('misc_ports.minimum'), self.get_flat('misc_ports.maximum'))
    def get_database_session(self, callback, error_callback):
        """
        Get a database session object. Each request handler should fetch
        one of these when it needs to, but hang onto it - repeated
        calls will fetch new sessions every time.

        In production, it was discovered that SQLAlchemy will pool these
        for performance, which is what we want, but block when the pool
        is already checked out. For that reason, we push the checkout
        from the pool onto a thread, and call back the caller when the
        session is available.
        """
        if not self.is_pacemaker():
            raise ImNotA("I'm not a pacemaker, so I have no database.")

        debug_source_traceback = None
        if self.debug:
            debug_source_traceback = traceback.extract_stack()

        def attach_tracker(session):
            self.database_sessions_checkout_pending -= 1
            self.database_sessions_checked_out += 1

            if self.debug:
                if not hasattr(self, '_session_close_tracker'):
                    self._session_close_tracker = {}

                # Track sessions to make sure they get closed.
                # This is very hackish, and thus only enabled in debug mode.
                session_key = str(session)
                session.original_close = session.close

                def tracking_close():
                    self.database_sessions_checked_out -= 1
                    logger.debug(
                        "Returning database session. %d pending, %d out.",
                        self.database_sessions_checkout_pending,
                        self.database_sessions_checked_out
                    )
                    if session_key in self._session_close_tracker:
                        self.io_loop.remove_timeout(self._session_close_tracker[session_key]['timeout'])
                        del self._session_close_tracker[session_key]
                    session.original_close()

                def session_timeout():
                    if session_key in self._session_close_tracker:
                        print "Session not closed after 20 seconds."
                        print "Opened here:"
                        print "".join(traceback.format_list(self._session_close_tracker[session_key]['traceback']))
                        del self._session_close_tracker[session_key]

                session.close = tracking_close
                self._session_close_tracker[session_key] = {
                    'timeout': self.io_loop.add_timeout(time.time() + 20, session_timeout),
                    'traceback': debug_source_traceback
                }

            callback(session)

        def attach_lightweight_counter(session):
            self.database_sessions_checkout_pending -= 1
            self.database_sessions_checked_out += 1

            session.original_close = session.close

            def counter_decrement():
                self.database_sessions_checked_out -= 1
                logger.debug(
                    "Returning database session. %d pending, %d out.",
                    self.database_sessions_checkout_pending,
                    self.database_sessions_checked_out
                )
                session.original_close()

            session.close = counter_decrement

            callback(session)

        # Figure out which callback to go back through.
        real_callback = attach_lightweight_counter
        if self.debug:
            real_callback = attach_tracker

        logger.debug(
            "Fetch database session. %d pending, %d out.",
            self.database_sessions_checkout_pending,
            self.database_sessions_checked_out
        )
        self.database_sessions_checkout_pending += 1
        fetcher = ThreadedDatabaseSessionFetcher(self.io_loop, real_callback, error_callback)
        fetcher.work(self.session)
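
    # Illustrative sketch only (not part of the original file): typical use from
    # a request handler. The handler names and the error callback signature are
    # hypothetical placeholders.
    #
    #   def _got_session(session):
    #       try:
    #           workspaces = session.query(paasmaker.model.Workspace).all()
    #       finally:
    #           session.close()  # return the session to the pool
    #
    #   def _session_error(message, exception=None):
    #       logger.error("Unable to fetch a database session: %s", message)
    #
    #   configuration.get_database_session(_got_session, _session_error)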
    def _connect_redis(self, credentials, callback, error_callback):
        """
        Internal function to connect to the given redis server, calling
        the callback when it's ready with the client object.

        You should not call this externally.

        :arg dict credentials: A dict containing three keys, ``host``,
            ``port``, and ``password``.
        :arg callable callback: The callback to call when completed. The
            callback is passed the client object, an instance of
            ``tornadoredis.Client``.
        :arg callable error_callback: A callback called if an error occurs.
        """
        client = TornadoRedisClient(
            host=credentials['host'],
            port=credentials['port'],
            password=credentials['password'],
            io_loop=self.io_loop
        )
        client.connect()

        # TODO: Handle where it failed.
        callback(client)
    def _get_redis(self, name, credentials, callback, error_callback):
        """
        Internal helper to get a redis connection.

        * Not a managed redis? Proceed to fetching a connection.
        * A managed redis? And not started? Start it, and then
          return a client to it.
        * A managed redis? And still starting up? Queue up the incoming
          requests.
        * A managed redis that's started? Proceed to fetching a connection.
        """
        if not credentials['managed']:
            # It's not managed. Just attempt to connect to it.
            self._connect_redis(credentials, callback, error_callback)
        else:
            # It's managed. Check its state.
            meta_key = "%s_%d" % (credentials['host'], credentials['port'])
            if not hasattr(self, 'redis_meta'):
                self.redis_meta = {}
            if not meta_key in self.redis_meta:
                self.redis_meta[meta_key] = {
                    'state': 'CREATE',
                    'queue': [],
                    'shutdown': credentials['shutdown']
                }
            meta = self.redis_meta[meta_key]

            # Callback to handle when it's up and running.
            def on_redis_started(message):
                # Mark it as started.
                meta['state'] = 'STARTED'

                # Play back all our callbacks.
                for queued in meta['queue']:
                    self._connect_redis(queued[0], queued[1], queued[2])

                # Is this a router table, that's a slave of another?
                if name == 'table':
                    # TODO: Ensure this retries if it fails on first startup.
                    if self.get_flat('redis.slaveof.enabled'):
                        def on_slaved(result):
                            logger.info("Successfully set up redis server as slave of the master.")
                            logger.debug("%s", str(result))

                        def got_redis(client):
                            # TODO: Does not support password protected Redis instances!
                            client.execute_command(
                                'SLAVEOF',
                                self.get_flat('redis.slaveof.host'),
                                self.get_flat('redis.slaveof.port'),
                                callback=on_slaved
                            )

                        def failed_redis(message, exception=None):
                            # Nope. TODO: Take some other action?
                            logger.error("Unable to get redis to make into slave: %s", message)
                            if exception:
                                logger.error("Exception:", exc_info=exception)

                        # It's a slave. Make it so.
                        self._connect_redis(credentials, got_redis, failed_redis)

            def on_redis_startup_failure(message, exception=None):
                error_message = "Failed to start managed redis for %s: %s" % (name, message)
                logger.error(error_message)
                if exception:
                    logger.error("Exception:", exc_info=exception)
                error_callback(error_message)

            # Change the action based on our state.
            if meta['state'] == 'CREATE':
                # This is the first attempt to access it.
                # Start up the service.
                meta['state'] = 'STARTING'
                meta['queue'].append((credentials, callback, error_callback))

                def redis_configured(message):
                    logger.debug("Starting redis for %s", name)
                    meta['manager'].start_if_not_running(on_redis_started, on_redis_startup_failure)

                directory = self.get_scratch_path_exists(
                    'redis', name
                )
                meta['manager'] = paasmaker.util.redisdaemon.RedisDaemon(self)
                try:
                    meta['manager'].load_parameters(directory)
                    redis_configured("Configured")
                except paasmaker.util.ManagedDaemonError, ex:
                    # Doesn't yet exist. Create it.
                    logger.debug("Creating redis for %s", name)
                    meta['manager'].configure(
                        directory,
                        credentials['port'],
                        credentials['host'],
                        redis_configured,
                        error_callback,
                        password=credentials['password']
                    )
            elif meta['state'] == 'STARTING':
                # Queue up our callbacks.
                meta['queue'].append((credentials, callback, error_callback))
            else:
                # Must be started. Just connect.
                if not meta['man