
/joblib/_dask.py

https://github.com/joblib/joblib
from __future__ import print_function, division, absolute_import

import asyncio
import concurrent.futures
import contextlib
import time
from uuid import uuid4
import weakref

from .parallel import AutoBatchingMixin, ParallelBackendBase, BatchedCalls
from .parallel import parallel_backend

try:
    import distributed
except ImportError:
    distributed = None

if distributed is not None:
    from dask.utils import funcname, itemgetter
    from dask.sizeof import sizeof
    from dask.distributed import (
        Client,
        as_completed,
        get_client,
        secede,
        rejoin,
        get_worker
    )
    from distributed.utils import thread_state

    try:
        # asyncio.TimeoutError, Python3-only error thrown by recent versions of
        # distributed
        from distributed.utils import TimeoutError as _TimeoutError
    except ImportError:
        from tornado.gen import TimeoutError as _TimeoutError


def is_weakrefable(obj):
    try:
        weakref.ref(obj)
        return True
    except TypeError:
        return False


class _WeakKeyDictionary:
    """A variant of weakref.WeakKeyDictionary for unhashable objects.

    This datastructure is used to store futures for broadcasted data objects
    such as large numpy arrays or pandas dataframes that are not hashable and
    therefore cannot be used as keys of traditional python dicts.

    Furthermore, using a dict with id(array) as key is not safe because
    Python is likely to reuse the id of recently collected arrays.
    """

    def __init__(self):
        self._data = {}

    def __getitem__(self, obj):
        ref, val = self._data[id(obj)]
        if ref() is not obj:
            # In case of a race condition with on_destroy.
            raise KeyError(obj)
        return val

    def __setitem__(self, obj, value):
        key = id(obj)
        try:
            ref, _ = self._data[key]
            if ref() is not obj:
                # In case of a race condition with on_destroy.
                raise KeyError(obj)
        except KeyError:
            # Insert the new entry in the mapping along with a weakref
            # callback to automatically delete the entry from the mapping
            # as soon as the object used as key is garbage collected.
            def on_destroy(_):
                del self._data[key]
            ref = weakref.ref(obj, on_destroy)
        self._data[key] = ref, value

    def __len__(self):
        return len(self._data)

    def clear(self):
        self._data.clear()
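

# Minimal usage sketch for _WeakKeyDictionary (illustrative only, not part of
# the joblib API): numpy arrays are unhashable, so a plain dict cannot key on
# them directly, while id()-keyed entries backed by weakref callbacks are
# removed automatically once the key object is garbage collected. numpy is
# assumed only for the sake of this example.
def _example_weak_key_dictionary():
    import gc

    import numpy as np

    cache = _WeakKeyDictionary()
    array = np.ones(10)
    cache[array] = "future-for-array"
    assert cache[array] == "future-for-array"
    assert len(cache) == 1

    # Dropping the last reference fires the weakref callback (immediately on
    # CPython), so a recycled id() can never alias a stale entry.
    del array
    gc.collect()
    assert len(cache) == 0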


def _funcname(x):
    try:
        if isinstance(x, list):
            x = x[0][0]
    except Exception:
        pass
    return funcname(x)


def _make_tasks_summary(tasks):
    """Summarize a list of (func, args, kwargs) function calls"""
    unique_funcs = {func for func, args, kwargs in tasks}

    if len(unique_funcs) == 1:
        mixed = False
    else:
        mixed = True
    return len(tasks), mixed, _funcname(tasks)


class Batch:
    """dask-compatible wrapper that executes a batch of tasks"""

    def __init__(self, tasks):
        # collect some metadata from the tasks to ease Batch calls
        # introspection when debugging
        self._num_tasks, self._mixed, self._funcname = _make_tasks_summary(
            tasks
        )

    def __call__(self, tasks=None):
        results = []
        with parallel_backend('dask'):
            for func, args, kwargs in tasks:
                results.append(func(*args, **kwargs))
        return results

    def __repr__(self):
        descr = f"batch_of_{self._funcname}_{self._num_tasks}_calls"
        if self._mixed:
            descr = "mixed_" + descr
        return descr
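

# Illustrative sketch (not part of the joblib API): a Batch bundles several
# (func, args, kwargs) triples into one picklable callable so that a whole
# group of small calls can be submitted to dask as a single task. Assumes
# dask.distributed is installed; the in-process Client only exists so that
# Batch.__call__ can re-enter the 'dask' backend.
def _example_batch():
    client = Client(processes=False)
    tasks = [(sum, ([1, 2, 3],), {}), (sum, ([4, 5],), {})]
    batch = Batch(tasks)
    print(repr(batch))         # "batch_of_sum_2_calls"
    print(batch(tasks=tasks))  # [6, 9]
    client.close()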


def _joblib_probe_task():
    # Noop used by the joblib connector to probe when workers are ready.
    pass


class DaskDistributedBackend(AutoBatchingMixin, ParallelBackendBase):
    MIN_IDEAL_BATCH_DURATION = 0.2
    MAX_IDEAL_BATCH_DURATION = 1.0
    supports_timeout = True

    def __init__(self, scheduler_host=None, scatter=None,
                 client=None, loop=None, wait_for_workers_timeout=10,
                 **submit_kwargs):
        super().__init__()

        if distributed is None:
            msg = ("You are trying to use 'dask' as a joblib parallel backend "
                   "but dask is not installed. Please install dask "
                   "to fix this error.")
            raise ValueError(msg)

        if client is None:
            if scheduler_host:
                client = Client(scheduler_host, loop=loop,
                                set_as_default=False)
            else:
                try:
                    client = get_client()
                except ValueError as e:
                    msg = ("To use Joblib with Dask first create a Dask Client"
                           "\n\n"
                           "    from dask.distributed import Client\n"
                           "    client = Client()\n"
                           "or\n"
                           "    client = Client('scheduler-address:8786')")
                    raise ValueError(msg) from e

        self.client = client

        if scatter is not None and not isinstance(scatter, (list, tuple)):
            raise TypeError("scatter must be a list/tuple, got "
                            "`%s`" % type(scatter).__name__)

        if scatter is not None and len(scatter) > 0:
            # Keep a reference to the scattered data to keep the ids the same
            self._scatter = list(scatter)
            scattered = self.client.scatter(scatter, broadcast=True)
            self.data_futures = {id(x): f for x, f in zip(scatter, scattered)}
        else:
            self._scatter = []
            self.data_futures = {}

        self.wait_for_workers_timeout = wait_for_workers_timeout
        self.submit_kwargs = submit_kwargs
        self.waiting_futures = as_completed(
            [],
            loop=client.loop,
            with_results=True,
            raise_errors=False
        )
        self._results = {}
        self._callbacks = {}
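
    # Usage sketch (illustrative; mirrors the joblib documentation rather than
    # anything defined in this module): the backend is normally selected
    # through parallel_backend instead of being instantiated directly, and
    # `scatter` pre-broadcasts large constants once instead of shipping them
    # with every task.
    #
    #   >>> from dask.distributed import Client
    #   >>> from joblib import Parallel, delayed, parallel_backend
    #   >>> client = Client(processes=False)   # or Client('scheduler:8786')
    #   >>> big = list(range(100_000))         # hypothetical shared input
    #   >>> with parallel_backend('dask', scatter=[big]):
    #   ...     lengths = Parallel()(delayed(len)(big) for _ in range(10))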

    async def _collect(self):
        while self._continue:
            async for future, result in self.waiting_futures:
                cf_future = self._results.pop(future)
                callback = self._callbacks.pop(future)
                if future.status == "error":
                    typ, exc, tb = result
                    cf_future.set_exception(exc)
                else:
                    cf_future.set_result(result)
                    callback(result)
            await asyncio.sleep(0.01)

    def __reduce__(self):
        return (DaskDistributedBackend, ())

    def get_nested_backend(self):
        return DaskDistributedBackend(client=self.client), -1

    def configure(self, n_jobs=1, parallel=None, **backend_args):
        self.parallel = parallel
        return self.effective_n_jobs(n_jobs)

    def start_call(self):
        self._continue = True
        self.client.loop.add_callback(self._collect)
        self.call_data_futures = _WeakKeyDictionary()

    def stop_call(self):
        # The explicit call to clear is required to break a cyclic reference
        # to the futures.
        self._continue = False
        # wait for the future collection routine (self._backend._collect) to
        # finish in order to limit asyncio warnings due to aborting _collect
        # during a following backend termination call
        time.sleep(0.01)
        self.call_data_futures.clear()

    def effective_n_jobs(self, n_jobs):
        effective_n_jobs = sum(self.client.ncores().values())
        if effective_n_jobs != 0 or not self.wait_for_workers_timeout:
            return effective_n_jobs

        # If there is no worker, schedule a probe task to wait for the workers
        # to come up and be available. If the dask cluster is in adaptive
        # mode, this probe task might cause the cluster to provision some
        # workers.
        try:
            self.client.submit(_joblib_probe_task).result(
                timeout=self.wait_for_workers_timeout)
        except _TimeoutError as e:
            error_msg = (
                "DaskDistributedBackend has no worker after {} seconds. "
                "Make sure that workers are started and can properly connect "
                "to the scheduler and increase the joblib/dask connection "
                "timeout with:\n\n"
                "parallel_backend('dask', wait_for_workers_timeout={})"
            ).format(self.wait_for_workers_timeout,
                     max(10, 2 * self.wait_for_workers_timeout))
            raise TimeoutError(error_msg) from e
        return sum(self.client.ncores().values())
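
    # As the error message above suggests, deployments whose workers take a
    # while to appear (for example adaptive clusters) can be given more time
    # before the backend gives up. An illustrative sketch (`do_work` is a
    # hypothetical user function):
    #
    #   >>> with parallel_backend('dask', wait_for_workers_timeout=60):
    #   ...     Parallel()(delayed(do_work)(i) for i in range(10))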

    async def _to_func_args(self, func):
        itemgetters = dict()

        # Futures that are dynamically generated during a single call to
        # Parallel.__call__.
        call_data_futures = getattr(self, 'call_data_futures', None)

        async def maybe_to_futures(args):
            out = []
            for arg in args:
                arg_id = id(arg)
                if arg_id in itemgetters:
                    out.append(itemgetters[arg_id])
                    continue

                f = self.data_futures.get(arg_id, None)
                if f is None and call_data_futures is not None:
                    try:
                        f = await call_data_futures[arg]
                    except KeyError:
                        pass
                    if f is None:
                        if is_weakrefable(arg) and sizeof(arg) > 1e3:
                            # Automatically scatter large objects to some of
                            # the workers to avoid duplicated data transfers.
                            # Rely on automated inter-worker data stealing if
                            # more workers need to reuse this data
                            # concurrently.
                            # set hash=False - nested scatter calls (i.e.
                            # calling client.scatter inside a dask worker)
                            # using hash=True often raise CancelledError,
                            # see dask/distributed#3703
                            _coro = self.client.scatter(
                                arg,
                                asynchronous=True,
                                hash=False
                            )
                            # Centralize the scattering of identical arguments
                            # between concurrent apply_async callbacks by
                            # exposing the running coroutine in
                            # call_data_futures before it completes.
                            t = asyncio.Task(_coro)
                            call_data_futures[arg] = t

                            f = await t

                if f is not None:
                    out.append(f)
                else:
                    out.append(arg)
            return out

        tasks = []
        for f, args, kwargs in func.items:
            args = list(await maybe_to_futures(args))
            kwargs = dict(zip(kwargs.keys(),
                              await maybe_to_futures(kwargs.values())))
            tasks.append((f, args, kwargs))

        return (Batch(tasks), tasks)
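
    # The auto-scatter heuristic in maybe_to_futures relies on dask.sizeof to
    # estimate argument sizes: anything weakref-able and larger than ~1 kB is
    # shipped to the cluster once and then referenced by its Future. A rough
    # illustration (numpy is only assumed for the example; for ndarrays,
    # sizeof is roughly the array's nbytes):
    #
    #   >>> from dask.sizeof import sizeof
    #   >>> import numpy as np
    #   >>> sizeof(np.zeros(10))        # roughly 80 bytes -> passed inline
    #   >>> sizeof(np.zeros(100_000))   # roughly 800 kB -> scattered, reused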

    def apply_async(self, func, callback=None):
        cf_future = concurrent.futures.Future()
        cf_future.get = cf_future.result  # achieve AsyncResult API

        async def f(func, callback):
            batch, tasks = await self._to_func_args(func)
            key = f'{repr(batch)}-{uuid4().hex}'

            dask_future = self.client.submit(
                batch, tasks=tasks, key=key, **self.submit_kwargs
            )
            self.waiting_futures.add(dask_future)
            self._callbacks[dask_future] = callback
            self._results[dask_future] = cf_future

        self.client.loop.add_callback(f, func, callback)

        return cf_future

    def abort_everything(self, ensure_ready=True):
        """Tell the client to cancel any task submitted via this instance.

        joblib.Parallel will never access those results.
        """
        with self.waiting_futures.lock:
            self.waiting_futures.futures.clear()
            while not self.waiting_futures.queue.empty():
                self.waiting_futures.queue.get()

    @contextlib.contextmanager
    def retrieval_context(self):
        """Override ParallelBackendBase.retrieval_context to avoid deadlocks.

        This removes the thread from the worker's thread pool (using
        'secede'). Seceding avoids deadlock in nested parallelism settings.
        """
        # See 'joblib.Parallel.__call__' and 'joblib.Parallel.retrieve' for how
        # this is used.
        if hasattr(thread_state, 'execution_state'):
            # we are in a worker. Secede to avoid deadlock.
            secede()

        yield

        if hasattr(thread_state, 'execution_state'):
            rejoin()
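

# Sketch of the nested-parallelism scenario that retrieval_context protects
# against (illustrative only; `inner` and `outer` are hypothetical user
# functions, and an in-process Client is assumed). The outer tasks run on
# dask workers and themselves call joblib.Parallel; without secede()/rejoin()
# the worker threads blocked in retrieve() would occupy the pool and the
# inner tasks might never get scheduled.
def _example_nested_parallelism():
    from dask.distributed import Client
    from joblib import Parallel, delayed, parallel_backend

    def inner(x):
        return x * x

    def outer(n):
        # Re-enters the dask backend from inside a worker (cf. Batch.__call__).
        return sum(Parallel()(delayed(inner)(i) for i in range(n)))

    client = Client(processes=False)
    with parallel_backend('dask'):
        results = Parallel()(delayed(outer)(n) for n in range(2, 5))
    client.close()
    return results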