# asyn.py
import asyncio
import asyncio.events
import functools
import inspect
import io
import numbers
import os
import re
import threading
from contextlib import contextmanager
from glob import has_magic
from typing import TYPE_CHECKING, Iterable

from .callbacks import DEFAULT_CALLBACK
from .exceptions import FSTimeoutError
from .implementations.local import LocalFileSystem, make_path_posix, trailing_sep
from .spec import AbstractBufferedFile, AbstractFileSystem
from .utils import glob_translate, is_exception, other_paths

private = re.compile("_[^_]")
iothread = [None]  # dedicated fsspec IO thread
loop = [None]  # global event loop for any non-async instance
_lock = None  # global lock placeholder
get_running_loop = asyncio.get_running_loop


def get_lock():
    """Allocate or return a threading lock.

    The lock is allocated on first use to allow setting one lock per forked process.
    """
    global _lock
    if not _lock:
        _lock = threading.Lock()
    return _lock


def reset_lock():
    """Reset the global lock.

    This should be called only on the init of a forked process to reset the lock to
    None, enabling the new forked process to get a new lock.
    """
    global _lock

    iothread[0] = None
    loop[0] = None
    _lock = None


async def _runner(event, coro, result, timeout=None):
    timeout = timeout if timeout else None  # convert 0 or 0.0 to None
    if timeout is not None:
        coro = asyncio.wait_for(coro, timeout=timeout)
    try:
        result[0] = await coro
    except Exception as ex:
        result[0] = ex
    finally:
        event.set()


def sync(loop, func, *args, timeout=None, **kwargs):
    """
    Make the loop run the coroutine until it returns. Runs in another thread.

    Examples
    --------
    >>> fsspec.asyn.sync(fsspec.asyn.get_loop(), func, *args,
                         timeout=timeout, **kwargs)
    """
    timeout = timeout if timeout else None  # convert 0 or 0.0 to None
    # NB: if the loop is not running *yet*, it is OK to submit work
    # and we will wait for it
    if loop is None or loop.is_closed():
        raise RuntimeError("Loop is not running")
    try:
        loop0 = asyncio.events.get_running_loop()
        if loop0 is loop:
            raise NotImplementedError("Calling sync() from within a running loop")
    except NotImplementedError:
        raise
    except RuntimeError:
        pass
    coro = func(*args, **kwargs)
    result = [None]
    event = threading.Event()
    asyncio.run_coroutine_threadsafe(_runner(event, coro, result, timeout), loop)
    while True:
        # this loop allows the thread to be interrupted
        if event.wait(1):
            break
        if timeout is not None:
            timeout -= 1
            if timeout < 0:
                raise FSTimeoutError

    return_result = result[0]
    if isinstance(return_result, asyncio.TimeoutError):
        # suppress asyncio.TimeoutError, raise FSTimeoutError
        raise FSTimeoutError from return_result
    elif isinstance(return_result, BaseException):
        raise return_result
    else:
        return return_result
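

# Usage sketch (illustration only, not library API): run a backend coroutine to
# completion from blocking code on the dedicated fsspec IO loop. `MyAsyncFS` is
# a hypothetical AsyncFileSystem subclass.
#
#     fs = MyAsyncFS(asynchronous=False)
#     data = sync(get_loop(), fs._cat_file, "bucket/key", timeout=30)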


def sync_wrapper(func, obj=None):
    """Given a function, make it callable in blocking contexts.

    Leave obj=None if defining within a class. Pass the instance if attaching
    as an attribute of the instance.
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        self = obj or args[0]
        return sync(self.loop, func, *args, **kwargs)

    return wrapper
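

# Example sketch (hedged): attaching a blocking wrapper to an existing instance.
# Assumes `fs` is an AsyncFileSystem instance with a coroutine `_info`; the
# resulting attribute behaves like the mirrored sync methods created further below.
#
#     fs.info = sync_wrapper(fs._info, obj=fs)
#     fs.info("path/to/file")  # blocks until the coroutine completes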


@contextmanager
def _selector_policy():
    original_policy = asyncio.get_event_loop_policy()
    try:
        if os.name == "nt" and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"):
            asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())

        yield
    finally:
        asyncio.set_event_loop_policy(original_policy)


def get_loop():
    """Create or return the default fsspec IO loop

    The loop will be running on a separate thread.
    """
    if loop[0] is None:
        with get_lock():
            # repeat the check just in case the loop got filled between the
            # previous two calls from another thread
            if loop[0] is None:
                with _selector_policy():
                    loop[0] = asyncio.new_event_loop()
                th = threading.Thread(target=loop[0].run_forever, name="fsspecIO")
                th.daemon = True
                th.start()
                iothread[0] = th
    return loop[0]
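

# Note: the loop returned above lives on the daemon thread "fsspecIO" and is
# shared by all non-asynchronous instances. A minimal sketch of submitting work
# to it directly (illustration only; normal code should go through sync()):
#
#     import asyncio
#
#     fut = asyncio.run_coroutine_threadsafe(asyncio.sleep(1), get_loop())
#     fut.result()  # blocks the calling thread, not the IO loop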


def reset_after_fork():
    # reset the loop, IO thread and module-level lock, so the forked child
    # allocates fresh ones (the parent's may be in an unusable state)
    global _lock
    loop[0] = None
    iothread[0] = None
    _lock = None


if hasattr(os, "register_at_fork"):
    # should be posix; this will do nothing for spawn or forkserver subprocesses
    os.register_at_fork(after_in_child=reset_after_fork)


if TYPE_CHECKING:
    import resource

    ResourceError = resource.error
else:
    try:
        import resource
    except ImportError:
        resource = None
        ResourceError = OSError
    else:
        ResourceError = getattr(resource, "error", OSError)


_DEFAULT_BATCH_SIZE = 128
_NOFILES_DEFAULT_BATCH_SIZE = 1280


def _get_batch_size(nofiles=False):
    from fsspec.config import conf

    if nofiles:
        if "nofiles_gather_batch_size" in conf:
            return conf["nofiles_gather_batch_size"]
    else:
        if "gather_batch_size" in conf:
            return conf["gather_batch_size"]
    if nofiles:
        return _NOFILES_DEFAULT_BATCH_SIZE
    if resource is None:
        return _DEFAULT_BATCH_SIZE
    try:
        soft_limit, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
    except (ImportError, ValueError, ResourceError):
        return _DEFAULT_BATCH_SIZE
    if soft_limit == resource.RLIM_INFINITY:
        return -1
    else:
        return soft_limit // 8
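

# Configuration sketch: the two keys consulted above may be set globally, e.g.
#
#     import fsspec.config
#
#     fsspec.config.conf["gather_batch_size"] = 64           # file-transfer ops
#     fsspec.config.conf["nofiles_gather_batch_size"] = 512  # metadata-only ops
#
# Without these, metadata-only operations default to 1280 and file operations
# to the soft RLIMIT_NOFILE // 8 (or 128 where `resource` is unavailable).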


def running_async() -> bool:
    """Being executed by an event loop?"""
    try:
        asyncio.get_running_loop()
        return True
    except RuntimeError:
        return False


async def _run_coros_in_chunks(
    coros,
    batch_size=None,
    callback=DEFAULT_CALLBACK,
    timeout=None,
    return_exceptions=False,
    nofiles=False,
):
    """Run the given coroutines in chunks.

    Parameters
    ----------
    coros: list of coroutines to run
    batch_size: int or None
        Number of coroutines to submit/wait on simultaneously.
        If -1, there will be no throttling. If None, it will be
        inferred from _get_batch_size()
    callback: fsspec.callbacks.Callback instance
        Gets a relative_update when each coroutine completes
    timeout: number or None
        If given, each coroutine times out after this time. Note that, since
        there are multiple batches, the total run time of this function will in
        general be longer
    return_exceptions: bool
        Same meaning as in asyncio.gather
    nofiles: bool
        If inferring the batch_size, does this operation involve local files?
        If yes, you normally expect smaller batches.
    """
    if batch_size is None:
        batch_size = _get_batch_size(nofiles=nofiles)

    if batch_size == -1:
        batch_size = len(coros)

    assert batch_size > 0

    async def _run_coro(coro, i):
        try:
            return await asyncio.wait_for(coro, timeout=timeout), i
        except Exception as e:
            if not return_exceptions:
                raise
            return e, i
        finally:
            callback.relative_update(1)

    i = 0
    n = len(coros)
    results = [None] * n
    pending = set()

    while pending or i < n:
        while len(pending) < batch_size and i < n:
            pending.add(asyncio.ensure_future(_run_coro(coros[i], i)))
            i += 1

        if not pending:
            break

        done, pending = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
        while done:
            result, k = await done.pop()
            results[k] = result

    return results
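

# Behaviour sketch (illustrative, not a test): at most batch_size coroutines
# are in flight at once, and results come back in submission order regardless
# of completion order.
#
#     async def demo():
#         coros = [asyncio.sleep(i / 10, result=i) for i in range(5)]
#         return await _run_coros_in_chunks(coros, batch_size=2)
#
#     # asyncio.run(demo()) -> [0, 1, 2, 3, 4]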


# these methods should be implemented as async by any async-able backend
async_methods = [
    "_ls",
    "_cat_file",
    "_get_file",
    "_put_file",
    "_rm_file",
    "_cp_file",
    "_pipe_file",
    "_expand_path",
    "_info",
    "_isfile",
    "_isdir",
    "_exists",
    "_walk",
    "_glob",
    "_find",
    "_du",
    "_size",
    "_mkdir",
    "_makedirs",
]
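

# Minimal backend sketch (hypothetical, for orientation): implementing a few of
# the coroutines above on the class defined below is enough, since
# mirror_sync_methods() generates the blocking ls/cat_file/... counterparts.
#
#     class MyAsyncFS(AsyncFileSystem):
#         async def _ls(self, path, detail=True, **kwargs):
#             ...  # return a list of info dicts (or names if detail=False)
#
#         async def _cat_file(self, path, start=None, end=None, **kwargs):
#             ...  # return the requested bytes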


class AsyncFileSystem(AbstractFileSystem):
    """Async file operations, default implementations

    Passes bulk operations to asyncio.gather for concurrent operation.

    Implementations that have concurrent batch operations and/or async methods
    should inherit from this class instead of AbstractFileSystem. Docstrings are
    copied from the un-underscored method in AbstractFileSystem, if not given.
    """

    # note that methods do not have docstrings here; they will be copied
    # for _* methods and inferred for overridden methods.

    async_impl = True
    mirror_sync_methods = True
    disable_throttling = False

    def __init__(self, *args, asynchronous=False, loop=None, batch_size=None, **kwargs):
        self.asynchronous = asynchronous
        self._pid = os.getpid()
        if not asynchronous:
            self._loop = loop or get_loop()
        else:
            self._loop = None
        self.batch_size = batch_size
        super().__init__(*args, **kwargs)

    @property
    def loop(self):
        if self._pid != os.getpid():
            raise RuntimeError("This class is not fork-safe")
        return self._loop

    async def _rm_file(self, path, **kwargs):
        raise NotImplementedError

    async def _rm(self, path, recursive=False, batch_size=None, **kwargs):
        # TODO: implement on_error
        batch_size = batch_size or self.batch_size
        path = await self._expand_path(path, recursive=recursive)
        return await _run_coros_in_chunks(
            [self._rm_file(p, **kwargs) for p in reversed(path)],
            batch_size=batch_size,
            nofiles=True,
        )

    async def _cp_file(self, path1, path2, **kwargs):
        raise NotImplementedError

    async def _mv_file(self, path1, path2):
        await self._cp_file(path1, path2)
        await self._rm_file(path1)

    async def _copy(
        self,
        path1,
        path2,
        recursive=False,
        on_error=None,
        maxdepth=None,
        batch_size=None,
        **kwargs,
    ):
        if on_error is None and recursive:
            on_error = "ignore"
        elif on_error is None:
            on_error = "raise"

        if isinstance(path1, list) and isinstance(path2, list):
            # No need to expand paths when both source and destination
            # are provided as lists
            paths1 = path1
            paths2 = path2
        else:
            source_is_str = isinstance(path1, str)
            paths1 = await self._expand_path(
                path1, maxdepth=maxdepth, recursive=recursive
            )
            if source_is_str and (not recursive or maxdepth is not None):
                # Non-recursive glob does not copy directories
                paths1 = [
                    p for p in paths1 if not (trailing_sep(p) or await self._isdir(p))
                ]
                if not paths1:
                    return

            source_is_file = len(paths1) == 1
            dest_is_dir = isinstance(path2, str) and (
                trailing_sep(path2) or await self._isdir(path2)
            )

            exists = source_is_str and (
                (has_magic(path1) and source_is_file)
                or (not has_magic(path1) and dest_is_dir and not trailing_sep(path1))
            )
            paths2 = other_paths(
                paths1,
                path2,
                exists=exists,
                flatten=not source_is_str,
            )

        batch_size = batch_size or self.batch_size
        coros = [self._cp_file(p1, p2, **kwargs) for p1, p2 in zip(paths1, paths2)]
        result = await _run_coros_in_chunks(
            coros, batch_size=batch_size, return_exceptions=True, nofiles=True
        )

        for ex in filter(is_exception, result):
            if on_error == "ignore" and isinstance(ex, FileNotFoundError):
                continue
            raise ex

    async def _pipe_file(self, path, value, mode="overwrite", **kwargs):
        raise NotImplementedError

    async def _pipe(self, path, value=None, batch_size=None, **kwargs):
        if isinstance(path, str):
            path = {path: value}
        batch_size = batch_size or self.batch_size
        return await _run_coros_in_chunks(
            [self._pipe_file(k, v, **kwargs) for k, v in path.items()],
            batch_size=batch_size,
            nofiles=True,
        )

    async def _process_limits(self, url, start, end):
        """Helper for "Range"-based _cat_file"""
        size = None
        suff = False
        if start is not None and start < 0:
            # if start is negative and end None, end is the "suffix length"
            if end is None:
                end = -start
                start = ""
                suff = True
            else:
                size = size or (await self._info(url))["size"]
                start = size + start
        elif start is None:
            start = 0
        if not suff:
            if end is not None and end < 0:
                if start is not None:
                    size = size or (await self._info(url))["size"]
                end = size + end
            elif end is None:
                end = ""
            if isinstance(end, numbers.Integral):
                end -= 1  # bytes range is inclusive
        return f"bytes={start}-{end}"
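
    # Illustration of the header strings produced above (values derived from
    # the logic here, shown for orientation):
    #     start=0,    end=100  -> "bytes=0-99"   (end is made inclusive)
    #     start=None, end=None -> "bytes=0-"     (whole file)
    #     start=-100, end=None -> "bytes=-100"   (suffix: last 100 bytes)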

    async def _cat_file(self, path, start=None, end=None, **kwargs):
        raise NotImplementedError

    async def _cat(
        self, path, recursive=False, on_error="raise", batch_size=None, **kwargs
    ):
        paths = await self._expand_path(path, recursive=recursive)
        coros = [self._cat_file(path, **kwargs) for path in paths]
        batch_size = batch_size or self.batch_size
        out = await _run_coros_in_chunks(
            coros, batch_size=batch_size, nofiles=True, return_exceptions=True
        )
        if on_error == "raise":
            ex = next(filter(is_exception, out), False)
            if ex:
                raise ex
        if (
            len(paths) > 1
            or isinstance(path, list)
            or paths[0] != self._strip_protocol(path)
        ):
            return {
                k: v
                for k, v in zip(paths, out)
                if on_error != "omit" or not is_exception(v)
            }
        else:
            return out[0]

    async def _cat_ranges(
        self,
        paths,
        starts,
        ends,
        max_gap=None,
        batch_size=None,
        on_error="return",
        **kwargs,
    ):
        """Get the contents of byte ranges from one or more files

        Parameters
        ----------
        paths: list
            A list of filepaths on this filesystem
        starts, ends: int or list
            Bytes limits of the read. If using a single int, the same value will be
            used to read all the specified files.
        """
        # TODO: on_error
        if max_gap is not None:
            # use utils.merge_offset_ranges
            raise NotImplementedError
        if not isinstance(paths, list):
            raise TypeError
        if not isinstance(starts, Iterable):
            starts = [starts] * len(paths)
        if not isinstance(ends, Iterable):
            ends = [ends] * len(paths)
        if len(starts) != len(paths) or len(ends) != len(paths):
            raise ValueError
        coros = [
            self._cat_file(p, start=s, end=e, **kwargs)
            for p, s, e in zip(paths, starts, ends)
        ]
        batch_size = batch_size or self.batch_size
        return await _run_coros_in_chunks(
            coros, batch_size=batch_size, nofiles=True, return_exceptions=True
        )
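
    # Usage sketch: scalar starts/ends broadcast across all paths, so the
    # following would read the first kilobyte of each file (illustration only):
    #
    #     chunks = await fs._cat_ranges(["a", "b"], starts=0, ends=1024)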

    async def _put_file(self, lpath, rpath, mode="overwrite", **kwargs):
        raise NotImplementedError

    async def _put(
        self,
        lpath,
        rpath,
        recursive=False,
        callback=DEFAULT_CALLBACK,
        batch_size=None,
        maxdepth=None,
        **kwargs,
    ):
        """Copy file(s) from local.

        Copies a specific file or tree of files (if recursive=True). If rpath
        ends with a "/", it will be assumed to be a directory, and target files
        will go within.

        The put_file method will be called concurrently on a batch of files. The
        batch_size option can configure the amount of futures that can be executed
        at the same time. If it is -1, then all the files will be uploaded concurrently.
        The default can be set for this instance by passing "batch_size" in the
        constructor, or for all instances by setting the "gather_batch_size" key
        in ``fsspec.config.conf``, falling back to 1/8th of the system limit.
        """
        if isinstance(lpath, list) and isinstance(rpath, list):
            # No need to expand paths when both source and destination
            # are provided as lists
            rpaths = rpath
            lpaths = lpath
        else:
            source_is_str = isinstance(lpath, str)
            if source_is_str:
                lpath = make_path_posix(lpath)
            fs = LocalFileSystem()
            lpaths = fs.expand_path(lpath, recursive=recursive, maxdepth=maxdepth)
            if source_is_str and (not recursive or maxdepth is not None):
                # Non-recursive glob does not copy directories
                lpaths = [p for p in lpaths if not (trailing_sep(p) or fs.isdir(p))]
                if not lpaths:
                    return

            source_is_file = len(lpaths) == 1
            dest_is_dir = isinstance(rpath, str) and (
                trailing_sep(rpath) or await self._isdir(rpath)
            )

            rpath = self._strip_protocol(rpath)
            exists = source_is_str and (
                (has_magic(lpath) and source_is_file)
                or (not has_magic(lpath) and dest_is_dir and not trailing_sep(lpath))
            )
            rpaths = other_paths(
                lpaths,
                rpath,
                exists=exists,
                flatten=not source_is_str,
            )

        is_dir = {l: os.path.isdir(l) for l in lpaths}
        rdirs = [r for l, r in zip(lpaths, rpaths) if is_dir[l]]
        file_pairs = [(l, r) for l, r in zip(lpaths, rpaths) if not is_dir[l]]

        await asyncio.gather(*[self._makedirs(d, exist_ok=True) for d in rdirs])
        batch_size = batch_size or self.batch_size

        coros = []
        callback.set_size(len(file_pairs))
        for lfile, rfile in file_pairs:
            put_file = callback.branch_coro(self._put_file)
            coros.append(put_file(lfile, rfile, **kwargs))

        return await _run_coros_in_chunks(
            coros, batch_size=batch_size, callback=callback
        )

    async def _get_file(self, rpath, lpath, **kwargs):
        raise NotImplementedError

    async def _get(
        self,
        rpath,
        lpath,
        recursive=False,
        callback=DEFAULT_CALLBACK,
        maxdepth=None,
        **kwargs,
    ):
        """Copy file(s) to local.

        Copies a specific file or tree of files (if recursive=True). If lpath
        ends with a "/", it will be assumed to be a directory, and target files
        will go within. Can submit a list of paths, which may be glob-patterns
        and will be expanded.

        The get_file method will be called concurrently on a batch of files. The
        batch_size option can configure the amount of futures that can be executed
        at the same time. If it is -1, then all the files will be downloaded concurrently.
        The default can be set for this instance by passing "batch_size" in the
        constructor, or for all instances by setting the "gather_batch_size" key
        in ``fsspec.config.conf``, falling back to 1/8th of the system limit.
        """
        if isinstance(lpath, list) and isinstance(rpath, list):
            # No need to expand paths when both source and destination
            # are provided as lists
            rpaths = rpath
            lpaths = lpath
        else:
            source_is_str = isinstance(rpath, str)
            # First check for rpath trailing slash as _strip_protocol removes it.
            source_not_trailing_sep = source_is_str and not trailing_sep(rpath)
            rpath = self._strip_protocol(rpath)
            rpaths = await self._expand_path(
                rpath, recursive=recursive, maxdepth=maxdepth
            )
            if source_is_str and (not recursive or maxdepth is not None):
                # Non-recursive glob does not copy directories
                rpaths = [
                    p for p in rpaths if not (trailing_sep(p) or await self._isdir(p))
                ]
                if not rpaths:
                    return

            lpath = make_path_posix(lpath)
            source_is_file = len(rpaths) == 1
            dest_is_dir = isinstance(lpath, str) and (
                trailing_sep(lpath) or LocalFileSystem().isdir(lpath)
            )

            exists = source_is_str and (
                (has_magic(rpath) and source_is_file)
                or (not has_magic(rpath) and dest_is_dir and source_not_trailing_sep)
            )
            lpaths = other_paths(
                rpaths,
                lpath,
                exists=exists,
                flatten=not source_is_str,
            )

        [os.makedirs(os.path.dirname(lp), exist_ok=True) for lp in lpaths]
        batch_size = kwargs.pop("batch_size", self.batch_size)

        coros = []
        callback.set_size(len(lpaths))
        for lpath, rpath in zip(lpaths, rpaths):
            get_file = callback.branch_coro(self._get_file)
            coros.append(get_file(rpath, lpath, **kwargs))

        return await _run_coros_in_chunks(
            coros, batch_size=batch_size, callback=callback
        )

    async def _isfile(self, path):
        try:
            return (await self._info(path))["type"] == "file"
        except:  # noqa: E722
            return False

    async def _isdir(self, path):
        try:
            return (await self._info(path))["type"] == "directory"
        except OSError:
            return False

    async def _size(self, path):
        return (await self._info(path)).get("size", None)

    async def _sizes(self, paths, batch_size=None):
        batch_size = batch_size or self.batch_size
        return await _run_coros_in_chunks(
            [self._size(p) for p in paths], batch_size=batch_size
        )

    async def _exists(self, path, **kwargs):
        try:
            await self._info(path, **kwargs)
            return True
        except FileNotFoundError:
            return False

    async def _info(self, path, **kwargs):
        raise NotImplementedError

    async def _ls(self, path, detail=True, **kwargs):
        raise NotImplementedError

    async def _walk(self, path, maxdepth=None, on_error="omit", **kwargs):
        if maxdepth is not None and maxdepth < 1:
            raise ValueError("maxdepth must be at least 1")

        path = self._strip_protocol(path)
        full_dirs = {}
        dirs = {}
        files = {}

        detail = kwargs.pop("detail", False)
        try:
            listing = await self._ls(path, detail=True, **kwargs)
        except (FileNotFoundError, OSError) as e:
            if on_error == "raise":
                raise
            elif callable(on_error):
                on_error(e)
            if detail:
                yield path, {}, {}
            else:
                yield path, [], []
            return

        for info in listing:
            # each info name must be at least [path]/part, but here
            # we check also for names like [path]/part/
            pathname = info["name"].rstrip("/")
            name = pathname.rsplit("/", 1)[-1]
            if info["type"] == "directory" and pathname != path:
                # do not include "self" path
                full_dirs[name] = pathname
                dirs[name] = info
            elif pathname == path:
                # file-like with same name as given path
                files[""] = info
            else:
                files[name] = info

        if detail:
            yield path, dirs, files
        else:
            yield path, list(dirs), list(files)

        if maxdepth is not None:
            maxdepth -= 1
            if maxdepth < 1:
                return

        for d in dirs:
            async for _ in self._walk(
                full_dirs[d], maxdepth=maxdepth, detail=detail, **kwargs
            ):
                yield _

    async def _glob(self, path, maxdepth=None, **kwargs):
        if maxdepth is not None and maxdepth < 1:
            raise ValueError("maxdepth must be at least 1")

        seps = (os.path.sep, os.path.altsep) if os.path.altsep else (os.path.sep,)
        ends_with_sep = path.endswith(seps)  # _strip_protocol strips trailing slash
        path = self._strip_protocol(path)
        append_slash_to_dirname = ends_with_sep or path.endswith(
            tuple(sep + "**" for sep in seps)
        )
        idx_star = path.find("*") if path.find("*") >= 0 else len(path)
        idx_qmark = path.find("?") if path.find("?") >= 0 else len(path)
        idx_brace = path.find("[") if path.find("[") >= 0 else len(path)

        min_idx = min(idx_star, idx_qmark, idx_brace)

        detail = kwargs.pop("detail", False)

        if not has_magic(path):
            if await self._exists(path, **kwargs):
                if not detail:
                    return [path]
                else:
                    return {path: await self._info(path, **kwargs)}
            else:
                if not detail:
                    return []  # glob of non-existent returns empty
                else:
                    return {}
        elif "/" in path[:min_idx]:
            min_idx = path[:min_idx].rindex("/")
            root = path[: min_idx + 1]
            depth = path[min_idx + 1 :].count("/") + 1
        else:
            root = ""
            depth = path[min_idx + 1 :].count("/") + 1

        if "**" in path:
            if maxdepth is not None:
                idx_double_stars = path.find("**")
                depth_double_stars = path[idx_double_stars:].count("/") + 1
                depth = depth - depth_double_stars + maxdepth
            else:
                depth = None

        allpaths = await self._find(
            root, maxdepth=depth, withdirs=True, detail=True, **kwargs
        )

        pattern = glob_translate(path + ("/" if ends_with_sep else ""))
        pattern = re.compile(pattern)

        out = {
            p: info
            for p, info in sorted(allpaths.items())
            if pattern.match(
                p + "/"
                if append_slash_to_dirname and info["type"] == "directory"
                else p
            )
        }

        if detail:
            return out
        else:
            return list(out)

    async def _du(self, path, total=True, maxdepth=None, **kwargs):
        sizes = {}
        # async for?
        for f in await self._find(path, maxdepth=maxdepth, **kwargs):
            info = await self._info(f)
            sizes[info["name"]] = info["size"]
        if total:
            return sum(sizes.values())
        else:
            return sizes

    async def _find(self, path, maxdepth=None, withdirs=False, **kwargs):
        path = self._strip_protocol(path)
        out = {}
        detail = kwargs.pop("detail", False)

        # Add the root directory if withdirs is requested
        # This is needed for posix glob compliance
        if withdirs and path != "" and await self._isdir(path):
            out[path] = await self._info(path)

        # async for?
        async for _, dirs, files in self._walk(path, maxdepth, detail=True, **kwargs):
            if withdirs:
                files.update(dirs)
            out.update({info["name"]: info for name, info in files.items()})
        if not out and (await self._isfile(path)):
            # walk works on directories, but find should also return [path]
            # when path happens to be a file
            out[path] = {}
        names = sorted(out)
        if not detail:
            return names
        else:
            return {name: out[name] for name in names}

    async def _expand_path(self, path, recursive=False, maxdepth=None):
        if maxdepth is not None and maxdepth < 1:
            raise ValueError("maxdepth must be at least 1")

        if isinstance(path, str):
            out = await self._expand_path([path], recursive, maxdepth)
        else:
            out = set()
            path = [self._strip_protocol(p) for p in path]
            for p in path:  # can gather here
                if has_magic(p):
                    bit = set(await self._glob(p, maxdepth=maxdepth))
                    out |= bit
                    if recursive:
                        # glob call above expanded one depth so if maxdepth is defined
                        # then decrement it in expand_path call below. If it is zero
                        # after decrementing then avoid expand_path call.
                        if maxdepth is not None and maxdepth <= 1:
                            continue
                        out |= set(
                            await self._expand_path(
                                list(bit),
                                recursive=recursive,
                                maxdepth=maxdepth - 1 if maxdepth is not None else None,
                            )
                        )
                    continue
                elif recursive:
                    rec = set(await self._find(p, maxdepth=maxdepth, withdirs=True))
                    out |= rec
                if p not in out and (recursive is False or (await self._exists(p))):
                    # should only check once, for the root
                    out.add(p)
        if not out:
            raise FileNotFoundError(path)
        return sorted(out)

    async def _mkdir(self, path, create_parents=True, **kwargs):
        pass  # not necessary to implement, may not have directories

    async def _makedirs(self, path, exist_ok=False):
        pass  # not necessary to implement, may not have directories

    async def open_async(self, path, mode="rb", **kwargs):
        if "b" not in mode or kwargs.get("compression"):
            raise ValueError
        raise NotImplementedError


def mirror_sync_methods(obj):
    """Populate sync and async methods for obj

    For each method, this will create a sync version if the name refers to
    an async method (coroutine) and there is no override in the child class;
    it will create an async method for the corresponding sync method if there
    is no implementation.

    Uses the methods specified in
    - async_methods: the set that an implementation is expected to provide
    - default_async_methods: that can be derived from their sync version in
      AbstractFileSystem
    - AsyncFileSystem: async-specific default coroutines
    """
    from fsspec import AbstractFileSystem

    for method in async_methods + dir(AsyncFileSystem):
        if not method.startswith("_"):
            continue
        smethod = method[1:]
        if private.match(method):
            isco = inspect.iscoroutinefunction(getattr(obj, method, None))
            unsync = getattr(getattr(obj, smethod, False), "__func__", None)
            is_default = unsync is getattr(AbstractFileSystem, smethod, "")
            if isco and is_default:
                mth = sync_wrapper(getattr(obj, method), obj=obj)
                setattr(obj, smethod, mth)
                if not mth.__doc__:
                    mth.__doc__ = getattr(
                        getattr(AbstractFileSystem, smethod, None), "__doc__", ""
                    )
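

# Sketch of the effect (names illustrative): given an instance whose class
# defines only `async def _cat_file(...)`, after mirroring the blocking form
# is available as well:
#
#     mirror_sync_methods(inst)
#     inst.cat_file("path")  # runs inst._cat_file on the IO loop via sync()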


class FSSpecCoroutineCancel(Exception):
    pass


def _dump_running_tasks(
    printout=True, cancel=True, exc=FSSpecCoroutineCancel, with_task=False
):
    import traceback

    tasks = [t for t in asyncio.tasks.all_tasks(loop[0]) if not t.done()]
    if printout:
        [task.print_stack() for task in tasks]
    out = [
        {
            "locals": task._coro.cr_frame.f_locals,
            "file": task._coro.cr_frame.f_code.co_filename,
            "firstline": task._coro.cr_frame.f_code.co_firstlineno,
            "linelo": task._coro.cr_frame.f_lineno,
            "stack": traceback.format_stack(task._coro.cr_frame),
            "task": task if with_task else None,
        }
        for task in tasks
    ]
    if cancel:
        for t in tasks:
            cbs = t._callbacks
            t.cancel()
            asyncio.futures.Future.set_exception(t, exc)
            asyncio.futures.Future.cancel(t)
            [cb[0](t) for cb in cbs]  # cancels any dependent concurrent.futures
            try:
                t._coro.throw(exc)  # exits coro, unless explicitly handled
            except exc:
                pass
    return out


class AbstractAsyncStreamedFile(AbstractBufferedFile):
    # no read buffering, and always auto-commit
    # TODO: readahead might still be useful here, but needs async version

    async def read(self, length=-1):
        """
        Return data from cache, or fetch pieces as necessary

        Parameters
        ----------
        length: int (-1)
            Number of bytes to read; if <0, all remaining bytes.
        """
        length = -1 if length is None else int(length)
        if self.mode != "rb":
            raise ValueError("File not in read mode")
        if length < 0:
            length = self.size - self.loc
        if self.closed:
            raise ValueError("I/O operation on closed file.")
        if length == 0:
            # don't even bother calling fetch
            return b""
        out = await self._fetch_range(self.loc, self.loc + length)
        self.loc += len(out)
        return out

    async def write(self, data):
        """
        Write data to buffer.

        Buffer only sent on flush() or if buffer is greater than
        or equal to blocksize.

        Parameters
        ----------
        data: bytes
            Set of bytes to be written.
        """
        if self.mode not in {"wb", "ab"}:
            raise ValueError("File not in write mode")
        if self.closed:
            raise ValueError("I/O operation on closed file.")
        if self.forced:
            raise ValueError("This file has been force-flushed, can only close")
        out = self.buffer.write(data)
        self.loc += out
        if self.buffer.tell() >= self.blocksize:
            await self.flush()
        return out

    async def close(self):
        """Close file

        Finalizes writes, discards cache
        """
        if getattr(self, "_unclosable", False):
            return
        if self.closed:
            return
        if self.mode == "rb":
            self.cache = None
        else:
            if not self.forced:
                await self.flush(force=True)

            if self.fs is not None:
                self.fs.invalidate_cache(self.path)
                self.fs.invalidate_cache(self.fs._parent(self.path))

        self.closed = True

    async def flush(self, force=False):
        if self.closed:
            raise ValueError("Flush on closed file")
        if force and self.forced:
            raise ValueError("Force flush cannot be called more than once")
        if force:
            self.forced = True

        if self.mode not in {"wb", "ab"}:
            # no-op to flush on read-mode
            return

        if not force and self.buffer.tell() < self.blocksize:
            # Defer write on small block
            return

        if self.offset is None:
            # Initialize a multipart upload
            self.offset = 0
            try:
                await self._initiate_upload()
            except:  # noqa: E722
                self.closed = True
                raise

        if await self._upload_chunk(final=force) is not False:
            self.offset += self.buffer.seek(0, 2)
            self.buffer = io.BytesIO()

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.close()

    async def _fetch_range(self, start, end):
        raise NotImplementedError

    async def _initiate_upload(self):
        pass

    async def _upload_chunk(self, final=False):
        raise NotImplementedError
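

# Usage sketch for streamed files (assumes a backend that implements open_async
# and the coroutines above; `fs` is hypothetical):
#
#     async def copy_bytes(fs):
#         async with await fs.open_async("remote/path", "rb") as f:
#             return await f.read()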