reference.py

import base64
import collections
import io
import itertools
import logging
import math
import os
from functools import lru_cache
from itertools import chain
from typing import TYPE_CHECKING, Literal

import fsspec.core
from fsspec.spec import AbstractBufferedFile

try:
    import ujson as json
except ImportError:
    if not TYPE_CHECKING:
        import json

from fsspec.asyn import AsyncFileSystem
from fsspec.callbacks import DEFAULT_CALLBACK
from fsspec.core import filesystem, open, split_protocol
from fsspec.implementations.asyn_wrapper import AsyncFileSystemWrapper
from fsspec.utils import isfilelike, merge_offset_ranges, other_paths

logger = logging.getLogger("fsspec.reference")
class ReferenceNotReachable(RuntimeError):
    def __init__(self, reference, target, *args):
        super().__init__(*args)
        self.reference = reference
        self.target = target

    def __str__(self):
        return f'Reference "{self.reference}" failed to fetch target {self.target}'


def _first(d):
    return next(iter(d.values()))


def _prot_in_references(path, references):
    ref = references.get(path)
    if isinstance(ref, (list, tuple)) and isinstance(ref[0], str):
        return split_protocol(ref[0])[0] if ref[0] else ref[0]


def _protocol_groups(paths, references):
    if isinstance(paths, str):
        return {_prot_in_references(paths, references): [paths]}
    out = {}
    for path in paths:
        protocol = _prot_in_references(path, references)
        out.setdefault(protocol, []).append(path)
    return out


class RefsValuesView(collections.abc.ValuesView):
    def __iter__(self):
        for val in self._mapping.zmetadata.values():
            yield json.dumps(val).encode()
        yield from self._mapping._items.values()
        for field in self._mapping.listdir():
            chunk_sizes = self._mapping._get_chunk_sizes(field)
            if len(chunk_sizes) == 0:
                yield self._mapping[field + "/0"]
                continue
            yield from self._mapping._generate_all_records(field)


class RefsItemsView(collections.abc.ItemsView):
    def __iter__(self):
        return zip(self._mapping.keys(), self._mapping.values())


def ravel_multi_index(idx, sizes):
    val = 0
    mult = 1
    for i, s in zip(idx[::-1], sizes[::-1]):
        val += i * mult
        mult *= s
    return val
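
# Illustrative example (hypothetical values, not part of the library): this helper
# turns a multi-dimensional chunk index into the flat, row-major position used to
# locate a reference within the parquet records. For a field with a 2 x 3 grid of
# chunks:
#
#     ravel_multi_index([1, 2], [2, 3])  # -> 1 * 3 + 2 == 5
#
# i.e. the key "field/1.2" corresponds to the sixth reference in storage order.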


class LazyReferenceMapper(collections.abc.MutableMapping):
    """This interface can be used to read/write references from Parquet stores.
    It is not intended for other types of references.
    It can be used with Kerchunk's MultiZarrToZarr method to combine
    references into a parquet store.
    Examples of this use-case can be found here:
    https://fsspec.github.io/kerchunk/advanced.html?highlight=parquet#parquet-storage"""
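
    # Illustrative usage sketch (hypothetical paths and values; writing parquet
    # records additionally requires numpy, pandas and kerchunk):
    #
    #     refs = LazyReferenceMapper.create("memory://refs.parq", record_size=1000)
    #     refs[".zgroup"] = b'{"zarr_format": 2}'
    #     refs["data/.zarray"] = b'{"shape": [10], "chunks": [5], "dtype": "<f8"}'
    #     refs["data/0"] = ["s3://bucket/file.nc", 0, 100]
    #     refs.flush()
    #
    # Chunk references accumulate in memory and are written out as parquet
    # partitions of ``record_size`` rows when full, or on ``flush()``.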
    # import is class level to prevent numpy dep requirement for fsspec
    @property
    def np(self):
        import numpy as np

        return np

    @property
    def pd(self):
        import pandas as pd

        return pd

    def __init__(
        self,
        root,
        fs=None,
        out_root=None,
        cache_size=128,
        categorical_threshold=10,
        engine: Literal["fastparquet", "pyarrow"] = "fastparquet",
    ):
        """
        This instance will be writable, storing changes in memory until full partitions
        are accumulated or .flush() is called.

        To create an empty lazy store, use .create()

        Parameters
        ----------
        root : str
            Root of parquet store
        fs : fsspec.AbstractFileSystem
            fsspec filesystem object, default is local filesystem.
        cache_size : int, default=128
            Maximum size of LRU cache, where cache_size*record_size denotes
            the total number of references that can be loaded in memory at once.
        categorical_threshold : int
            Encode urls as pandas.Categorical to reduce memory footprint if the ratio
            of the total number of refs to the number of unique urls for each variable
            is greater than or equal to this number. (default 10)
        engine: Literal["fastparquet", "pyarrow"]
            Engine choice for reading parquet files. (default is "fastparquet")
        """
        self.root = root
        self.chunk_sizes = {}
        self.cat_thresh = categorical_threshold
        self.engine = engine
        self.cache_size = cache_size
        self.url = self.root + "/{field}/refs.{record}.parq"
        # TODO: derive fs from `root`
        self.fs = fsspec.filesystem("file") if fs is None else fs
        self.out_root = self.fs.unstrip_protocol(out_root or self.root)

        from importlib.util import find_spec

        if self.engine == "pyarrow" and find_spec("pyarrow") is None:
            raise ImportError("engine choice `pyarrow` is not installed.")

    def __getattr__(self, item):
        if item in ("_items", "record_size", "zmetadata"):
            self.setup()
            # avoid possible recursion if setup fails somehow
            return self.__dict__[item]
        raise AttributeError(item)

    def setup(self):
        self._items = {}
        self._items[".zmetadata"] = self.fs.cat_file(
            "/".join([self.root, ".zmetadata"])
        )
        met = json.loads(self._items[".zmetadata"])
        self.record_size = met["record_size"]
        self.zmetadata = met["metadata"]

        # Define function to open and decompress refs
        @lru_cache(maxsize=self.cache_size)
        def open_refs(field, record):
            """cached parquet file loader"""
            path = self.url.format(field=field, record=record)
            data = io.BytesIO(self.fs.cat_file(path))
            try:
                df = self.pd.read_parquet(data, engine=self.engine)
                refs = {c: df[c].to_numpy() for c in df.columns}
            except OSError:
                refs = None
            return refs

        self.open_refs = open_refs

    @staticmethod
    def create(root, storage_options=None, fs=None, record_size=10000, **kwargs):
        """Make empty parquet reference set

        First deletes the contents of the given directory, if it exists.

        Parameters
        ----------
        root: str
            Directory to contain the output; will be created
        storage_options: dict | None
            For making the filesystem to use for writing, if fs is None
        fs: FileSystem | None
            Filesystem for writing
        record_size: int
            Number of references per parquet file
        kwargs: passed to __init__

        Returns
        -------
        LazyReferenceMapper instance
        """
        met = {"metadata": {}, "record_size": record_size}
        if fs is None:
            fs, root = fsspec.core.url_to_fs(root, **(storage_options or {}))
        if fs.exists(root):
            fs.rm(root, recursive=True)
        fs.makedirs(root, exist_ok=True)
        fs.pipe("/".join([root, ".zmetadata"]), json.dumps(met).encode())
        return LazyReferenceMapper(root, fs, **kwargs)
    @lru_cache()
    def listdir(self):
        """List top-level directories"""
        dirs = (p.rsplit("/", 1)[0] for p in self.zmetadata if not p.startswith(".z"))
        return set(dirs)

    def ls(self, path="", detail=True):
        """Shortcut file listings"""
        path = path.rstrip("/")
        pathdash = path + "/" if path else ""
        dirnames = self.listdir()
        dirs = [
            d
            for d in dirnames
            if d.startswith(pathdash) and "/" not in d.lstrip(pathdash)
        ]
        if dirs:
            others = {
                f
                for f in chain(
                    [".zmetadata"],
                    (name for name in self.zmetadata),
                    (name for name in self._items),
                )
                if f.startswith(pathdash) and "/" not in f.lstrip(pathdash)
            }
            if detail is False:
                others.update(dirs)
                return sorted(others)
            dirinfo = [{"name": name, "type": "directory", "size": 0} for name in dirs]
            fileinfo = [
                {
                    "name": name,
                    "type": "file",
                    "size": len(
                        json.dumps(self.zmetadata[name])
                        if name in self.zmetadata
                        else self._items[name]
                    ),
                }
                for name in others
            ]
            return sorted(dirinfo + fileinfo, key=lambda s: s["name"])
        field = path
        others = set(
            [name for name in self.zmetadata if name.startswith(f"{path}/")]
            + [name for name in self._items if name.startswith(f"{path}/")]
        )
        fileinfo = [
            {
                "name": name,
                "type": "file",
                "size": len(
                    json.dumps(self.zmetadata[name])
                    if name in self.zmetadata
                    else self._items[name]
                ),
            }
            for name in others
        ]
        keys = self._keys_in_field(field)

        if detail is False:
            return list(others) + list(keys)
        recs = self._generate_all_records(field)
        recinfo = [
            {"name": name, "type": "file", "size": rec[-1]}
            for name, rec in zip(keys, recs)
            if rec[0]  # filters out path==None, deleted/missing
        ]
        return fileinfo + recinfo
    def _load_one_key(self, key):
        """Get the reference for one key

        Returns bytes, one-element list or three-element list.
        """
        if key in self._items:
            return self._items[key]
        elif key in self.zmetadata:
            return json.dumps(self.zmetadata[key]).encode()
        elif "/" not in key or self._is_meta(key):
            raise KeyError(key)
        field, _ = key.rsplit("/", 1)
        record, ri, chunk_size = self._key_to_record(key)
        maybe = self._items.get((field, record), {}).get(ri, False)
        if maybe is None:
            # explicitly deleted
            raise KeyError
        elif maybe:
            return maybe
        elif chunk_size == 0:
            return b""

        # Chunk keys can be loaded from row group and cached in LRU cache
        try:
            refs = self.open_refs(field, record)
        except (ValueError, TypeError, FileNotFoundError) as exc:
            raise KeyError(key) from exc
        columns = ["path", "offset", "size", "raw"]
        selection = [refs[c][ri] if c in refs else None for c in columns]
        raw = selection[-1]
        if raw is not None:
            return raw
        if selection[0] is None:
            raise KeyError("This reference does not exist or has been deleted")
        if selection[1:3] == [0, 0]:
            # URL only
            return selection[:1]
        # URL, offset, size
        return selection[:3]

    @lru_cache(4096)
    def _key_to_record(self, key):
        """Details needed to construct a reference for one key"""
        field, chunk = key.rsplit("/", 1)
        chunk_sizes = self._get_chunk_sizes(field)
        if len(chunk_sizes) == 0:
            return 0, 0, 0
        chunk_idx = [int(c) for c in chunk.split(".")]
        chunk_number = ravel_multi_index(chunk_idx, chunk_sizes)
        record = chunk_number // self.record_size
        ri = chunk_number % self.record_size
        return record, ri, len(chunk_sizes)
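
    # Worked example (hypothetical numbers): with chunk_sizes == [2, 3] and
    # record_size == 4, the key "field/1.2" flattens to chunk_number 5, so it
    # lives in parquet record 1 (5 // 4) at row index 1 (5 % 4).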
    def _get_chunk_sizes(self, field):
        """The number of chunks along each axis for a given field"""
        if field not in self.chunk_sizes:
            zarray = self.zmetadata[f"{field}/.zarray"]
            size_ratio = [
                math.ceil(s / c) for s, c in zip(zarray["shape"], zarray["chunks"])
            ]
            self.chunk_sizes[field] = size_ratio or [1]
        return self.chunk_sizes[field]

    def _generate_record(self, field, record):
        """The references for a given parquet file of a given field"""
        refs = self.open_refs(field, record)
        it = iter(zip(*refs.values()))
        if len(refs) == 3:
            # All urls
            return (list(t) for t in it)
        elif len(refs) == 1:
            # All raws
            return refs["raw"]
        else:
            # Mix of urls and raws
            return (list(t[:3]) if not t[3] else t[3] for t in it)

    def _generate_all_records(self, field):
        """Load all the references within a field by iterating over the parquet files"""
        nrec = 1
        for ch in self._get_chunk_sizes(field):
            nrec *= ch
        nrec = math.ceil(nrec / self.record_size)
        for record in range(nrec):
            yield from self._generate_record(field, record)

    def values(self):
        return RefsValuesView(self)

    def items(self):
        return RefsItemsView(self)

    def __hash__(self):
        return id(self)
    def __getitem__(self, key):
        return self._load_one_key(key)

    def __setitem__(self, key, value):
        if "/" in key and not self._is_meta(key):
            field, chunk = key.rsplit("/", 1)
            record, i, _ = self._key_to_record(key)
            subdict = self._items.setdefault((field, record), {})
            subdict[i] = value
            if len(subdict) == self.record_size:
                self.write(field, record)
        else:
            # metadata or top-level
            if hasattr(value, "to_bytes"):
                val = value.to_bytes().decode()
            elif isinstance(value, bytes):
                val = value.decode()
            else:
                val = value
            self._items[key] = val
            new_value = json.loads(val)
            self.zmetadata[key] = {**self.zmetadata.get(key, {}), **new_value}

    @staticmethod
    def _is_meta(key):
        return key.startswith(".z") or "/.z" in key

    def __delitem__(self, key):
        if key in self._items:
            del self._items[key]
        elif key in self.zmetadata:
            del self.zmetadata[key]
        else:
            if "/" in key and not self._is_meta(key):
                field, _ = key.rsplit("/", 1)
                record, i, _ = self._key_to_record(key)
                subdict = self._items.setdefault((field, record), {})
                subdict[i] = None
                if len(subdict) == self.record_size:
                    self.write(field, record)
            else:
                # metadata or top-level
                self._items[key] = None
    def write(self, field, record, base_url=None, storage_options=None):
        # extra requirements if writing
        import kerchunk.df
        import numpy as np
        import pandas as pd

        partition = self._items[(field, record)]
        original = False
        if len(partition) < self.record_size:
            try:
                original = self.open_refs(field, record)
            except OSError:
                pass

        if original:
            paths = original["path"]
            offsets = original["offset"]
            sizes = original["size"]
            raws = original["raw"]
        else:
            paths = np.full(self.record_size, np.nan, dtype="O")
            offsets = np.zeros(self.record_size, dtype="int64")
            sizes = np.zeros(self.record_size, dtype="int64")
            raws = np.full(self.record_size, np.nan, dtype="O")
        for j, data in partition.items():
            if isinstance(data, list):
                if (
                    str(paths.dtype) == "category"
                    and data[0] not in paths.dtype.categories
                ):
                    paths = paths.add_categories(data[0])
                paths[j] = data[0]
                if len(data) > 1:
                    offsets[j] = data[1]
                    sizes[j] = data[2]
            elif data is None:
                # delete
                paths[j] = None
                offsets[j] = 0
                sizes[j] = 0
                raws[j] = None
            else:
                # this is the only call into kerchunk, could remove
                raws[j] = kerchunk.df._proc_raw(data)
        # TODO: only save needed columns
        df = pd.DataFrame(
            {
                "path": paths,
                "offset": offsets,
                "size": sizes,
                "raw": raws,
            },
            copy=False,
        )
        if df.path.count() / (df.path.nunique() or 1) > self.cat_thresh:
            df["path"] = df["path"].astype("category")
        object_encoding = {"raw": "bytes", "path": "utf8"}
        has_nulls = ["path", "raw"]

        fn = f"{base_url or self.out_root}/{field}/refs.{record}.parq"
        self.fs.mkdirs(f"{base_url or self.out_root}/{field}", exist_ok=True)

        if self.engine == "pyarrow":
            df_backend_kwargs = {"write_statistics": False}
        elif self.engine == "fastparquet":
            df_backend_kwargs = {
                "stats": False,
                "object_encoding": object_encoding,
                "has_nulls": has_nulls,
            }
        else:
            raise NotImplementedError(f"{self.engine} not supported")
        df.to_parquet(
            fn,
            engine=self.engine,
            storage_options=storage_options
            or getattr(self.fs, "storage_options", None),
            compression="zstd",
            index=False,
            **df_backend_kwargs,
        )

        partition.clear()
        self._items.pop((field, record))
    def flush(self, base_url=None, storage_options=None):
        """Output any modified or deleted keys

        Parameters
        ----------
        base_url: str
            Location of the output
        """
        # write what we have so far and clear sub chunks
        for thing in list(self._items):
            if isinstance(thing, tuple):
                field, record = thing
                self.write(
                    field,
                    record,
                    base_url=base_url,
                    storage_options=storage_options,
                )

        # gather .zmetadata from self._items and write that too
        for k in list(self._items):
            if k != ".zmetadata" and ".z" in k:
                self.zmetadata[k] = json.loads(self._items.pop(k))
        met = {"metadata": self.zmetadata, "record_size": self.record_size}
        self._items.clear()
        self._items[".zmetadata"] = json.dumps(met).encode()
        self.fs.pipe(
            "/".join([base_url or self.out_root, ".zmetadata"]),
            self._items[".zmetadata"],
        )

        # TODO: only clear those that we wrote to?
        self.open_refs.cache_clear()
    def __len__(self):
        # Caveat: This counts expected references, not actual - but is fast
        count = 0
        for field in self.listdir():
            if field.startswith("."):
                count += 1
            else:
                count += math.prod(self._get_chunk_sizes(field))
        count += len(self.zmetadata)  # all metadata keys
        # any other files not in reference partitions
        count += sum(1 for _ in self._items if not isinstance(_, tuple))
        return count

    def __iter__(self):
        # Caveat: returns only existing keys, so the number of these does not
        # match len(self)
        metas = set(self.zmetadata)
        metas.update(self._items)
        for bit in metas:
            if isinstance(bit, str):
                yield bit
        for field in self.listdir():
            for k in self._keys_in_field(field):
                if k in self:
                    yield k

    def __contains__(self, item):
        try:
            self._load_one_key(item)
            return True
        except KeyError:
            return False

    def _keys_in_field(self, field):
        """List key names in given field

        Produces strings like "field/x.y" appropriate to the chunking of the array
        """
        chunk_sizes = self._get_chunk_sizes(field)
        if len(chunk_sizes) == 0:
            yield field + "/0"
            return
        inds = itertools.product(*(range(i) for i in chunk_sizes))
        for ind in inds:
            yield field + "/" + ".".join([str(c) for c in ind])


class ReferenceFileSystem(AsyncFileSystem):
    """View byte ranges of some other file as a file system

    Initial version: single file system target, which must support
    async, and must allow start and end args in _cat_file. Later versions
    may allow multiple arbitrary URLs for the targets.

    This FileSystem is read-only. It is designed to be used with async
    targets (for now). We do not get original file details from the target FS.

    Configuration is by passing a dict of references at init, or a URL to
    a JSON file containing the same; this dict
    can also contain concrete data for some set of paths.

    Reference dict format:
    {path0: bytes_data, path1: (target_url, offset, size)}

    https://github.com/fsspec/kerchunk/blob/main/README.md
    """
    protocol = "reference"
    cachable = False

    def __init__(
        self,
        fo,
        target=None,
        ref_storage_args=None,
        target_protocol=None,
        target_options=None,
        remote_protocol=None,
        remote_options=None,
        fs=None,
        template_overrides=None,
        simple_templates=True,
        max_gap=64_000,
        max_block=256_000_000,
        cache_size=128,
        **kwargs,
    ):
        """
        Parameters
        ----------
        fo : dict or str
            The set of references to use for this instance, with a structure as above.
            If str referencing a JSON file, will use fsspec.open, in conjunction
            with target_options and target_protocol to open and parse JSON at this
            location. If a directory, then assume references are a set of parquet
            files to be loaded lazily.
        target : str
            For any references having target_url as None, this is the default file
            target to use
        ref_storage_args : dict
            If references is a str, use these kwargs for loading the JSON file.
            Deprecated: use target_options instead.
        target_protocol : str
            Used for loading the reference file, if it is a path. If None, protocol
            will be derived from the given path
        target_options : dict
            Extra FS options for loading the reference file ``fo``, if given as a path
        remote_protocol : str
            The protocol of the filesystem on which the references will be evaluated
            (unless fs is provided). If not given, will be derived from the first
            URL that has a protocol in the templates or in the references, in that
            order.
        remote_options : dict
            kwargs to go with remote_protocol
        fs : AbstractFileSystem | dict(str, (AbstractFileSystem | dict))
            Directly provide a file system(s):
                - a single filesystem instance
                - a dict of protocol:filesystem, where each value is either a
                  filesystem instance, or a dict of kwargs that can be used to
                  create an instance for the given protocol
            If this is given, remote_options and remote_protocol are ignored.
        template_overrides : dict
            Swap out any templates in the references file with these - useful for
            testing.
        simple_templates: bool
            Whether templates can be processed with simple replace (True) or if
            jinja is needed (False, much slower). All reference sets produced by
            ``kerchunk`` are simple in this sense, but the spec allows for complex.
        max_gap, max_block: int
            For merging multiple concurrent requests to the same remote file.
            Neighboring byte ranges will only be merged when their
            inter-range gap is <= ``max_gap``. Default is 64KB. Set to 0
            to only merge when it requires no extra bytes. Pass a negative
            number to disable merging, appropriate for local target files.
            Neighboring byte ranges will only be merged when the size of
            the aggregated range is <= ``max_block``. Default is 256MB.
        cache_size : int
            Maximum size of LRU cache, where cache_size*record_size denotes
            the total number of references that can be loaded in memory at once.
            Only used for lazily loaded references.
        kwargs : passed to parent class
        """
        super().__init__(**kwargs)
        self.target = target
        self.template_overrides = template_overrides
        self.simple_templates = simple_templates
        self.templates = {}
        self.fss = {}
        self._dircache = {}
        self.max_gap = max_gap
        self.max_block = max_block
        if isinstance(fo, str):
            dic = dict(
                **(ref_storage_args or target_options or {}), protocol=target_protocol
            )
            ref_fs, fo2 = fsspec.core.url_to_fs(fo, **dic)
            if ref_fs.isfile(fo2):
                # text JSON
                with fsspec.open(fo, "rb", **dic) as f:
                    logger.info("Read reference from URL %s", fo)
                    text = json.load(f)
                self._process_references(text, template_overrides)
            else:
                # Lazy parquet refs
                logger.info("Open lazy reference dict from URL %s", fo)
                self.references = LazyReferenceMapper(
                    fo2,
                    fs=ref_fs,
                    cache_size=cache_size,
                )
        else:
            # dictionaries
            self._process_references(fo, template_overrides)
        if isinstance(fs, dict):
            self.fss = {
                k: (
                    fsspec.filesystem(k.split(":", 1)[0], **opts)
                    if isinstance(opts, dict)
                    else opts
                )
                for k, opts in fs.items()
            }
            if None not in self.fss:
                self.fss[None] = filesystem("file")
            return
        if fs is not None:
            # single remote FS
            remote_protocol = (
                fs.protocol[0] if isinstance(fs.protocol, tuple) else fs.protocol
            )
            self.fss[remote_protocol] = fs

        if remote_protocol is None:
            # get single protocol from any templates
            for ref in self.templates.values():
                if callable(ref):
                    ref = ref()
                protocol, _ = fsspec.core.split_protocol(ref)
                if protocol and protocol not in self.fss:
                    fs = filesystem(protocol, **(remote_options or {}))
                    self.fss[protocol] = fs

        if remote_protocol is None:
            # get single protocol from references
            # TODO: warning here, since this can be very expensive?
            for ref in self.references.values():
                if callable(ref):
                    ref = ref()
                if isinstance(ref, list) and ref[0]:
                    protocol, _ = fsspec.core.split_protocol(ref[0])
                    if protocol not in self.fss:
                        fs = filesystem(protocol, **(remote_options or {}))
                        self.fss[protocol] = fs
                    # only use first remote URL
                    break

        if remote_protocol and remote_protocol not in self.fss:
            fs = filesystem(remote_protocol, **(remote_options or {}))
            self.fss[remote_protocol] = fs

        self.fss[None] = fs or filesystem("file")  # default one
        # Wrap any non-async filesystems to ensure async methods are available below
        for k, f in self.fss.items():
            if not f.async_impl:
                self.fss[k] = AsyncFileSystemWrapper(f, asynchronous=self.asynchronous)
            elif self.asynchronous ^ f.asynchronous:
                raise ValueError(
                    "Reference-FS's target filesystem must have same value "
                    "of asynchronous"
                )
    def _cat_common(self, path, start=None, end=None):
        path = self._strip_protocol(path)
        logger.debug(f"cat: {path}")
        try:
            part = self.references[path]
        except KeyError as exc:
            raise FileNotFoundError(path) from exc
        if isinstance(part, str):
            part = part.encode()
        if hasattr(part, "to_bytes"):
            part = part.to_bytes()
        if isinstance(part, bytes):
            logger.debug(f"Reference: {path}, type bytes")
            if part.startswith(b"base64:"):
                part = base64.b64decode(part[7:])
            return part, None, None

        if len(part) == 1:
            logger.debug(f"Reference: {path}, whole file => {part}")
            url = part[0]
            start1, end1 = start, end
        else:
            url, start0, size = part
            logger.debug(f"Reference: {path} => {url}, offset {start0}, size {size}")
            end0 = start0 + size

            if start is not None:
                if start >= 0:
                    start1 = start0 + start
                else:
                    start1 = end0 + start
            else:
                start1 = start0
            if end is not None:
                if end >= 0:
                    end1 = start0 + end
                else:
                    end1 = end0 + end
            else:
                end1 = end0
        if url is None:
            url = self.target
        return url, start1, end1
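
    # Worked example (hypothetical reference): for part = ["s3://bkt/f", 100, 50],
    # a request for bytes start=10, end=20 of the logical file maps to absolute
    # target offsets start1=110, end1=120; negative values count back from
    # start0 + size (150 here).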
    async def _cat_file(self, path, start=None, end=None, **kwargs):
        part_or_url, start0, end0 = self._cat_common(path, start=start, end=end)
        if isinstance(part_or_url, bytes):
            return part_or_url[start:end]
        protocol, _ = split_protocol(part_or_url)
        try:
            return await self.fss[protocol]._cat_file(
                part_or_url, start=start0, end=end0
            )
        except Exception as e:
            raise ReferenceNotReachable(path, part_or_url) from e

    def cat_file(self, path, start=None, end=None, **kwargs):
        part_or_url, start0, end0 = self._cat_common(path, start=start, end=end)
        if isinstance(part_or_url, bytes):
            return part_or_url[start:end]
        protocol, _ = split_protocol(part_or_url)
        try:
            return self.fss[protocol].cat_file(part_or_url, start=start0, end=end0)
        except Exception as e:
            raise ReferenceNotReachable(path, part_or_url) from e

    def pipe_file(self, path, value, **_):
        """Temporarily add binary data or reference as a file"""
        self.references[path] = value

    async def _get_file(self, rpath, lpath, **kwargs):
        if self.isdir(rpath):
            return os.makedirs(lpath, exist_ok=True)
        data = await self._cat_file(rpath)
        with open(lpath, "wb") as f:
            f.write(data)

    def get_file(self, rpath, lpath, callback=DEFAULT_CALLBACK, **kwargs):
        if self.isdir(rpath):
            return os.makedirs(lpath, exist_ok=True)
        data = self.cat_file(rpath, **kwargs)
        callback.set_size(len(data))
        if isfilelike(lpath):
            lpath.write(data)
        else:
            with open(lpath, "wb") as f:
                f.write(data)
        callback.absolute_update(len(data))

    def get(self, rpath, lpath, recursive=False, **kwargs):
        if recursive:
            # trigger directory build
            self.ls("")
        rpath = self.expand_path(rpath, recursive=recursive)
        fs = fsspec.filesystem("file", auto_mkdir=True)
        targets = other_paths(rpath, lpath)
        if recursive:
            data = self.cat([r for r in rpath if not self.isdir(r)])
        else:
            data = self.cat(rpath)
        for remote, local in zip(rpath, targets):
            if remote in data:
                fs.pipe_file(local, data[remote])
    def cat(self, path, recursive=False, on_error="raise", **kwargs):
        if isinstance(path, str) and recursive:
            raise NotImplementedError
        if isinstance(path, list) and (recursive or any("*" in p for p in path)):
            raise NotImplementedError
        # TODO: if references is lazy, pre-fetch all paths in batch before access
        proto_dict = _protocol_groups(path, self.references)
        out = {}
        for proto, paths in proto_dict.items():
            fs = self.fss[proto]
            urls, starts, ends, valid_paths = [], [], [], []
            for p in paths:
                # find references or label not-found. Early exit if any not
                # found and on_error is "raise"
                try:
                    u, s, e = self._cat_common(p)
                    if not isinstance(u, (bytes, str)):
                        # nan/None from parquet
                        continue
                except FileNotFoundError as err:
                    if on_error == "raise":
                        raise
                    if on_error != "omit":
                        out[p] = err
                else:
                    urls.append(u)
                    starts.append(s)
                    ends.append(e)
                    valid_paths.append(p)

            # process references into form for merging
            urls2 = []
            starts2 = []
            ends2 = []
            paths2 = []
            whole_files = set()
            for u, s, e, p in zip(urls, starts, ends, valid_paths):
                if isinstance(u, bytes):
                    # data
                    out[p] = u
                elif s is None:
                    # whole file - limits are None, None, and no further
                    # entries are taken for this file
                    whole_files.add(u)
                    urls2.append(u)
                    starts2.append(s)
                    ends2.append(e)
                    paths2.append(p)
            for u, s, e, p in zip(urls, starts, ends, valid_paths):
                # second run to account for files that are to be loaded whole
                if s is not None and u not in whole_files:
                    urls2.append(u)
                    starts2.append(s)
                    ends2.append(e)
                    paths2.append(p)

            # merge and fetch consolidated ranges
            new_paths, new_starts, new_ends = merge_offset_ranges(
                list(urls2),
                list(starts2),
                list(ends2),
                sort=True,
                max_gap=self.max_gap,
                max_block=self.max_block,
            )
            bytes_out = fs.cat_ranges(new_paths, new_starts, new_ends)

            # unbundle from merged bytes - simple approach
            for u, s, e, p in zip(urls, starts, ends, valid_paths):
                if p in out:
                    continue  # was bytes, already handled
                for np, ns, ne, b in zip(new_paths, new_starts, new_ends, bytes_out):
                    if np == u and (ns is None or ne is None):
                        if isinstance(b, Exception):
                            out[p] = b
                        else:
                            out[p] = b[s:e]
                    elif np == u and s >= ns and e <= ne:
                        if isinstance(b, Exception):
                            out[p] = b
                        else:
                            out[p] = b[s - ns : (e - ne) or None]

        for k, v in out.copy().items():
            # these were valid references, but fetch failed, so transform exc
            if isinstance(v, Exception) and k in self.references:
                ex = out[k]
                new_ex = ReferenceNotReachable(k, self.references[k])
                new_ex.__cause__ = ex
                if on_error == "raise":
                    raise new_ex
                elif on_error != "omit":
                    out[k] = new_ex

        if len(out) == 1 and isinstance(path, str) and "*" not in path:
            return _first(out)
        return out
    def _process_references(self, references, template_overrides=None):
        vers = references.get("version", None)
        if vers is None:
            self._process_references0(references)
        elif vers == 1:
            self._process_references1(references, template_overrides=template_overrides)
        else:
            raise ValueError(f"Unknown reference spec version: {vers}")
        # TODO: we make dircache by iterating over all entries, but for Spec >= 1,
        #  can replace with programmatic. Is it even needed for mapper interface?

    def _process_references0(self, references):
        """Make reference dict for Spec Version 0"""
        if isinstance(references, dict):
            # do not do this for lazy/parquet backend, which will not make dicts,
            # but must remain writable in the original object
            references = {
                key: json.dumps(val) if isinstance(val, dict) else val
                for key, val in references.items()
            }
        self.references = references

    def _process_references1(self, references, template_overrides=None):
        if not self.simple_templates or self.templates:
            import jinja2
        self.references = {}
        self._process_templates(references.get("templates", {}))

        @lru_cache(1000)
        def _render_jinja(u):
            return jinja2.Template(u).render(**self.templates)

        for k, v in references.get("refs", {}).items():
            if isinstance(v, str):
                if v.startswith("base64:"):
                    self.references[k] = base64.b64decode(v[7:])
                self.references[k] = v
            elif isinstance(v, dict):
                self.references[k] = json.dumps(v)
            elif self.templates:
                u = v[0]
                if "{{" in u:
                    if self.simple_templates:
                        u = (
                            u.replace("{{", "{")
                            .replace("}}", "}")
                            .format(**self.templates)
                        )
                    else:
                        u = _render_jinja(u)
                self.references[k] = [u] if len(v) == 1 else [u, v[1], v[2]]
            else:
                self.references[k] = v
        self.references.update(self._process_gen(references.get("gen", [])))

    def _process_templates(self, tmp):
        self.templates = {}
        if self.template_overrides is not None:
            tmp.update(self.template_overrides)
        for k, v in tmp.items():
            if "{{" in v:
                import jinja2

                self.templates[k] = lambda temp=v, **kwargs: jinja2.Template(
                    temp
                ).render(**kwargs)
            else:
                self.templates[k] = v
    def _process_gen(self, gens):
        out = {}
        for gen in gens:
            dimension = {
                k: (
                    v
                    if isinstance(v, list)
                    else range(v.get("start", 0), v["stop"], v.get("step", 1))
                )
                for k, v in gen["dimensions"].items()
            }
            products = (
                dict(zip(dimension.keys(), values))
                for values in itertools.product(*dimension.values())
            )
            for pr in products:
                import jinja2

                key = jinja2.Template(gen["key"]).render(**pr, **self.templates)
                url = jinja2.Template(gen["url"]).render(**pr, **self.templates)
                if ("offset" in gen) and ("length" in gen):
                    offset = int(
                        jinja2.Template(gen["offset"]).render(**pr, **self.templates)
                    )
                    length = int(
                        jinja2.Template(gen["length"]).render(**pr, **self.templates)
                    )
                    out[key] = [url, offset, length]
                elif ("offset" in gen) ^ ("length" in gen):
                    raise ValueError(
                        "Both 'offset' and 'length' are required for a "
                        "reference generator entry if either is provided."
                    )
                else:
                    out[key] = [url]
        return out
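
    # Illustrative spec-v1 "gen" entry (hypothetical values) as consumed above:
    #
    #     {
    #         "key": "data/{{i}}",
    #         "url": "s3://bucket/file_{{i}}.bin",
    #         "offset": "{{ i * 1000 }}",
    #         "length": "1000",
    #         "dimensions": {"i": {"stop": 5}},
    #     }
    #
    # expands to keys "data/0" ... "data/4", each referencing a 1000-byte range
    # of the corresponding file.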
    def _dircache_from_items(self):
        self.dircache = {"": []}
        it = self.references.items()
        for path, part in it:
            if isinstance(part, (bytes, str)) or hasattr(part, "to_bytes"):
                size = len(part)
            elif len(part) == 1:
                size = None
            else:
                _, _, size = part
            par = path.rsplit("/", 1)[0] if "/" in path else ""
            par0 = par
            subdirs = [par0]
            while par0 and par0 not in self.dircache:
                # collect parent directories
                par0 = self._parent(par0)
                subdirs.append(par0)

            subdirs.reverse()
            for parent, child in zip(subdirs, subdirs[1:]):
                # register newly discovered directories
                assert child not in self.dircache
                assert parent in self.dircache
                self.dircache[parent].append(
                    {"name": child, "type": "directory", "size": 0}
                )
                self.dircache[child] = []

            self.dircache[par].append({"name": path, "type": "file", "size": size})
    def _open(self, path, mode="rb", block_size=None, cache_options=None, **kwargs):
        part_or_url, start0, end0 = self._cat_common(path)
        # This logic is kept outside `ReferenceFile` to avoid unnecessary redirection.
        # That does mean `_cat_common` gets called twice if it eventually reaches `ReferenceFile`.
        if isinstance(part_or_url, bytes):
            return io.BytesIO(part_or_url[start0:end0])

        protocol, _ = split_protocol(part_or_url)
        if start0 is None and end0 is None:
            return self.fss[protocol]._open(
                part_or_url,
                mode,
                block_size=block_size,
                cache_options=cache_options,
                **kwargs,
            )

        return ReferenceFile(
            self,
            path,
            mode,
            block_size=block_size,
            cache_options=cache_options,
            **kwargs,
        )

    def ls(self, path, detail=True, **kwargs):
        logger.debug("list %s", path)
        path = self._strip_protocol(path)
        if isinstance(self.references, LazyReferenceMapper):
            try:
                return self.references.ls(path, detail)
            except KeyError:
                pass
            raise FileNotFoundError(f"'{path}' is not a known key")
        if not self.dircache:
            self._dircache_from_items()
        out = self._ls_from_cache(path)
        if out is None:
            raise FileNotFoundError(path)
        if detail:
            return out
        return [o["name"] for o in out]
    def exists(self, path, **kwargs):  # overwrite auto-sync version
        return self.isdir(path) or self.isfile(path)

    def isdir(self, path):  # overwrite auto-sync version
        if self.dircache:
            return path in self.dircache
        elif isinstance(self.references, LazyReferenceMapper):
            return path in self.references.listdir()
        else:
            # this may be faster than building dircache for single calls, but
            # by looping will be slow for many calls; could cache it?
            return any(_.startswith(f"{path}/") for _ in self.references)

    def isfile(self, path):  # overwrite auto-sync version
        return path in self.references

    async def _ls(self, path, detail=True, **kwargs):  # calls fast sync code
        return self.ls(path, detail, **kwargs)

    def find(self, path, maxdepth=None, withdirs=False, detail=False, **kwargs):
        if withdirs:
            return super().find(
                path, maxdepth=maxdepth, withdirs=withdirs, detail=detail, **kwargs
            )
        if path:
            path = self._strip_protocol(path)
            r = sorted(k for k in self.references if k.startswith(path))
        else:
            r = sorted(self.references)
        if detail:
            if not self.dircache:
                self._dircache_from_items()
            return {k: self._ls_from_cache(k)[0] for k in r}
        else:
            return r

    def info(self, path, **kwargs):
        out = self.references.get(path)
        if out is not None:
            if isinstance(out, (str, bytes)):
                # decode base64 here
                return {"name": path, "type": "file", "size": len(out)}
            elif len(out) > 1:
                return {"name": path, "type": "file", "size": out[2]}
            else:
                out0 = [{"name": path, "type": "file", "size": None}]
        else:
            out = self.ls(path, True)
            out0 = [o for o in out if o["name"] == path]
            if not out0:
                return {"name": path, "type": "directory", "size": 0}
        if out0[0]["size"] is None:
            # if this is a whole remote file, update size using remote FS
            prot, _ = split_protocol(self.references[path][0])
            out0[0]["size"] = self.fss[prot].size(self.references[path][0])
        return out0[0]
    async def _info(self, path, **kwargs):  # calls fast sync code
        return self.info(path)

    async def _rm_file(self, path, **kwargs):
        self.references.pop(
            path, None
        )  # ignores FileNotFound, just as well for directories
        self.dircache.clear()  # this is a bit heavy handed

    async def _pipe_file(self, path, data, mode="overwrite", **kwargs):
        if mode == "create" and self.exists(path):
            raise FileExistsError
        # can be str or bytes
        self.references[path] = data
        self.dircache.clear()  # this is a bit heavy handed

    async def _put_file(self, lpath, rpath, mode="overwrite", **kwargs):
        # puts binary
        if mode == "create" and self.exists(rpath):
            raise FileExistsError
        with open(lpath, "rb") as f:
            self.references[rpath] = f.read()
        self.dircache.clear()  # this is a bit heavy handed

    def save_json(self, url, **storage_options):
        """Write modified references into new location"""
        out = {}
        for k, v in self.references.items():
            if isinstance(v, bytes):
                try:
                    out[k] = v.decode("ascii")
                except UnicodeDecodeError:
                    out[k] = (b"base64:" + base64.b64encode(v)).decode()
            else:
                out[k] = v
        with fsspec.open(url, "wb", **storage_options) as f:
            f.write(json.dumps({"version": 1, "refs": out}).encode())
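
    # Illustrative sketch (hypothetical URL): after adding or removing references
    # with pipe_file()/_rm_file(), the updated set can be persisted as a spec-v1
    # JSON file, e.g.
    #
    #     fs.save_json("memory://refs_updated.json")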


class ReferenceFile(AbstractBufferedFile):
    def __init__(
        self,
        fs,
        path,
        mode="rb",
        block_size="default",
        autocommit=True,
        cache_type="readahead",
        cache_options=None,
        size=None,
        **kwargs,
    ):
        super().__init__(
            fs,
            path,
            mode=mode,
            block_size=block_size,
            autocommit=autocommit,
            size=size,
            cache_type=cache_type,
            cache_options=cache_options,
            **kwargs,
        )
        part_or_url, self.start, self.end = self.fs._cat_common(self.path)
        protocol, _ = split_protocol(part_or_url)
        self.src_fs = self.fs.fss[protocol]
        self.src_path = part_or_url
        self._f = None

    @property
    def f(self):
        if self._f is None or self._f.closed:
            self._f = self.src_fs._open(
                self.src_path,
                mode=self.mode,
                block_size=self.blocksize,
                autocommit=self.autocommit,
                cache_type="none",
                **self.kwargs,
            )
        return self._f

    def close(self):
        if self._f is not None:
            self._f.close()
        return super().close()

    def _fetch_range(self, start, end):
        start = start + self.start
        end = min(end + self.start, self.end)
        self.f.seek(start)
        return self.f.read(end - start)