File: iterator.py

# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

"""The glue between fetchers, crawlers and requirements."""

import itertools
from abc import abstractmethod

from .compatibility import AbstractClass
from .crawler import Crawler
from .fetcher import PyPIFetcher
from .package import Package


class IteratorInterface(AbstractClass):
  @abstractmethod
  def iter(self, req):
    """Return a list of packages that satisfy the requirement."""
    pass


class Iterator(IteratorInterface):
  """A requirement iterator, the glue between fetchers, crawlers and requirements."""

  def __init__(self, fetchers=None, crawler=None, follow_links=False, allow_prereleases=None):
    # Fall back to a stock Crawler and the default PyPI fetcher when none are given.
    self._crawler = crawler or Crawler()
    self._fetchers = fetchers if fetchers is not None else [PyPIFetcher()]
    self.__follow_links = follow_links
    self.__allow_prereleases = allow_prereleases

  def _iter_requirement_urls(self, req):
    return itertools.chain.from_iterable(fetcher.urls(req) for fetcher in self._fetchers)

  def iter(self, req):
    # Gather candidate URLs for the requirement from every fetcher, then crawl
    # them, optionally following links to distributions hosted off the index.
    url_iterator = self._iter_requirement_urls(req)
    crawled_url_iterator = self._crawler.crawl(url_iterator, follow_links=self.__follow_links)
    # Parse each crawled URL into a Package (from_href returns None for
    # unrecognized links) and yield only packages that satisfy the requirement.
    for package in filter(None, map(Package.from_href, crawled_url_iterator)):
      if package.satisfies(req, allow_prereleases=self.__allow_prereleases):
        yield package
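
A minimal usage sketch (separate from the file above), assuming the pex 1.5.x package layout; the extra index URL and the requirement string are illustrative, not taken from the source:

    from pkg_resources import Requirement

    from pex.fetcher import Fetcher, PyPIFetcher
    from pex.iterator import Iterator

    # With no arguments, Iterator crawls PyPI via the default PyPIFetcher.
    # A plain Fetcher can point at an additional index; this URL is hypothetical.
    iterator = Iterator(fetchers=[PyPIFetcher(), Fetcher(['https://example.com/simple/'])])

    req = Requirement.parse('requests>=2.0')
    for package in iterator.iter(req):
      # name, raw_version, and url are attributes of pex's Package/Link types.
      print('%s %s at %s' % (package.name, package.raw_version, package.url))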