File: bitbucket.py

package info (click to toggle)
python-nvchecker 2.16-1
  • links: PTS, VCS
  • area: main
  • in suites: forky, sid, trixie
  • size: 736 kB
  • sloc: python: 4,801; makefile: 25
file content (73 lines) | stat: -rw-r--r-- 2,198 bytes parent folder | download | duplicates (2)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
# MIT licensed
# Copyright (c) 2013-2020 lilydjwg <lilydjwg@gmail.com>, et al.

from typing import Any, List, Union
from urllib.parse import urlencode

from nvchecker.api import VersionResult, RichResult, Entry, AsyncCache

# doc: https://developer.atlassian.com/cloud/bitbucket/rest/api-group-commits/#api-repositories-workspace-repo-slug-commits-get
# %s placeholders: (repo from conf['bitbucket'], branch/revision — may be empty)
BITBUCKET_URL = 'https://bitbucket.org/api/2.0/repositories/%s/commits/%s'
# doc: https://developer.atlassian.com/cloud/bitbucket/rest/api-group-refs/#api-repositories-workspace-repo-slug-refs-tags-get
# %s placeholder: repo from conf['bitbucket']
BITBUCKET_MAX_TAG = 'https://bitbucket.org/api/2.0/repositories/%s/refs/tags'

async def get_version(
  name: str, conf: Entry, *,
  cache: AsyncCache,
  **kwargs: Any,
) -> VersionResult:
  '''Resolve the newest version of a Bitbucket repository.

  Three modes, selected by the entry configuration:
  * ``use_sorted_tags`` — fetch one page of tags, server-sorted
    (default ``-target.date``), optionally filtered by ``query``.
  * ``use_max_tag`` — fetch up to ``max_page`` pages of tags (default 3).
  * otherwise — use the date (YYYYMMDD) of the latest commit on ``branch``.
  '''
  repo = conf['bitbucket']
  branch = conf.get('branch', '')
  use_max_tag = conf.get('use_max_tag', False)
  use_sorted_tags = conf.get('use_sorted_tags', False)

  if not (use_sorted_tags or use_max_tag):
    # Commit mode: version is the most recent commit's date, compacted.
    data = await cache.get_json(BITBUCKET_URL % (repo, branch))
    latest = data['values'][0]
    return RichResult(
      version = latest['date'].split('T', 1)[0].replace('-', ''),
      url = latest['links']['html']['href'],
    )

  # Both tag modes hit the tags endpoint; request only the fields we read.
  parameters = {'fields': 'values.name,values.links.html.href,next'}
  if use_sorted_tags:
    # Let the server sort (and optionally filter) so one page suffices.
    parameters['sort'] = conf.get('sort', '-target.date')
    if 'query' in conf:
      parameters['q'] = conf['query']
    pages = 1
  else:
    pages = conf.get('max_page', 3)

  url = (BITBUCKET_MAX_TAG % repo) + '?' + urlencode(parameters)
  return await _get_tags(url, max_page=pages, cache=cache)

async def _get_tags(
  url: str, *,
  max_page: int,
  cache: AsyncCache,
) -> VersionResult:
  '''Collect tag names (with their web URLs) from the Bitbucket tags
  endpoint, following the API's ``next`` pagination link for at most
  ``max_page`` pages.'''
  collected: List[Union[str, RichResult]] = []

  pages_left = max_page
  while pages_left > 0:
    pages_left -= 1
    data = await cache.get_json(url)
    for tag in data['values']:
      collected.append(RichResult(
        version = tag['name'],
        url = tag['links']['html']['href'],
      ))
    # A missing 'next' key means this was the last page.
    if 'next' not in data:
      break
    url = data['next']

  return collected