File: compat.py

package info (click to toggle)
python-pint 0.25-1
  • links: PTS, VCS
  • area: main
  • in suites: forky
  • size: 1,916 kB
  • sloc: python: 20,307; makefile: 149
file content (408 lines) | stat: -rw-r--r-- 10,735 bytes parent folder | download | duplicates (2)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
"""
pint.compat
~~~~~~~~~~~

Compatibility layer.

:copyright: 2013 by Pint Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""

from __future__ import annotations

import math
import sys
from collections.abc import Callable, Iterable, Mapping
from decimal import Decimal
from importlib import import_module
from numbers import Number
from typing import (
    Any,
    # Remove once all dependent packages change their imports.
    Never,  # noqa
    NoReturn,
    Self,  # noqa
    TypeAlias,  # noqa
    Unpack,  # noqa
)

if sys.version_info >= (3, 13):
    from warnings import deprecated  # noqa
else:
    from typing_extensions import deprecated  # noqa


def missing_dependency(
    package: str, display_name: str | None = None
) -> Callable[..., NoReturn]:
    """Return a helper function that raises an exception when used.

    It provides a way delay a missing dependency exception until it is used.
    """
    display_name = display_name or package

    def _inner(*args: Any, **kwargs: Any) -> NoReturn:
        raise Exception(
            "This feature requires %s. Please install it by running:\n"
            "pip install %s" % (display_name, package)
        )

    return _inner


def fully_qualified_name(t: type) -> str:
    """Return the fully qualified name of a type."""
    qualname = t.__qualname__
    mod = t.__module__

    # Builtins (and the odd type without a module) go by their bare name.
    if mod is None or mod == "builtins":
        return qualname

    return ".".join((mod, qualname))


def check_upcast_type(obj: type) -> bool:
    """Check if the type object is an upcast type.

    Lazily imports the class named by ``obj``'s fully qualified name (if it
    is registered in ``upcast_type_map``), caches the imported class in the
    map, and then verifies that ``obj`` is that exact class object.
    """

    # TODO: merge or unify name with is_upcast_type

    fqn = fully_qualified_name(obj)
    if fqn not in upcast_type_map:
        return False
    else:
        # Import lazily so that merely listing a type in upcast_type_names
        # does not require its package to be installed.
        module_name, class_name = fqn.rsplit(".", 1)
        cls = getattr(import_module(module_name), class_name)

    # Cache the imported class so is_upcast_type can hit the fast path next time.
    upcast_type_map[fqn] = cls
    # This is to check we are importing the same thing.
    # and avoid weird problems. Maybe instead of return
    # we should raise an error if false.
    return obj in upcast_type_map.values()


def is_upcast_type(other: type) -> bool:
    """Check if the type object is an upcast type."""

    # TODO: merge or unify name with check_upcast_type

    # Fast path: the type has already been imported and cached in the map;
    # otherwise fall back to the lazy import-and-verify check.
    cached = other in upcast_type_map.values()
    return cached or check_upcast_type(other)


def is_duck_array_type(cls: type) -> bool:
    """Check if the type object represents a (non-Quantity) duck array type."""
    # TODO (NEP 30): replace duck array check with hasattr(other, "__duckarray__")
    if issubclass(cls, ndarray):
        return True
    # Quantity-like classes carry _magnitude/_units; those are not duck arrays.
    looks_like_quantity = hasattr(cls, "_magnitude") or hasattr(cls, "_units")
    if looks_like_quantity or not HAS_NUMPY_ARRAY_FUNCTION:
        return False
    # Duck arrays must expose the NEP-18 protocol plus basic array attributes.
    return all(hasattr(cls, attr) for attr in ("__array_function__", "ndim", "dtype"))


def is_duck_array(obj: Any) -> bool:
    """Check if an object (an instance, not a type) is a (non-Quantity) duck array."""
    return is_duck_array_type(type(obj))


def eq(lhs: Any, rhs: Any, check_all: bool) -> bool | Iterable[bool]:
    """Comparison of scalars and arrays.

    Parameters
    ----------
    lhs
        left-hand side
    rhs
        right-hand side
    check_all
        if True, reduce sequence to single bool;
        return True if all the elements are equal.

    Returns
    -------
    bool or array_like of bool
    """
    out = lhs == rhs
    if check_all and is_duck_array_type(type(out)):
        return out.all()
    return out


def isnan(obj: Any, check_all: bool) -> bool | Iterable[bool]:
    """Test for NaN or NaT.

    Parameters
    ----------
    obj
        scalar or vector
    check_all
        if True, reduce sequence to single bool;
        return True if any of the elements are NaN.

    Returns
    -------
    bool or array_like of bool.
        Always return False for non-numeric types.
    """
    if is_duck_array_type(type(obj)):
        if obj.dtype.kind in "ifc":
            # Integer, float and complex dtypes: plain NaN test.
            out = np.isnan(obj)
        elif obj.dtype.kind in "Mm":
            # Datetime/timedelta dtypes use NaT ("not a time") instead of NaN.
            out = np.isnat(obj)
        else:
            if HAS_UNCERTAINTIES:
                try:
                    # Object arrays may hold UFloat values; unumpy handles those.
                    out = unp.isnan(obj)
                except TypeError:
                    # Not a numeric or UFloat type
                    out = np.full(obj.shape, False)
            else:
                # Not a numeric or datetime type
                out = np.full(obj.shape, False)
        return out.any() if check_all else out
    if isinstance(obj, np_datetime64):
        return np.isnat(obj)
    elif HAS_UNCERTAINTIES and isinstance(obj, UFloat):
        return unp.isnan(obj)
    try:
        return math.isnan(obj)
    except TypeError:
        # Non-numeric scalars (e.g. strings) are never NaN by definition.
        return False


def zero_or_nan(obj: Any, check_all: bool) -> bool | Iterable[bool]:
    """Test if obj is zero, NaN, or NaT.

    Parameters
    ----------
    obj
        scalar or vector
    check_all
        if True, reduce sequence to single bool;
        return True if all the elements are zero, NaN, or NaT.

    Returns
    -------
    bool or array_like of bool.
        Always return False for non-numeric types.
    """
    # Elementwise "is zero" plus "is NaN/NaT": adding the boolean results
    # yields a truthy value wherever either test holds.
    zero_mask = eq(obj, 0, False)
    nan_mask = isnan(obj, False)
    combined = zero_mask + nan_mask
    if not check_all:
        return combined
    if is_duck_array_type(type(combined)):
        return combined.all()
    return combined


# TODO: remove this warning after v0.10
class BehaviorChangeWarning(UserWarning):
    """UserWarning subclass used to flag a change in pint's behavior."""

    pass


##############
# try imports
##############

try:
    import babel  # noqa: F401
    from babel import units as babel_units

    HAS_BABEL = hasattr(babel_units, "format_unit")
except ImportError:
    HAS_BABEL = False

try:
    import uncertainties  # noqa: F401

    HAS_UNCERTAINTIES = True
except ImportError:
    HAS_UNCERTAINTIES = False

try:
    import numpy  # noqa: F401

    HAS_NUMPY = True
except ImportError:
    HAS_NUMPY = False

try:
    import mip  # noqa: F401

    HAS_MIP = True
except ImportError:
    HAS_MIP = False

try:
    import dask  # noqa: F401

    HAS_DASK = True
except ImportError:
    HAS_DASK = False


##############################
# Imports are handled here
# in order to be able to have
# them as constants
# in mypy configuration.
##############################

if HAS_BABEL:
    from babel import Locale
    from babel import units as babel_units

    # Callable that parses a locale identifier string into a babel Locale.
    babel_parse = Locale.parse
else:
    # Without Babel, every name becomes a placeholder that raises on use.
    babel_parse = missing_dependency("Babel")  # noqa: F811 # type:ignore
    babel_units = babel_parse
    Locale = missing_dependency

if HAS_UNCERTAINTIES:
    from uncertainties import UFloat, ufloat

    # unumpy is imported further below, only when NumPy is also available.
    unp = None
else:
    UFloat = ufloat = unp = None


if HAS_NUMPY:
    import numpy as np
    from numpy import datetime64 as np_datetime64
    from numpy import (
        exp,  # noqa: F401
        log,  # noqa: F401
        ndarray,
    )

    NUMPY_VER = np.__version__
    if HAS_UNCERTAINTIES:
        from uncertainties import unumpy as unp

        NUMERIC_TYPES = (Number, Decimal, ndarray, np.number, UFloat)
    else:
        NUMERIC_TYPES = (Number, Decimal, ndarray, np.number)

    def _to_magnitude(value, force_ndarray=False, force_ndarray_like=False):
        # Validate and normalize a raw value into a Quantity magnitude
        # (NumPy variant: lists/tuples are converted to ndarrays).
        if isinstance(value, (dict, bool)) or value is None:
            raise TypeError(f"Invalid magnitude for Quantity: {value!r}")
        elif isinstance(value, str) and value == "":
            raise ValueError("Quantity magnitude cannot be an empty string.")
        elif isinstance(value, (list, tuple)):
            return np.asarray(value)
        elif HAS_UNCERTAINTIES:
            # Imported here to avoid a circular import at module load time.
            from pint.facets.measurement.objects import Measurement

            if isinstance(value, Measurement):
                return ufloat(value.value, value.error)
        if force_ndarray or (
            force_ndarray_like and not is_duck_array_type(type(value))
        ):
            return np.asarray(value)
        return value

    def _test_array_function_protocol():
        # Test if the __array_function__ protocol is enabled
        try:

            class FakeArray:
                def __array_function__(self, *args, **kwargs):
                    return

            # If the NEP-18 protocol is active, dispatch goes through
            # FakeArray.__array_function__ and no error is raised.
            np.concatenate([FakeArray()])
            return True
        except ValueError:
            return False

    HAS_NUMPY_ARRAY_FUNCTION = _test_array_function_protocol()

    # Sentinel NumPy uses for "no value passed" in ufunc/reduction kwargs.
    NP_NO_VALUE = np._NoValue

else:
    np = None

    # Stand-in classes so isinstance/issubclass checks elsewhere in pint
    # remain valid even when NumPy is not installed.
    class ndarray:
        pass

    class np_datetime64:
        pass

    from math import (
        exp,  # noqa: F401
        log,  # noqa: F401
    )

    NUMPY_VER = "0"
    NUMERIC_TYPES = (Number, Decimal)
    HAS_NUMPY_ARRAY_FUNCTION = False
    NP_NO_VALUE = None

    def _to_magnitude(value, force_ndarray=False, force_ndarray_like=False):
        # Validate and normalize a raw value into a Quantity magnitude
        # (NumPy-less variant: sequences cannot be converted to arrays).
        if force_ndarray or force_ndarray_like:
            raise ValueError(
                "Cannot force to ndarray or ndarray-like when NumPy is not present."
            )
        elif isinstance(value, (dict, bool)) or value is None:
            raise TypeError(f"Invalid magnitude for Quantity: {value!r}")
        elif isinstance(value, str) and value == "":
            raise ValueError("Quantity magnitude cannot be an empty string.")
        elif isinstance(value, (list, tuple)):
            raise TypeError(
                "lists and tuples are valid magnitudes for "
                "Quantity only when NumPy is present."
            )
        elif HAS_UNCERTAINTIES:
            # Imported here to avoid a circular import at module load time.
            from pint.facets.measurement.objects import Measurement

            if isinstance(value, Measurement):
                return ufloat(value.value, value.error)
        return value


if HAS_MIP:
    import mip

    # Re-export the pieces of the mip API that pint uses under stable names.
    mip_model = mip.model
    mip_Model = mip.Model
    mip_INF = mip.INF
    mip_INTEGER = mip.INTEGER
    mip_xsum = mip.xsum
    mip_OptimizationStatus = mip.OptimizationStatus
else:
    # Without mip, each name becomes a placeholder that raises on use.
    mip_missing = missing_dependency("mip")
    mip_model = mip_missing
    mip_Model = mip_missing
    mip_INF = mip_missing
    mip_INTEGER = mip_missing
    mip_xsum = mip_missing
    mip_OptimizationStatus = mip_missing


# Define location of pint.Quantity in NEP-13 type cast hierarchy by defining upcast
# types using guarded imports

if HAS_DASK:
    from dask import array as dask_array
    from dask.base import compute, persist, visualize
else:
    # Dask graph helpers degrade to None when dask is absent
    # (presumably callers guard on HAS_DASK before using them).
    compute, persist, visualize = None, None, None
    dask_array = None


# TODO: merge with upcast_type_map

#: List upcast type names
upcast_type_names = (
    "pint_pandas.pint_array.PintArray",
    "xarray.core.dataarray.DataArray",
    "xarray.core.dataset.Dataset",
    "xarray.core.variable.Variable",
    "pandas.core.series.Series",
    "pandas.core.frame.DataFrame",
    "pandas.Series",
    "pandas.DataFrame",
    "xarray.core.dataarray.DataArray",
)

#: Map type name to the actual type (for upcast types).
upcast_type_map: Mapping[str, type | None] = {k: None for k in upcast_type_names}