
Commit 0b2424a

fujiisoup authored and shoyer committed
Deprecate old pandas support (#1530)
* Deprecate old numpy and pandas support.
* Clean up npcompat.
* Recover an unintentionally deleted line.
* An empty commit to trigger CI.
* Remove nanprod; update what's new.
* Minor fix related to np.nanprod.
* Update installing.rst.
1 parent b190501; commit 0b2424a

10 files changed: +19 −219 lines
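
Context for the size of this diff: once the minimum versions are numpy 1.11 and pandas 0.18, the helpers that xarray.core.npcompat used to backport (broadcast_to, stack, and nanprod, all added in numpy 1.10) are available directly from numpy. Below is a minimal sketch, not part of the commit, of what the raised floor guarantees:

# Illustrative only: these calls work on any numpy >= 1.10, so the
# corresponding npcompat fallbacks can be deleted.
import numpy as np

a = np.array([[1., 2.], [3., np.nan]])
print(np.broadcast_to(np.array([1, 2, 3]), (2, 3)))  # view-based broadcast
print(np.stack([a, -a], axis=-1).shape)              # (2, 2, 2), new trailing axis
print(np.nanprod(a))                                 # 6.0 -- NaN treated as 1
# nancumsum/nancumprod only appeared in numpy 1.12, so npcompat keeps those fallbacks.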

.travis.yml

Lines changed: 0 additions & 5 deletions
@@ -76,11 +76,6 @@ before_install:
 install:
   - conda env create --file ci/requirements-$CONDA_ENV.yml
   - source activate test_env
-  # scipy should not have been installed, but it's included in older versions of
-  # the conda pandas package
-  - if [[ "$CONDA_ENV" == "py27-min" ]]; then
-      conda remove scipy;
-    fi
   - python setup.py install
 
 script:

ci/requirements-py27-min.yml

Lines changed: 2 additions & 2 deletions
@@ -2,8 +2,8 @@ name: test_env
 dependencies:
   - python=2.7
   - pytest
-  - numpy==1.9.3
-  - pandas==0.15.0
+  - numpy==1.11
+  - pandas==0.18.0
   - pip:
     - coveralls
     - pytest-cov

doc/installing.rst

Lines changed: 2 additions & 2 deletions
@@ -7,8 +7,8 @@ Required dependencies
 ---------------------
 
 - Python 2.7, 3.4, 3.5, or 3.6
-- `numpy <http://www.numpy.org/>`__ (1.7 or later)
-- `pandas <http://pandas.pydata.org/>`__ (0.15.0 or later)
+- `numpy <http://www.numpy.org/>`__ (1.11 or later)
+- `pandas <http://pandas.pydata.org/>`__ (0.18.0 or later)
 
 Optional dependencies
 ---------------------

doc/whats-new.rst

Lines changed: 7 additions & 0 deletions
@@ -18,6 +18,13 @@ What's New
 v0.9.7 (unreleased)
 -------------------
 
+Backward Incompatible Changes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Old numpy < 1.11 and pandas < 0.18 are no longer supported (:issue:`1512`).
+  By `Keisuke Fujii <https://github.com/fujiisoup>`_.
+
+
 Enhancements
 ~~~~~~~~~~~~
 

setup.py

Lines changed: 1 addition & 1 deletion
@@ -35,7 +35,7 @@
     'Topic :: Scientific/Engineering',
 ]
 
-INSTALL_REQUIRES = ['numpy >= 1.7', 'pandas >= 0.15.0']
+INSTALL_REQUIRES = ['numpy >= 1.11', 'pandas >= 0.18.0']
 TESTS_REQUIRE = ['pytest >= 2.7.1']
 
 DESCRIPTION = "N-D labeled arrays and datasets in Python"

xarray/core/duck_array_ops.py

Lines changed: 3 additions & 4 deletions
@@ -83,10 +83,10 @@ def isnull(data):
 where = _dask_or_eager_func('where', n_array_args=3)
 insert = _dask_or_eager_func('insert')
 take = _dask_or_eager_func('take')
-broadcast_to = _dask_or_eager_func('broadcast_to', npcompat)
+broadcast_to = _dask_or_eager_func('broadcast_to')
 
 concatenate = _dask_or_eager_func('concatenate', list_of_args=True)
-stack = _dask_or_eager_func('stack', npcompat, list_of_args=True)
+stack = _dask_or_eager_func('stack', list_of_args=True)
 
 array_all = _dask_or_eager_func('all')
 array_any = _dask_or_eager_func('any')
@@ -232,8 +232,7 @@ def f(values, axis=None, skipna=None, **kwargs):
 std = _create_nan_agg_method('std', numeric_only=True)
 var = _create_nan_agg_method('var', numeric_only=True)
 median = _create_nan_agg_method('median', numeric_only=True)
-prod = _create_nan_agg_method('prod', numeric_only=True, np_compat=True,
-                              no_bottleneck=True)
+prod = _create_nan_agg_method('prod', numeric_only=True, no_bottleneck=True)
 cumprod = _create_nan_agg_method('cumprod', numeric_only=True, np_compat=True,
                                  no_bottleneck=True, keep_dims=True)
 cumsum = _create_nan_agg_method('cumsum', numeric_only=True, np_compat=True,
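
For readers unfamiliar with the helper above: _dask_or_eager_func builds a function that dispatches to dask.array when given dask inputs and to numpy otherwise, and the second argument being dropped here appears to have pointed the eager path at the npcompat module. The sketch below is an assumption about the general shape of such a dispatcher, not xarray's exact implementation; it only shows why plain numpy now suffices for broadcast_to and stack.

# Hedged sketch of a numpy-or-dask dispatcher (names and signature are
# illustrative, not copied from xarray).
import numpy as np

def dask_or_eager_func(name, n_array_args=1):
    def wrapper(*args, **kwargs):
        dispatch_args = args[:n_array_args]
        try:
            import dask.array as da
            if any(isinstance(arg, da.Array) for arg in dispatch_args):
                return getattr(da, name)(*args, **kwargs)  # lazy path
        except ImportError:
            pass  # dask is an optional dependency
        return getattr(np, name)(*args, **kwargs)  # eager path: plain numpy
    return wrapper

broadcast_to = dask_or_eager_func('broadcast_to')
stack = dask_or_eager_func('stack')
print(broadcast_to(np.arange(3), (2, 3)).shape)         # (2, 3)
print(stack([np.zeros(2), np.ones(2)], axis=0).shape)   # (2, 2)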

xarray/core/npcompat.py

Lines changed: 2 additions & 199 deletions
@@ -4,139 +4,11 @@
 import numpy as np
 
 try:
-    from numpy import broadcast_to, stack, nanprod, nancumsum, nancumprod
+    from numpy import nancumsum, nancumprod
 except ImportError:  # pragma: no cover
-    # Code copied from newer versions of NumPy (v1.10 to v1.12).
+    # Code copied from newer versions of NumPy (v1.12).
     # Used under the terms of NumPy's license, see licenses/NUMPY_LICENSE.
 
-    def _maybe_view_as_subclass(original_array, new_array):
-        if type(original_array) is not type(new_array):
-            # if input was an ndarray subclass and subclasses were OK,
-            # then view the result as that subclass.
-            new_array = new_array.view(type=type(original_array))
-        # Since we have done something akin to a view from original_array, we
-        # should let the subclass finalize (if it has it implemented, i.e., is
-        # not None).
-        if new_array.__array_finalize__:
-            new_array.__array_finalize__(original_array)
-        return new_array
-
-    def _broadcast_to(array, shape, subok, readonly):
-        shape = tuple(shape) if np.iterable(shape) else (shape,)
-        array = np.array(array, copy=False, subok=subok)
-        if not shape and array.shape:
-            raise ValueError('cannot broadcast a non-scalar to a scalar array')
-        if any(size < 0 for size in shape):
-            raise ValueError('all elements of broadcast shape must be non-'
-                             'negative')
-        broadcast = np.nditer(
-            (array,), flags=['multi_index', 'zerosize_ok', 'refs_ok'],
-            op_flags=['readonly'], itershape=shape, order='C').itviews[0]
-        result = _maybe_view_as_subclass(array, broadcast)
-        if not readonly and array.flags.writeable:
-            result.flags.writeable = True
-        return result
-
-    def broadcast_to(array, shape, subok=False):
-        """Broadcast an array to a new shape.
-
-        Parameters
-        ----------
-        array : array_like
-            The array to broadcast.
-        shape : tuple
-            The shape of the desired array.
-        subok : bool, optional
-            If True, then sub-classes will be passed-through, otherwise
-            the returned array will be forced to be a base-class array (default).
-
-        Returns
-        -------
-        broadcast : array
-            A readonly view on the original array with the given shape. It is
-            typically not contiguous. Furthermore, more than one element of a
-            broadcasted array may refer to a single memory location.
-
-        Raises
-        ------
-        ValueError
-            If the array is not compatible with the new shape according to NumPy's
-            broadcasting rules.
-
-        Examples
-        --------
-        >>> x = np.array([1, 2, 3])
-        >>> np.broadcast_to(x, (3, 3))
-        array([[1, 2, 3],
-               [1, 2, 3],
-               [1, 2, 3]])
-        """
-        return _broadcast_to(array, shape, subok=subok, readonly=True)
-
-    def stack(arrays, axis=0):
-        """
-        Join a sequence of arrays along a new axis.
-
-        .. versionadded:: 1.10.0
-
-        Parameters
-        ----------
-        arrays : sequence of ndarrays
-            Each array must have the same shape.
-        axis : int, optional
-            The axis along which the arrays will be stacked.
-
-        Returns
-        -------
-        stacked : ndarray
-            The stacked array has one more dimension than the input arrays.
-        See Also
-        --------
-        concatenate : Join a sequence of arrays along an existing axis.
-        split : Split array into a list of multiple sub-arrays of equal size.
-
-        Examples
-        --------
-        >>> arrays = [np.random.randn(3, 4) for _ in range(10)]
-        >>> np.stack(arrays, axis=0).shape
-        (10, 3, 4)
-
-        >>> np.stack(arrays, axis=1).shape
-        (3, 10, 4)
-
-        >>> np.stack(arrays, axis=2).shape
-        (3, 4, 10)
-
-        >>> a = np.array([1, 2, 3])
-        >>> b = np.array([2, 3, 4])
-        >>> np.stack((a, b))
-        array([[1, 2, 3],
-               [2, 3, 4]])
-
-        >>> np.stack((a, b), axis=-1)
-        array([[1, 2],
-               [2, 3],
-               [3, 4]])
-        """
-        arrays = [np.asanyarray(arr) for arr in arrays]
-        if not arrays:
-            raise ValueError('need at least one array to stack')
-
-        shapes = set(arr.shape for arr in arrays)
-        if len(shapes) != 1:
-            raise ValueError('all input arrays must have the same shape')
-
-        result_ndim = arrays[0].ndim + 1
-        if not -result_ndim <= axis < result_ndim:
-            msg = 'axis {0} out of bounds [-{1}, {1})'.format(axis, result_ndim)
-            raise IndexError(msg)
-        if axis < 0:
-            axis += result_ndim
-
-        sl = (slice(None),) * axis + (np.newaxis,)
-        expanded_arrays = [arr[sl] for arr in arrays]
-        return np.concatenate(expanded_arrays, axis=axis)
-
     def _replace_nan(a, val):
         """
         If `a` is of inexact type, make a copy of `a`, replace NaNs with
@@ -178,75 +50,6 @@ def _replace_nan(a, val):
         np.copyto(a, val, where=mask)
         return a, mask
 
-    def nanprod(a, axis=None, dtype=None, out=None, keepdims=0):
-        """
-        Return the product of array elements over a given axis treating Not a
-        Numbers (NaNs) as zero.
-
-        One is returned for slices that are all-NaN or empty.
-
-        .. versionadded:: 1.10.0
-
-        Parameters
-        ----------
-        a : array_like
-            Array containing numbers whose sum is desired. If `a` is not an
-            array, a conversion is attempted.
-        axis : int, optional
-            Axis along which the product is computed. The default is to compute
-            the product of the flattened array.
-        dtype : data-type, optional
-            The type of the returned array and of the accumulator in which the
-            elements are summed. By default, the dtype of `a` is used. An
-            exception is when `a` has an integer type with less precision than
-            the platform (u)intp. In that case, the default will be either
-            (u)int32 or (u)int64 depending on whether the platform is 32 or 64
-            bits. For inexact inputs, dtype must be inexact.
-        out : ndarray, optional
-            Alternate output array in which to place the result. The default
-            is ``None``. If provided, it must have the same shape as the
-            expected output, but the type will be cast if necessary. See
-            `doc.ufuncs` for details. The casting of NaN to integer can yield
-            unexpected results.
-        keepdims : bool, optional
-            If True, the axes which are reduced are left in the result as
-            dimensions with size one. With this option, the result will
-            broadcast correctly against the original `arr`.
-
-        Returns
-        -------
-        y : ndarray or numpy scalar
-
-        See Also
-        --------
-        numpy.prod : Product across array propagating NaNs.
-        isnan : Show which elements are NaN.
-
-        Notes
-        -----
-        Numpy integer arithmetic is modular. If the size of a product exceeds
-        the size of an integer accumulator, its value will wrap around and the
-        result will be incorrect. Specifying ``dtype=double`` can alleviate
-        that problem.
-
-        Examples
-        --------
-        >>> np.nanprod(1)
-        1
-        >>> np.nanprod([1])
-        1
-        >>> np.nanprod([1, np.nan])
-        1.0
-        >>> a = np.array([[1, 2], [3, np.nan]])
-        >>> np.nanprod(a)
-        6.0
-        >>> np.nanprod(a, axis=0)
-        array([ 3.,  2.])
-
-        """
-        a, mask = _replace_nan(a, 1)
-        return np.prod(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
-
     def nancumsum(a, axis=None, dtype=None, out=None):
         """
         Return the cumulative sum of array elements over a given axis treating
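
What stays behind in npcompat after this commit are nancumsum and nancumprod, which only landed in numpy 1.12 and therefore still need a fallback under the new 1.11 minimum. A small illustration (not taken from the commit) of the behaviour the fallback reproduces:

# NaN-aware cumulative ops: nancumsum treats NaN as 0, nancumprod treats NaN as 1.
import numpy as np

x = np.array([1., np.nan, 3.])
print(np.nancumsum(x))   # [1. 1. 4.]
print(np.nancumprod(x))  # [1. 1. 3.]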

xarray/tests/test_computation.py

Lines changed: 2 additions & 2 deletions
@@ -275,7 +275,7 @@ def test_apply_output_core_dimension():
 
     def stack_negative(obj):
        def func(x):
-            return xr.core.npcompat.stack([x, -x], axis=-1)
+            return np.stack([x, -x], axis=-1)
        result = apply_ufunc(func, obj, output_core_dims=[['sign']])
        if isinstance(result, (xr.Dataset, xr.DataArray)):
            result.coords['sign'] = [1, -1]
@@ -303,7 +303,7 @@ def func(x):
 
     def original_and_stack_negative(obj):
        def func(x):
-            return (x, xr.core.npcompat.stack([x, -x], axis=-1))
+            return (x, np.stack([x, -x], axis=-1))
        result = apply_ufunc(func, obj, output_core_dims=[[], ['sign']])
        if isinstance(result[1], (xr.Dataset, xr.DataArray)):
            result[1].coords['sign'] = [1, -1]
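
The two test helpers now call np.stack directly instead of the npcompat backport. As a standalone illustration (not the test code itself), stacking x and -x along a new trailing axis produces the extra 'sign' dimension these tests attach coordinates to:

# Illustration of the np.stack call used by the helpers above.
import numpy as np

x = np.array([1, 2, 3])
print(np.stack([x, -x], axis=-1))
# [[ 1 -1]
#  [ 2 -2]
#  [ 3 -3]]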

xarray/tests/test_dataarray.py

Lines changed: 0 additions & 2 deletions
@@ -1442,8 +1442,6 @@ def test_reduce(self):
         expected = DataArray(5, {'c': -999})
         self.assertDataArrayIdentical(expected, actual)
 
-    @pytest.mark.skipif(LooseVersion(np.__version__) < LooseVersion('1.10.0'),
-                        reason='requires numpy version 1.10.0 or later')
     # skip due to bug in older versions of numpy.nanpercentile
     def test_quantile(self):
         for q in [0.25, [0.50], [0.25, 0.75]]:

xarray/tests/test_dataset.py

Lines changed: 0 additions & 2 deletions
@@ -2923,8 +2923,6 @@ def mean_only_one_axis(x, axis):
         with self.assertRaisesRegexp(TypeError, 'non-integer axis'):
             ds.reduce(mean_only_one_axis, ['x', 'y'])
 
-    @pytest.mark.skipif(LooseVersion(np.__version__) < LooseVersion('1.10.0'),
-                        reason='requires numpy version 1.10.0 or later')
     def test_quantile(self):
 
         ds = create_test_data(seed=123)
