Skip to content

RF: Circumvents a deprecation warning from np.fromstring #702

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 7 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions nibabel/cifti2/tests/test_cifti2.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ def test_cifti2_metadata():
assert_equal(md.data, dict(metadata_test))

assert_equal(list(iter(md)), list(iter(collections.OrderedDict(metadata_test))))

md.update({'a': 'aval', 'b': 'bval'})
assert_equal(md.data, dict(metadata_test))

Expand Down Expand Up @@ -310,7 +310,7 @@ def test_matrix():

assert_raises(ci.Cifti2HeaderError, m.insert, 0, mim_none)
assert_equal(m.mapped_indices, [])

h = ci.Cifti2Header(matrix=m)
assert_equal(m.mapped_indices, [])
m.insert(0, mim_0)
Expand Down
16 changes: 8 additions & 8 deletions nibabel/externals/netcdf.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@

import numpy as np # noqa
from ..py3k import asbytes, asstr
from numpy import fromstring, ndarray, dtype, empty, array, asarray
from numpy import frombuffer, ndarray, dtype, empty, array, asarray
from numpy import little_endian as LITTLE_ENDIAN
from functools import reduce

Expand Down Expand Up @@ -519,7 +519,7 @@ def _read(self):
if not magic == b'CDF':
raise TypeError("Error: %s is not a valid NetCDF 3 file" %
self.filename)
self.__dict__['version_byte'] = fromstring(self.fp.read(1), '>b')[0]
self.__dict__['version_byte'] = frombuffer(self.fp.read(1), '>b')[0]

# Read file headers and set data.
self._read_numrecs()
Expand Down Expand Up @@ -608,7 +608,7 @@ def _read_var_array(self):
# Calculate size to avoid problems with vsize (above)
a_size = reduce(mul, shape, 1) * size
if self.file_bytes >= 0 and begin_ + a_size > self.file_bytes:
data = fromstring(b'\x00'*a_size, dtype=dtype_)
data = frombuffer(b'\x00'*a_size, dtype=dtype_)
elif self.use_mmap:
mm = mmap(self.fp.fileno(), begin_+a_size, access=ACCESS_READ)
data = ndarray.__new__(ndarray, shape, dtype=dtype_,
Expand All @@ -622,7 +622,7 @@ def _read_var_array(self):
buf = self.fp.read(a_size)
if len(buf) < a_size:
buf = b'\x00'*a_size
data = fromstring(buf, dtype=dtype_)
data = frombuffer(buf, dtype=dtype_)
data.shape = shape
self.fp.seek(pos)

Expand All @@ -644,7 +644,7 @@ def _read_var_array(self):
else:
pos = self.fp.tell()
self.fp.seek(begin)
rec_array = fromstring(self.fp.read(self._recs*self._recsize), dtype=dtypes)
rec_array = frombuffer(self.fp.read(self._recs*self._recsize), dtype=dtypes)
rec_array.shape = (self._recs,)
self.fp.seek(pos)

Expand Down Expand Up @@ -687,7 +687,7 @@ def _read_values(self):
self.fp.read(-count % 4) # read padding

if typecode is not 'c':
values = fromstring(values, dtype='>%s' % typecode)
values = frombuffer(values, dtype='>%s' % typecode)
if values.shape == (1,):
values = values[0]
else:
Expand All @@ -705,14 +705,14 @@ def _pack_int(self, value):
_pack_int32 = _pack_int

def _unpack_int(self):
return int(fromstring(self.fp.read(4), '>i')[0])
return int(frombuffer(self.fp.read(4), '>i')[0])
_unpack_int32 = _unpack_int

def _pack_int64(self, value):
self.fp.write(array(value, '>q').tostring())

def _unpack_int64(self):
return fromstring(self.fp.read(8), '>q')[0]
return frombuffer(self.fp.read(8), '>q')[0]

def _pack_string(self, s):
count = len(s)
Expand Down
4 changes: 2 additions & 2 deletions nibabel/gifti/parse_gifti_fast.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ def read_data_block(encoding, endian, ordering, datatype, shape, data):
dec = base64.b64decode(data.encode('ascii'))
dt = data_type_codes.type[datatype]
sh = tuple(shape)
newarr = np.fromstring(dec, dtype=dt)
newarr = np.frombuffer(dec, dtype=dt)
if len(newarr.shape) != len(sh):
newarr = newarr.reshape(sh, order=ord)

Expand All @@ -59,7 +59,7 @@ def read_data_block(encoding, endian, ordering, datatype, shape, data):
zdec = zlib.decompress(dec)
dt = data_type_codes.type[datatype]
sh = tuple(shape)
newarr = np.fromstring(zdec, dtype=dt)
newarr = np.frombuffer(zdec, dtype=dt)
if len(newarr.shape) != len(sh):
newarr = newarr.reshape(sh, order=ord)

Expand Down
2 changes: 1 addition & 1 deletion nibabel/nifti1.py
Original file line number Diff line number Diff line change
Expand Up @@ -579,7 +579,7 @@ def from_fileobj(klass, fileobj, size, byteswap):
# otherwise there should be a full extension header
if not len(ext_def) == 8:
raise HeaderDataError('failed to read extension header')
ext_def = np.fromstring(ext_def, dtype=np.int32)
ext_def = np.frombuffer(ext_def, dtype=np.int32)
if byteswap:
ext_def = ext_def.byteswap()
# be extra verbose
Expand Down
3 changes: 3 additions & 0 deletions nibabel/openers.py
Original file line number Diff line number Diff line change
Expand Up @@ -209,6 +209,9 @@ def fileno(self):
def read(self, *args, **kwargs):
    """Forward a ``read`` call to the wrapped file-like object."""
    reader = self.fobj.read
    return reader(*args, **kwargs)

def readinto(self, *args, **kwargs):
    """Forward a ``readinto`` call to the wrapped file-like object."""
    target = self.fobj.readinto
    return target(*args, **kwargs)

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Oh, nice. I hadn't realized we didn't already have this...

def write(self, *args, **kwargs):
    """Forward a ``write`` call to the wrapped file-like object."""
    writer = self.fobj.write
    return writer(*args, **kwargs)

Expand Down
8 changes: 4 additions & 4 deletions nibabel/streamlines/trk.py
Original file line number Diff line number Diff line change
Expand Up @@ -557,10 +557,10 @@ def _read_header(fileobj):

with Opener(fileobj) as f:

# Read the header in one block.
header_str = f.read(header_2_dtype.itemsize)
header_rec = np.fromstring(string=header_str, dtype=header_2_dtype)

# Read the header into a bytearray.
header_buf = bytearray(header_2_dtype.itemsize)
f.readinto(header_buf)
header_rec = np.frombuffer(buffer=header_buf, dtype=header_2_dtype)
# Check endianness
endianness = native_code
if header_rec['hdr_size'] != TrkFile.HEADER_SIZE:
Expand Down