"""
Msgpack serializer support for reading and writing pandas data structures
to disk

Portions of the msgpack_numpy package, by Lev Givon, were incorporated
into this module (and tests_packers.py).

License
=======

Copyright (c) 2013, Lev Givon.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
  notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
  copyright notice, this list of conditions and the following
  disclaimer in the documentation and/or other materials provided
  with the distribution.
* Neither the name of Lev Givon nor the names of any
  contributors may be used to endorse or promote products derived
  from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""

from datetime import date, datetime, timedelta
import os
from textwrap import dedent
import warnings

from dateutil.parser import parse
import numpy as np

import pandas.compat as compat
from pandas.compat import u, u_safe
from pandas.errors import PerformanceWarning
from pandas.util._move import (
    BadMove as _BadMove, move_into_mutable_buffer as _move_into_mutable_buffer)

from pandas.core.dtypes.common import (
    is_categorical_dtype, is_datetime64tz_dtype, is_object_dtype,
    needs_i8_conversion, pandas_dtype)

from pandas import (  # noqa:F401
    Categorical, CategoricalIndex, DataFrame, DatetimeIndex, Float64Index,
    Index, Int64Index, Interval, IntervalIndex, MultiIndex, NaT, Panel, Period,
    PeriodIndex, RangeIndex, Series, TimedeltaIndex, Timestamp)
from pandas.core import internals
from pandas.core.arrays import DatetimeArray, IntervalArray, PeriodArray
from pandas.core.arrays.sparse import BlockIndex, IntIndex
from pandas.core.generic import NDFrame
from pandas.core.internals import BlockManager, _safe_reshape, make_block
from pandas.core.sparse.api import SparseDataFrame, SparseSeries

from pandas.io.common import _stringify_path, get_filepath_or_buffer
from pandas.io.msgpack import ExtType, Packer as _Packer, Unpacker as _Unpacker

# check which compression libs we have installed
try:
    import zlib

    def _check_zlib():
        pass
except ImportError:
    def _check_zlib():
        raise ImportError('zlib is not installed')

_check_zlib.__doc__ = dedent(
    """\
    Check if zlib is installed.

    Raises
    ------
    ImportError
        Raised when zlib is not installed.
    """,
)

try:
    import blosc

    def _check_blosc():
        pass
except ImportError:
    def _check_blosc():
        raise ImportError('blosc is not installed')

_check_blosc.__doc__ = dedent(
    """\
    Check if blosc is installed.

    Raises
    ------
    ImportError
        Raised when blosc is not installed.
    """,
)

# until we can pass this into our conversion functions,
# this is pretty hacky
compressor = None


def to_msgpack(path_or_buf, *args, **kwargs):
    """
    msgpack (serialize) an object to the given file path or buffer

    THIS IS AN EXPERIMENTAL LIBRARY and the storage format
    may not be stable until a future release.

    Parameters
    ----------
    path_or_buf : string file path, buffer-like, or None
        If None, return the generated bytes.
    args : an object or objects to serialize
    encoding : string
        Encoding for unicode objects.
    append : boolean, default False
        Whether to append to an existing msgpack.
    compress : {'zlib', 'blosc', None}, default None
        Type of compressor.
    """
    global compressor
    compressor = kwargs.pop('compress', None)
    if compressor:
        compressor = u(compressor)
    append = kwargs.pop('append', None)
    if append:
        mode = 'a+b'
    else:
        mode = 'wb'

    def writer(fh):
        for a in args:
            fh.write(pack(a, **kwargs))

    path_or_buf = _stringify_path(path_or_buf)
    if isinstance(path_or_buf, compat.string_types):
        with open(path_or_buf, mode) as fh:
            writer(fh)
    elif path_or_buf is None:
        buf = compat.BytesIO()
        writer(buf)
        return buf.getvalue()
    else:
        writer(path_or_buf)


def read_msgpack(path_or_buf, encoding='utf-8', iterator=False, **kwargs):
    """
    Load a msgpack pandas object from the specified file path.

    THIS IS AN EXPERIMENTAL LIBRARY and the storage format
    may not be stable until a future release.

    Parameters
    ----------
    path_or_buf : string file path, bytes, or buffer-like
    encoding : string, default 'utf-8'
        Encoding for decoding msgpack str type.
    iterator : boolean, default False
        If True, return an iterator to the unpacker.

    Returns
    -------
    obj : same type as object stored in file
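
    Examples
    --------
    A minimal sketch; assumes 'data.msg' was written by ``to_msgpack``.

    >>> df = read_msgpack('data.msg')  # doctest: +SKIP
    >>> unpacker = read_msgpack('data.msg', iterator=True)  # doctest: +SKIP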
    """
    path_or_buf, _, _, should_close = get_filepath_or_buffer(path_or_buf)
    if iterator:
        return Iterator(path_or_buf)

    def read(fh):
        unpacked_obj = list(unpack(fh, encoding=encoding, **kwargs))

        # close the handle (if we opened it) before returning, so that a
        # single unpacked object does not leave the file open
        if should_close:
            try:
                path_or_buf.close()
            except IOError:
                pass

        if len(unpacked_obj) == 1:
            return unpacked_obj[0]
        return unpacked_obj

    # see if we have an actual file
    if isinstance(path_or_buf, compat.string_types):
        try:
            exists = os.path.exists(path_or_buf)
        except (TypeError, ValueError):
            exists = False

        if exists:
            with open(path_or_buf, 'rb') as fh:
                return read(fh)

    if isinstance(path_or_buf, compat.binary_type):
        # treat as a binary-like
        fh = None
        try:
            # We can't distinguish between a path and a buffer of bytes in
            # Python 2 so instead assume the first byte of a valid path is
            # less than 0x80.
            if compat.PY3 or ord(path_or_buf[0]) >= 0x80:
                fh = compat.BytesIO(path_or_buf)
                return read(fh)
        finally:
            if fh is not None:
                fh.close()
    elif hasattr(path_or_buf, 'read') and compat.callable(path_or_buf.read):
        # treat as a buffer like
        return read(path_or_buf)

    raise ValueError('path_or_buf needs to be a string file path, '
                     'bytes, or file-like')


dtype_dict = {21: np.dtype('M8[ns]'),
              u('datetime64[ns]'): np.dtype('M8[ns]'),
              u('datetime64[us]'): np.dtype('M8[us]'),
              22: np.dtype('m8[ns]'),
              u('timedelta64[ns]'): np.dtype('m8[ns]'),
              u('timedelta64[us]'): np.dtype('m8[us]'),

              # this is platform int, which we need to remap to np.int64
              # for compat on windows platforms
              7: np.dtype('int64'),
              'category': 'category'
              }


def dtype_for(t):
    """ return my dtype mapping, whether number or name """
    if t in dtype_dict:
        return dtype_dict[t]
    return np.typeDict.get(t, t)


c2f_dict = {'complex': np.float64,
            'complex128': np.float64,
            'complex64': np.float32}

# windows (32 bit) compat
if hasattr(np, 'float128'):
    c2f_dict['complex256'] = np.float128


def c2f(r, i, ctype_name):
    """
    Convert strings to complex number instance with specified numpy type.
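
    Examples
    --------
    >>> c2f('1.0', '-2.5', 'complex128')  # doctest: +SKIP
    (1-2.5j)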
    """

    ftype = c2f_dict[ctype_name]
    return np.typeDict[ctype_name](ftype(r) + 1j * ftype(i))


def convert(values):
    """ convert the numpy values to a list """

    dtype = values.dtype

    if is_categorical_dtype(values):
        return values

    elif is_object_dtype(dtype):
        return values.ravel().tolist()

    if needs_i8_conversion(dtype):
        values = values.view('i8')
    v = values.ravel()

    if compressor == 'zlib':
        _check_zlib()

        # return string arrays like they are
        if dtype == np.object_:
            return v.tolist()

        # convert to a bytes array
        v = v.tostring()
        return ExtType(0, zlib.compress(v))

    elif compressor == 'blosc':
        _check_blosc()

        # return string arrays like they are
        if dtype == np.object_:
            return v.tolist()

        # convert to a bytes array
        v = v.tostring()
        return ExtType(0, blosc.compress(v, typesize=dtype.itemsize))

    # ndarray (on original dtype)
    return ExtType(0, v.tostring())
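
# Illustrative round-trip, assuming the module-level ``compressor`` is None:
#
#   arr = np.arange(3, dtype='int64')
#   packed = convert(arr)             # ExtType(0, arr.tostring())
#   out = unconvert(packed, 'int64')  # rebuilds the array from raw bytes
#   assert (out == arr).all()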


def unconvert(values, dtype, compress=None):
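    """
    Inverse of ``convert``: rebuild values from a packed payload,
    decompressing with zlib or blosc when ``compress`` is given.
    """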

    as_is_ext = isinstance(values, ExtType) and values.code == 0

    if as_is_ext:
        values = values.data

    if is_categorical_dtype(dtype):
        return values

    elif is_object_dtype(dtype):
        return np.array(values, dtype=object)

    dtype = pandas_dtype(dtype).base

    if not as_is_ext:
        values = values.encode('latin1')

    if compress:
        if compress == u'zlib':
            _check_zlib()
            decompress = zlib.decompress
        elif compress == u'blosc':
            _check_blosc()
            decompress = blosc.decompress
        else:
            raise ValueError("compress must be one of 'zlib' or 'blosc'")

        try:
            return np.frombuffer(
                _move_into_mutable_buffer(decompress(values)),
                dtype=dtype,
            )
        except _BadMove as e:
            # Pull the decompressed data off of the `_BadMove` exception.
            # We don't just store this in the locals because we want to
            # minimize the risk of giving users access to a `bytes` object
            # whose memory we can realloc.
            values = e.args[0]